repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
denys-duchier/django
|
tests/generic_inline_admin/models.py
|
133
|
1617
|
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Episode(models.Model):
    """Simple concrete model that the generic-inline admin tests attach to."""

    name = models.CharField(max_length=100)
    # Optional free-form metadata fields.
    length = models.CharField(max_length=100, blank=True)
    author = models.CharField(max_length=100, blank=True)
class Media(models.Model):
    """
    Media that can be associated with any object.
    """

    # Generic foreign key: (content_type, object_id) together identify a row
    # of any model class.
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()
    url = models.URLField()
    description = models.CharField(max_length=100, blank=True)
    keywords = models.CharField(max_length=100, blank=True)

    def __str__(self):
        # A media item is represented by its URL.
        return self.url
#
# Generic inline with unique_together
#
class Category(models.Model):
    """Category used as an optional foreign-key target from PhoneNumber."""

    name = models.CharField(max_length=50)
class PhoneNumber(models.Model):
    """Generic-FK model, unique per (content type, object, phone number)."""

    content_type = models.ForeignKey(ContentType, models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    phone_number = models.CharField(max_length=30)
    # Optional category; deleting the Category nulls this FK out.
    category = models.ForeignKey(Category, models.SET_NULL, null=True, blank=True)

    class Meta:
        unique_together = (('content_type', 'object_id', 'phone_number',),)
class Contact(models.Model):
    """Owner of PhoneNumber rows via a reverse generic relation."""

    name = models.CharField(max_length=50)
    phone_numbers = GenericRelation(PhoneNumber, related_query_name='phone_numbers')
#
# Generic inline with can_delete=False
#
class EpisodePermanent(Episode):
    # Multi-table-inheritance child of Episode; adds no fields of its own.
    pass
|
bsd-3-clause
|
MostAwesomeDude/construct
|
construct/formats/graphics/bmp.py
|
1
|
3315
|
"""
Windows/OS2 Bitmap (BMP)
this could have been a perfect show-case file format, but they had to make
it ugly (all sorts of alignment or
"""
from construct import *
#===============================================================================
# pixels: uncompressed
#===============================================================================
def UncompressedRows(subcon, align_to_byte = False):
    """Rows of uncompressed pixels.

    Every BMP row is padded out to a 4-byte boundary, and sub-byte pixel
    formats must additionally be padded to whole bytes.
    """
    if align_to_byte:
        row = Bitwise(
            Aligned(Array(lambda ctx: ctx.width, subcon), modulus = 8)
        )
    else:
        row = Array(lambda ctx: ctx.width, subcon)
    return Array(lambda ctx: ctx.height, Aligned(row, modulus = 4))
# Pixel layout keyed on bits-per-pixel: sub-byte formats (1 and 4 bpp) need
# bit-level alignment, 8 bpp stores one "index" byte per pixel, and 24 bpp
# stores raw (red, green, blue) triplets. 16/32 bpp are not handled here.
uncompressed_pixels = Switch("uncompressed", lambda ctx: ctx.bpp,
    {
        1 : UncompressedRows(Bit("index"), align_to_byte = True),
        4 : UncompressedRows(Nibble("index"), align_to_byte = True),
        8 : UncompressedRows(Byte("index")),
        24 : UncompressedRows(
            Sequence("rgb", Byte("red"), Byte("green"), Byte("blue"))
        ),
    }
)
#===============================================================================
# pixels: Run Length Encoding (RLE) 8 bit
#===============================================================================
class RunLengthAdapter(Adapter):
    """Converts between a (length, value) pair and the expanded run."""

    def _encode(self, obj):
        # A run is stored as its length followed by the repeated value.
        return (len(obj), obj[0])

    def _decode(self, obj):
        run_length, pixel = obj
        return [pixel] * run_length
# One RLE8 run on disk: a length byte followed by the value byte to repeat.
rle8pixel = RunLengthAdapter(
    Sequence("rle8pixel",
        Byte("length"),
        Byte("value")
    )
)
#===============================================================================
# file structure
#===============================================================================
bitmap_file = Struct("bitmap_file",
    # header
    Const(String("signature", 2), "BM"),
    ULInt32("file_size"),
    Padding(4),  # 4 reserved bytes
    ULInt32("data_offset"),
    ULInt32("header_size"),
    # header_size doubles as the format-version discriminator.
    Enum(Alias("version", "header_size"),
        v2 = 12,
        v3 = 40,
        v4 = 108,
    ),
    ULInt32("width"),
    ULInt32("height"),
    Value("number_of_pixels", lambda ctx: ctx.width * ctx.height),
    ULInt16("planes"),
    ULInt16("bpp"), # bits per pixel
    Enum(ULInt32("compression"),
        Uncompressed = 0,
        RLE8 = 1,
        RLE4 = 2,
        Bitfields = 3,
        JPEG = 4,
        PNG = 5,
    ),
    ULInt32("image_data_size"), # in bytes
    ULInt32("horizontal_dpi"),
    ULInt32("vertical_dpi"),
    ULInt32("colors_used"),
    ULInt32("important_colors"),
    # palette (24 bit has no palette)
    OnDemand(
        Array(lambda ctx: 2 ** ctx.bpp if ctx.bpp <= 8 else 0,
            Struct("palette",
                Byte("blue"),
                Byte("green"),
                Byte("red"),
                Padding(1),
            )
        )
    ),
    # pixels
    # Parsed lazily from data_offset; only uncompressed data is supported.
    OnDemandPointer(lambda ctx: ctx.data_offset,
        Switch("pixels", lambda ctx: ctx.compression,
            {
                "Uncompressed" : uncompressed_pixels,
            }
        ),
    ),
)
if __name__ == "__main__":
    # Demo: parse a sample bitmap and dump its structure.
    # Use print() call syntax: `print obj` is a SyntaxError on Python 3,
    # while print(x) with a single argument behaves identically on 2 and 3.
    obj = bitmap_file.parse_stream(open("../../tests/bitmap8.bmp", "rb"))
    print(obj)
    print(repr(obj.pixels.value))
|
mit
|
licode/scikit-beam
|
skbeam/core/fitting/models.py
|
4
|
5983
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 09/10/2014 #
# #
# Original code: #
# @author: Mirna Lerotic, 2nd Look Consulting #
# http://www.2ndlookconsulting.com/ #
# Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import inspect
import logging
from lmfit import Model
from .lineshapes import (elastic, compton, lorentzian2)
from .base.parameter_data import get_para
logger = logging.getLogger(__name__)
def set_default(model_name, func_name):
    """
    Set values and bounds to Model parameters in lmfit.

    Parameters
    ----------
    model_name : class object
        Model class object from lmfit
    func_name : function
        function name of physics peak
    """
    # inspect.getargspec was deprecated and removed in Python 3.11;
    # prefer getfullargspec (same .args attribute) and fall back only on
    # interpreters old enough not to have it.
    try:
        paras = inspect.getfullargspec(func_name)
    except AttributeError:
        paras = inspect.getargspec(func_name)
    # the first argument is independent variable, also ignored
    # default values are not considered for fitting in this function
    my_args = paras.args[1:]
    para_dict = get_para()
    for name in my_args:
        if name not in para_dict:
            continue
        my_dict = para_dict[name]
        bound_type = my_dict['bound_type']
        if bound_type == 'none':
            model_name.set_param_hint(name, vary=True)
        elif bound_type == 'fixed':
            model_name.set_param_hint(name, vary=False, value=my_dict['value'])
        elif bound_type == 'lo':
            model_name.set_param_hint(name, value=my_dict['value'], vary=True,
                                      min=my_dict['min'])
        elif bound_type == 'hi':
            model_name.set_param_hint(name, value=my_dict['value'], vary=True,
                                      max=my_dict['max'])
        elif bound_type == 'lohi':
            model_name.set_param_hint(name, value=my_dict['value'], vary=True,
                                      min=my_dict['min'], max=my_dict['max'])
        else:
            raise TypeError("Boundary type {0} can't be "
                            "used".format(bound_type))
def _gen_class_docs(func):
"""
Parameters
----------
func : function
function of peak profile
Returns
-------
str :
documentation of the function
"""
return ("Wrap the {} function for fitting within lmfit "
"framework\n".format(func.__name__) + func.__doc__)
# DEFINE NEW MODELS
class ElasticModel(Model):
    # Class docstring is generated from the wrapped lineshape function.
    __doc__ = _gen_class_docs(elastic)

    def __init__(self, *args, **kwargs):
        super(ElasticModel, self).__init__(elastic, *args, **kwargs)
        # epsilon defaults to 2.96 and is held fixed during fitting.
        self.set_param_hint('epsilon', value=2.96, vary=False)
class ComptonModel(Model):
    # Class docstring is generated from the wrapped lineshape function.
    __doc__ = _gen_class_docs(compton)

    def __init__(self, *args, **kwargs):
        super(ComptonModel, self).__init__(compton, *args, **kwargs)
        # epsilon defaults to 2.96 and is held fixed during fitting.
        self.set_param_hint('epsilon', value=2.96, vary=False)
class Lorentzian2Model(Model):
    # Class docstring is generated from the wrapped lineshape function.
    __doc__ = _gen_class_docs(lorentzian2)

    def __init__(self, *args, **kwargs):
        super(Lorentzian2Model, self).__init__(lorentzian2, *args, **kwargs)
|
bsd-3-clause
|
bluemini/kuma
|
kuma/search/tests/test_types.py
|
26
|
1836
|
from nose.tools import ok_, eq_
from elasticsearch_dsl import query
from kuma.wiki.models import Document
from kuma.wiki.search import WikiDocumentType
from . import ElasticTestCase
class WikiDocumentTypeTests(ElasticTestCase):
    """Search behaviour of WikiDocumentType: excerpts, locales, indexing."""

    fixtures = ElasticTestCase.fixtures + ['wiki/documents.json']

    def test_get_excerpt_strips_html(self):
        self.refresh()
        hits = WikiDocumentType.search().query('match', content='audio')
        ok_(hits.count() > 0)
        for hit in hits.execute():
            snippet = hit.get_excerpt()
            ok_('audio' in snippet)
            ok_('<strong>' not in snippet)

    def test_current_locale_results(self):
        self.refresh()
        title_or_content = (query.Match(title='article') |
                            query.Match(content='article'))
        hits = (WikiDocumentType.search()
                                .query(title_or_content)
                                .filter('term', locale='en-US'))
        for hit in hits.execute():
            eq_('en-US', hit.locale)

    def test_get_excerpt_uses_summary(self):
        self.refresh()
        hits = WikiDocumentType.search().query('match', content='audio')
        ok_(hits.count() > 0)
        for hit in hits.execute():
            snippet = hit.get_excerpt()
            ok_('the word for tough things' in snippet)
            ok_('extra content' not in snippet)

    def test_hidden_slugs_get_indexable(self):
        self.refresh()
        titles = WikiDocumentType.get_indexable().values_list('title',
                                                              flat=True)
        ok_('User:jezdez' not in titles)

    def test_hidden_slugs_should_update(self):
        hidden = Document.objects.get(slug='User:jezdez')
        eq_(WikiDocumentType.should_update(hidden), False)
|
mpl-2.0
|
sander76/home-assistant
|
tests/components/homekit_controller/test_climate.py
|
6
|
26995
|
"""Basic checks for HomeKitclimate."""
from aiohomekit.model.characteristics import (
ActivationStateValues,
CharacteristicsTypes,
CurrentHeaterCoolerStateValues,
SwingModeValues,
TargetHeaterCoolerStateValues,
)
from aiohomekit.model.services import ServicesTypes
from homeassistant.components.climate.const import (
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SERVICE_SET_HUMIDITY,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
)
from tests.components.homekit_controller.common import setup_test_component
# (service, characteristic) aliases used to read/write accessory state
# directly via helper.characteristics in the thermostat tests below.
HEATING_COOLING_TARGET = ("thermostat", "heating-cooling.target")
HEATING_COOLING_CURRENT = ("thermostat", "heating-cooling.current")
THERMOSTAT_TEMPERATURE_COOLING_THRESHOLD = (
    "thermostat",
    "temperature.cooling-threshold",
)
THERMOSTAT_TEMPERATURE_HEATING_THRESHOLD = (
    "thermostat",
    "temperature.heating-threshold",
)
TEMPERATURE_TARGET = ("thermostat", "temperature.target")
TEMPERATURE_CURRENT = ("thermostat", "temperature.current")
HUMIDITY_TARGET = ("thermostat", "relative-humidity.target")
HUMIDITY_CURRENT = ("thermostat", "relative-humidity.current")
# Test thermostat devices
def create_thermostat_service(accessory):
    """Attach a fully-featured thermostat service to the test accessory."""
    service = accessory.add_service(ServicesTypes.THERMOSTAT)

    service.add_char(CharacteristicsTypes.HEATING_COOLING_TARGET).value = 0
    service.add_char(CharacteristicsTypes.HEATING_COOLING_CURRENT).value = 0

    cooling = service.add_char(CharacteristicsTypes.TEMPERATURE_COOLING_THRESHOLD)
    cooling.minValue = 15
    cooling.maxValue = 40
    cooling.value = 0

    heating = service.add_char(CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD)
    heating.minValue = 4
    heating.maxValue = 30
    heating.value = 0

    target = service.add_char(CharacteristicsTypes.TEMPERATURE_TARGET)
    target.minValue = 7
    target.maxValue = 35
    target.value = 0

    service.add_char(CharacteristicsTypes.TEMPERATURE_CURRENT).value = 0
    service.add_char(CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET).value = 0
    service.add_char(CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT).value = 0
def create_thermostat_service_min_max(accessory):
    """Thermostat whose target mode only advertises a min/max range."""
    service = accessory.add_service(ServicesTypes.THERMOSTAT)
    target_mode = service.add_char(CharacteristicsTypes.HEATING_COOLING_TARGET)
    target_mode.value = 0
    target_mode.minValue = 0
    target_mode.maxValue = 1
async def test_climate_respect_supported_op_modes_1(hass, utcnow):
    """Test that climate respects minValue/maxValue hints."""
    helper = await setup_test_component(hass, create_thermostat_service_min_max)
    state = await helper.poll_and_get_state()
    # minValue=0 / maxValue=1 limits the accessory to off (0) and heat (1).
    assert state.attributes["hvac_modes"] == ["off", "heat"]
def create_thermostat_service_valid_vals(accessory):
    """Thermostat whose target mode advertises an explicit valid-values list."""
    service = accessory.add_service(ServicesTypes.THERMOSTAT)
    target_mode = service.add_char(CharacteristicsTypes.HEATING_COOLING_TARGET)
    target_mode.value = 0
    target_mode.valid_values = [0, 1, 2]
async def test_climate_respect_supported_op_modes_2(hass, utcnow):
    """Test that climate respects validValue hints."""
    helper = await setup_test_component(hass, create_thermostat_service_valid_vals)
    state = await helper.poll_and_get_state()
    # valid_values [0, 1, 2] maps to off, heat and cool.
    assert state.attributes["hvac_modes"] == ["off", "heat", "cool"]
async def test_climate_change_thermostat_state(hass, utcnow):
    """Test that we can turn a HomeKit thermostat on and off again."""
    helper = await setup_test_component(hass, create_thermostat_service)

    # Each HA hvac_mode should be written through to the accessory's
    # heating-cooling.target characteristic: heat=1, cool=2, heat_cool=3, off=0.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
        blocking=True,
    )
    assert helper.characteristics[HEATING_COOLING_TARGET].value == 1

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
        blocking=True,
    )
    assert helper.characteristics[HEATING_COOLING_TARGET].value == 2

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
        blocking=True,
    )
    assert helper.characteristics[HEATING_COOLING_TARGET].value == 3

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_OFF},
        blocking=True,
    )
    assert helper.characteristics[HEATING_COOLING_TARGET].value == 0
async def test_climate_check_min_max_values_per_mode(hass, utcnow):
    """Test that we get the appropriate min/max values for each mode."""
    helper = await setup_test_component(hass, create_thermostat_service)

    # heat and cool expose the TEMPERATURE_TARGET range (7..35) defined in
    # create_thermostat_service.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
        blocking=True,
    )
    climate_state = await helper.poll_and_get_state()
    assert climate_state.attributes["min_temp"] == 7
    assert climate_state.attributes["max_temp"] == 35

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
        blocking=True,
    )
    climate_state = await helper.poll_and_get_state()
    assert climate_state.attributes["min_temp"] == 7
    assert climate_state.attributes["max_temp"] == 35

    # heat_cool spans the heating threshold's min (4) up to the cooling
    # threshold's max (40).
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
        blocking=True,
    )
    climate_state = await helper.poll_and_get_state()
    assert climate_state.attributes["min_temp"] == 4
    assert climate_state.attributes["max_temp"] == 40
async def test_climate_change_thermostat_temperature(hass, utcnow):
    """Test that we can change a HomeKit thermostat's target temperature."""
    helper = await setup_test_component(hass, create_thermostat_service)

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {"entity_id": "climate.testdevice", "temperature": 21},
        blocking=True,
    )
    assert helper.characteristics[TEMPERATURE_TARGET].value == 21

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {"entity_id": "climate.testdevice", "temperature": 25},
        blocking=True,
    )
    assert helper.characteristics[TEMPERATURE_TARGET].value == 25
async def test_climate_change_thermostat_temperature_range(hass, utcnow):
    """Test that we can set separate heat and cool setpoints in heat_cool mode."""
    helper = await setup_test_component(hass, create_thermostat_service)

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
        blocking=True,
    )
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            "entity_id": "climate.testdevice",
            "hvac_mode": HVAC_MODE_HEAT_COOL,
            "target_temp_high": 25,
            "target_temp_low": 20,
        },
        blocking=True,
    )
    # The low/high setpoints land on the threshold characteristics and the
    # single target temperature becomes their midpoint.
    assert helper.characteristics[TEMPERATURE_TARGET].value == 22.5
    assert helper.characteristics[THERMOSTAT_TEMPERATURE_HEATING_THRESHOLD].value == 20
    assert helper.characteristics[THERMOSTAT_TEMPERATURE_COOLING_THRESHOLD].value == 25
async def test_climate_change_thermostat_temperature_range_iphone(hass, utcnow):
    """Test that we can set all three set points at once (iPhone heat_cool mode support)."""
    helper = await setup_test_component(hass, create_thermostat_service)

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
        blocking=True,
    )
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            "entity_id": "climate.testdevice",
            "hvac_mode": HVAC_MODE_HEAT_COOL,
            "temperature": 22,
            "target_temp_low": 20,
            "target_temp_high": 24,
        },
        blocking=True,
    )
    # All three values are written through unchanged in heat_cool mode.
    assert helper.characteristics[TEMPERATURE_TARGET].value == 22
    assert helper.characteristics[THERMOSTAT_TEMPERATURE_HEATING_THRESHOLD].value == 20
    assert helper.characteristics[THERMOSTAT_TEMPERATURE_COOLING_THRESHOLD].value == 24
async def test_climate_cannot_set_thermostat_temp_range_in_wrong_mode(hass, utcnow):
    """Test that we cannot set range values when not in heat_cool mode."""
    helper = await setup_test_component(hass, create_thermostat_service)

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
        blocking=True,
    )
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            "entity_id": "climate.testdevice",
            "hvac_mode": HVAC_MODE_HEAT_COOL,
            "temperature": 22,
            "target_temp_low": 20,
            "target_temp_high": 24,
        },
        blocking=True,
    )
    # Only the single target is written; the threshold characteristics keep
    # their initial value of 0 because the device is in heat mode.
    assert helper.characteristics[TEMPERATURE_TARGET].value == 22
    assert helper.characteristics[THERMOSTAT_TEMPERATURE_HEATING_THRESHOLD].value == 0
    assert helper.characteristics[THERMOSTAT_TEMPERATURE_COOLING_THRESHOLD].value == 0
def create_thermostat_single_set_point_auto(accessory):
    """Thermostat with a single set point in auto (no threshold characteristics)."""
    service = accessory.add_service(ServicesTypes.THERMOSTAT)

    service.add_char(CharacteristicsTypes.HEATING_COOLING_TARGET).value = 0
    service.add_char(CharacteristicsTypes.HEATING_COOLING_CURRENT).value = 0

    target = service.add_char(CharacteristicsTypes.TEMPERATURE_TARGET)
    target.minValue = 7
    target.maxValue = 35
    target.value = 0

    service.add_char(CharacteristicsTypes.TEMPERATURE_CURRENT).value = 0
    service.add_char(CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET).value = 0
    service.add_char(CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT).value = 0
async def test_climate_check_min_max_values_per_mode_sspa_device(hass, utcnow):
    """Test appropriate min/max values for each mode on sspa devices."""
    helper = await setup_test_component(hass, create_thermostat_single_set_point_auto)

    # With no threshold characteristics, every mode falls back to the single
    # TEMPERATURE_TARGET range of 7..35.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
        blocking=True,
    )
    climate_state = await helper.poll_and_get_state()
    assert climate_state.attributes["min_temp"] == 7
    assert climate_state.attributes["max_temp"] == 35

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
        blocking=True,
    )
    climate_state = await helper.poll_and_get_state()
    assert climate_state.attributes["min_temp"] == 7
    assert climate_state.attributes["max_temp"] == 35

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
        blocking=True,
    )
    climate_state = await helper.poll_and_get_state()
    assert climate_state.attributes["min_temp"] == 7
    assert climate_state.attributes["max_temp"] == 35
async def test_climate_set_thermostat_temp_on_sspa_device(hass, utcnow):
    """Test setting temperature in different modes on device with single set point in auto."""
    helper = await setup_test_component(hass, create_thermostat_single_set_point_auto)

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
        blocking=True,
    )
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {"entity_id": "climate.testdevice", "temperature": 21},
        blocking=True,
    )
    assert helper.characteristics[TEMPERATURE_TARGET].value == 21

    # Switching modes must not clobber the existing set point.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
        blocking=True,
    )
    assert helper.characteristics[TEMPERATURE_TARGET].value == 21

    # In heat_cool the single "temperature" value still updates the target.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            "entity_id": "climate.testdevice",
            "hvac_mode": HVAC_MODE_HEAT_COOL,
            "temperature": 22,
        },
        blocking=True,
    )
    assert helper.characteristics[TEMPERATURE_TARGET].value == 22
async def test_climate_change_thermostat_humidity(hass, utcnow):
    """Test that we can change a HomeKit thermostat's target humidity."""
    helper = await setup_test_component(hass, create_thermostat_service)

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HUMIDITY,
        {"entity_id": "climate.testdevice", "humidity": 50},
        blocking=True,
    )
    assert helper.characteristics[HUMIDITY_TARGET].value == 50

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HUMIDITY,
        {"entity_id": "climate.testdevice", "humidity": 45},
        blocking=True,
    )
    assert helper.characteristics[HUMIDITY_TARGET].value == 45
async def test_climate_read_thermostat_state(hass, utcnow):
    """Test that we can read the state of a HomeKit thermostat accessory."""
    helper = await setup_test_component(hass, create_thermostat_service)

    # Simulate that heating is on (current/target mode value 1 = heat).
    helper.characteristics[TEMPERATURE_CURRENT].value = 19
    helper.characteristics[TEMPERATURE_TARGET].value = 21
    helper.characteristics[HEATING_COOLING_CURRENT].value = 1
    helper.characteristics[HEATING_COOLING_TARGET].value = 1
    helper.characteristics[HUMIDITY_CURRENT].value = 50
    helper.characteristics[HUMIDITY_TARGET].value = 45

    state = await helper.poll_and_get_state()
    assert state.state == HVAC_MODE_HEAT
    assert state.attributes["current_temperature"] == 19
    assert state.attributes["current_humidity"] == 50
    assert state.attributes["min_temp"] == 7
    assert state.attributes["max_temp"] == 35

    # Simulate that cooling is on (value 2 = cool).
    helper.characteristics[TEMPERATURE_CURRENT].value = 21
    helper.characteristics[TEMPERATURE_TARGET].value = 19
    helper.characteristics[HEATING_COOLING_CURRENT].value = 2
    helper.characteristics[HEATING_COOLING_TARGET].value = 2
    helper.characteristics[HUMIDITY_CURRENT].value = 45
    helper.characteristics[HUMIDITY_TARGET].value = 45

    state = await helper.poll_and_get_state()
    assert state.state == HVAC_MODE_COOL
    assert state.attributes["current_temperature"] == 21
    assert state.attributes["current_humidity"] == 45

    # Simulate that we are in heat/cool mode (target value 3 = auto).
    helper.characteristics[TEMPERATURE_CURRENT].value = 21
    helper.characteristics[TEMPERATURE_TARGET].value = 21
    helper.characteristics[HEATING_COOLING_CURRENT].value = 0
    helper.characteristics[HEATING_COOLING_TARGET].value = 3

    state = await helper.poll_and_get_state()
    assert state.state == HVAC_MODE_HEAT_COOL
async def test_hvac_mode_vs_hvac_action(hass, utcnow):
    """Check that we haven't conflated hvac_mode and hvac_action."""
    helper = await setup_test_component(hass, create_thermostat_service)

    # Simulate that current temperature is above target temp
    # Heating might be on, but hvac_action currently 'off'
    helper.characteristics[TEMPERATURE_CURRENT].value = 22
    helper.characteristics[TEMPERATURE_TARGET].value = 21
    helper.characteristics[HEATING_COOLING_CURRENT].value = 0
    helper.characteristics[HEATING_COOLING_TARGET].value = 1
    helper.characteristics[HUMIDITY_CURRENT].value = 50
    helper.characteristics[HUMIDITY_TARGET].value = 45

    state = await helper.poll_and_get_state()
    # Mode follows the target characteristic; action follows the current one.
    assert state.state == "heat"
    assert state.attributes["hvac_action"] == "idle"

    # Simulate that current temperature is below target temp
    # Heating might be on and hvac_action currently 'heat'
    helper.characteristics[TEMPERATURE_CURRENT].value = 19
    helper.characteristics[HEATING_COOLING_CURRENT].value = 1

    state = await helper.poll_and_get_state()
    assert state.state == "heat"
    assert state.attributes["hvac_action"] == "heating"
# (service, characteristic) aliases for the heater-cooler tests below.
TARGET_HEATER_COOLER_STATE = ("heater-cooler", "heater-cooler.state.target")
CURRENT_HEATER_COOLER_STATE = ("heater-cooler", "heater-cooler.state.current")
HEATER_COOLER_ACTIVE = ("heater-cooler", "active")
HEATER_COOLER_TEMPERATURE_CURRENT = ("heater-cooler", "temperature.current")
TEMPERATURE_COOLING_THRESHOLD = ("heater-cooler", "temperature.cooling-threshold")
TEMPERATURE_HEATING_THRESHOLD = ("heater-cooler", "temperature.heating-threshold")
SWING_MODE = ("heater-cooler", "swing-mode")
def create_heater_cooler_service(accessory):
    """Attach a heater-cooler service with the characteristics the tests need."""
    service = accessory.add_service(ServicesTypes.HEATER_COOLER)

    service.add_char(CharacteristicsTypes.TARGET_HEATER_COOLER_STATE).value = 0
    service.add_char(CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE).value = 0
    service.add_char(CharacteristicsTypes.ACTIVE).value = 1

    cooling = service.add_char(CharacteristicsTypes.TEMPERATURE_COOLING_THRESHOLD)
    cooling.minValue = 7
    cooling.maxValue = 35
    cooling.value = 0

    heating = service.add_char(CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD)
    heating.minValue = 7
    heating.maxValue = 35
    heating.value = 0

    service.add_char(CharacteristicsTypes.TEMPERATURE_CURRENT).value = 0
    service.add_char(CharacteristicsTypes.SWING_MODE).value = 0
# Test heater-cooler devices
def create_heater_cooler_service_min_max(accessory):
    """Heater-cooler whose target state only advertises a min/max range."""
    service = accessory.add_service(ServicesTypes.HEATER_COOLER)
    target_state = service.add_char(CharacteristicsTypes.TARGET_HEATER_COOLER_STATE)
    target_state.value = 1
    target_state.minValue = 1
    target_state.maxValue = 2
async def test_heater_cooler_respect_supported_op_modes_1(hass, utcnow):
    """Test that climate respects minValue/maxValue hints."""
    helper = await setup_test_component(hass, create_heater_cooler_service_min_max)
    state = await helper.poll_and_get_state()
    # minValue=1 / maxValue=2 allows heat and cool; "off" is always offered.
    assert state.attributes["hvac_modes"] == ["heat", "cool", "off"]
def create_theater_cooler_service_valid_vals(accessory):
    """Heater-cooler whose target state advertises a valid-values list."""
    # NOTE: the 'theater' typo in the function name is kept intentionally —
    # the tests refer to it by this exact name.
    service = accessory.add_service(ServicesTypes.HEATER_COOLER)
    target_state = service.add_char(CharacteristicsTypes.TARGET_HEATER_COOLER_STATE)
    target_state.value = 1
    target_state.valid_values = [1, 2]
async def test_heater_cooler_respect_supported_op_modes_2(hass, utcnow):
    """Test that climate respects validValue hints."""
    helper = await setup_test_component(hass, create_theater_cooler_service_valid_vals)
    state = await helper.poll_and_get_state()
    # valid_values [1, 2] allows heat and cool; "off" is always offered.
    assert state.attributes["hvac_modes"] == ["heat", "cool", "off"]
async def test_heater_cooler_change_thermostat_state(hass, utcnow):
    """Test that we can change the operational mode."""
    helper = await setup_test_component(hass, create_heater_cooler_service)

    # heat/cool/heat_cool map to the target state characteristic; "off"
    # instead deactivates the accessory via the ACTIVE characteristic.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
        blocking=True,
    )
    assert (
        helper.characteristics[TARGET_HEATER_COOLER_STATE].value
        == TargetHeaterCoolerStateValues.HEAT
    )

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
        blocking=True,
    )
    assert (
        helper.characteristics[TARGET_HEATER_COOLER_STATE].value
        == TargetHeaterCoolerStateValues.COOL
    )

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
        blocking=True,
    )
    assert (
        helper.characteristics[TARGET_HEATER_COOLER_STATE].value
        == TargetHeaterCoolerStateValues.AUTOMATIC
    )

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_OFF},
        blocking=True,
    )
    assert (
        helper.characteristics[HEATER_COOLER_ACTIVE].value
        == ActivationStateValues.INACTIVE
    )
async def test_heater_cooler_change_thermostat_temperature(hass, utcnow):
    """Test that we can change the target temperature."""
    helper = await setup_test_component(hass, create_heater_cooler_service)

    # In heat mode the temperature lands on the heating threshold...
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
        blocking=True,
    )
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {"entity_id": "climate.testdevice", "temperature": 20},
        blocking=True,
    )
    assert helper.characteristics[TEMPERATURE_HEATING_THRESHOLD].value == 20

    # ...and in cool mode on the cooling threshold.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
        blocking=True,
    )
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {"entity_id": "climate.testdevice", "temperature": 26},
        blocking=True,
    )
    assert helper.characteristics[TEMPERATURE_COOLING_THRESHOLD].value == 26
async def test_heater_cooler_read_thermostat_state(hass, utcnow):
    """Test that we can read the state of a HomeKit thermostat accessory."""
    helper = await setup_test_component(hass, create_heater_cooler_service)

    # Simulate that heating is on
    helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 19
    helper.characteristics[TEMPERATURE_HEATING_THRESHOLD].value = 20
    helper.characteristics[
        CURRENT_HEATER_COOLER_STATE
    ].value = CurrentHeaterCoolerStateValues.HEATING
    helper.characteristics[
        TARGET_HEATER_COOLER_STATE
    ].value = TargetHeaterCoolerStateValues.HEAT
    helper.characteristics[SWING_MODE].value = SwingModeValues.DISABLED

    state = await helper.poll_and_get_state()
    assert state.state == HVAC_MODE_HEAT
    assert state.attributes["current_temperature"] == 19
    assert state.attributes["min_temp"] == 7
    assert state.attributes["max_temp"] == 35

    # Simulate that cooling is on
    helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 21
    helper.characteristics[TEMPERATURE_COOLING_THRESHOLD].value = 19
    helper.characteristics[
        CURRENT_HEATER_COOLER_STATE
    ].value = CurrentHeaterCoolerStateValues.COOLING
    helper.characteristics[
        TARGET_HEATER_COOLER_STATE
    ].value = TargetHeaterCoolerStateValues.COOL
    helper.characteristics[SWING_MODE].value = SwingModeValues.DISABLED

    state = await helper.poll_and_get_state()
    assert state.state == HVAC_MODE_COOL
    assert state.attributes["current_temperature"] == 21

    # Simulate that we are in auto mode (target state AUTOMATIC).
    helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 21
    helper.characteristics[TEMPERATURE_COOLING_THRESHOLD].value = 21
    helper.characteristics[
        CURRENT_HEATER_COOLER_STATE
    ].value = CurrentHeaterCoolerStateValues.COOLING
    helper.characteristics[
        TARGET_HEATER_COOLER_STATE
    ].value = TargetHeaterCoolerStateValues.AUTOMATIC
    helper.characteristics[SWING_MODE].value = SwingModeValues.DISABLED

    state = await helper.poll_and_get_state()
    assert state.state == HVAC_MODE_HEAT_COOL
async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow):
    """Check that we haven't conflated hvac_mode and hvac_action."""
    helper = await setup_test_component(hass, create_heater_cooler_service)

    def simulate(values):
        # Write a batch of characteristic values onto the fake accessory.
        for char, value in values.items():
            helper.characteristics[char].value = value

    # Current temperature is above target: mode is 'heat' but the device
    # is idling, so hvac_action must be 'idle', not 'off' or 'heating'.
    simulate({
        HEATER_COOLER_TEMPERATURE_CURRENT: 22,
        TEMPERATURE_HEATING_THRESHOLD: 21,
        CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE,
        TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT,
        SWING_MODE: SwingModeValues.DISABLED,
    })
    state = await helper.poll_and_get_state()
    assert state.state == "heat"
    assert state.attributes["hvac_action"] == "idle"

    # Current temperature drops below target: the device actively heats.
    simulate({
        HEATER_COOLER_TEMPERATURE_CURRENT: 19,
        CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.HEATING,
    })
    state = await helper.poll_and_get_state()
    assert state.state == "heat"
    assert state.attributes["hvac_action"] == "heating"
async def test_heater_cooler_change_swing_mode(hass, utcnow):
    """Test that we can change the swing mode."""
    helper = await setup_test_component(hass, create_heater_cooler_service)

    # Each requested HA swing mode must land as the matching HomeKit value.
    for requested, expected in (
        ("vertical", SwingModeValues.ENABLED),
        ("off", SwingModeValues.DISABLED),
    ):
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_SWING_MODE,
            {"entity_id": "climate.testdevice", "swing_mode": requested},
            blocking=True,
        )
        assert helper.characteristics[SWING_MODE].value == expected
async def test_heater_cooler_turn_off(hass, utcnow):
    """Test that both hvac_action and hvac_mode return "off" when turned off."""
    helper = await setup_test_component(hass, create_heater_cooler_service)

    # The device is inactive, yet the accessory still reports a stale
    # HEATING state; the entity must trust ACTIVE over the current state.
    chars = helper.characteristics
    chars[HEATER_COOLER_ACTIVE].value = ActivationStateValues.INACTIVE
    chars[CURRENT_HEATER_COOLER_STATE].value = CurrentHeaterCoolerStateValues.HEATING
    chars[TARGET_HEATER_COOLER_STATE].value = TargetHeaterCoolerStateValues.HEAT

    state = await helper.poll_and_get_state()
    assert state.state == "off"
    assert state.attributes["hvac_action"] == "off"
|
apache-2.0
|
fweik/espresso
|
testsuite/scripts/tutorials/test_lattice_boltzmann_part3.py
|
3
|
2511
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
# '@TEST_SUFFIX@' and '@TUTORIALS_DIR@' are placeholders substituted by the
# build system when this test is configured.
if '@TEST_SUFFIX@' == 'rouse':
    params = {}
elif '@TEST_SUFFIX@' == 'zimm':
    # The Zimm variant needs more sampling loops and a GPU-backed LB fluid.
    params = {'LOOPS': 2000, 'POLYMER_MODEL': 'Zimm', 'gpu': True}
# NOTE(review): if the substituted suffix is neither 'rouse' nor 'zimm',
# `params` is undefined and the call below raises NameError -- presumably
# only these two suffixes are ever generated; confirm in the build config.
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
    "@TUTORIALS_DIR@/lattice_boltzmann/lattice_boltzmann_part3.py",
    script_suffix="@TEST_SUFFIX@", **params)
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
    """Sanity checks on the observables computed by the LB polymer tutorial."""

    system = tutorial.system

    def test_exponents(self):
        """Scaling exponents must lie near their theoretical values."""
        # (label, measured value, lower bound, upper bound, reference value)
        exponent_checks = (
            ('R_F', tutorial.rf_exponent, 0.50, 0.85, '0.588'),
            ('R_g', tutorial.rg_exponent, 0.50, 0.75, '0.588'),
            ('R_h', tutorial.rh_exponent, 0.30, 0.50, '0.333'),
        )
        for label, value, lower, upper, reference in exponent_checks:
            msg = 'The {} exponent should be close to {}'.format(label, reference)
            self.assertGreater(value, lower, msg=msg)
            self.assertLess(value, upper, msg=msg)
        np.testing.assert_allclose(tutorial.rf2_rg2_ratio, 6.0, atol=1.0,
                                   err_msg='R_F^2/R_g^2 should be close to 6.0')

    def test_diffusion_coefficients(self):
        """Diffusion constants from MSD and Green-Kubo must match references."""
        # polymer diffusion
        ref_D = [0.0363, 0.0269, 0.0234]
        for measured in (tutorial.diffusion_msd, tutorial.diffusion_gk):
            np.testing.assert_allclose(measured, ref_D, rtol=0.15)
        # monomer diffusion (only the Rouse model has an analytic D0)
        if tutorial.POLYMER_MODEL == 'Rouse':
            ref_D0 = tutorial.KT / tutorial.GAMMA
            for fitted in (tutorial.popt_msd[0], tutorial.popt_gk[0]):
                self.assertAlmostEqual(fitted, ref_D0, delta=0.02)
# Allow running this test file directly (the CTest driver does exactly this).
if __name__ == "__main__":
    ut.main()
|
gpl-3.0
|
arskom/spyne
|
spyne/const/xml.py
|
2
|
8841
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.const.xml`` module contains various XML-related constants like
namespace prefixes, namespace values and schema uris.
"""
# Core W3C namespaces.
NS_XML = 'http://www.w3.org/XML/1998/namespace'
NS_XSD = 'http://www.w3.org/2001/XMLSchema'
NS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
# WS-Addressing, XOP, XHTML and partner-link namespaces.
NS_WSA = 'http://schemas.xmlsoap.org/ws/2003/03/addressing'
NS_XOP = 'http://www.w3.org/2004/08/xop/include'
NS_XHTML = 'http://www.w3.org/1999/xhtml'
NS_PLINK = 'http://schemas.xmlsoap.org/ws/2003/05/partner-link/'
# SOAP 1.1 / 1.2 encoding and envelope namespaces.
NS_SOAP11_ENC = 'http://schemas.xmlsoap.org/soap/encoding/'
NS_SOAP11_ENV = 'http://schemas.xmlsoap.org/soap/envelope/'
NS_SOAP12_ENC = 'http://www.w3.org/2003/05/soap-encoding'
NS_SOAP12_ENV = 'http://www.w3.org/2003/05/soap-envelope'
# WSDL 1.1 and its SOAP/HTTP binding extension namespaces.
NS_WSDL11 = 'http://schemas.xmlsoap.org/wsdl/'
NS_WSDL11_SOAP = 'http://schemas.xmlsoap.org/wsdl/soap/'
NS_WSDL11_SOAP12 = 'http://schemas.xmlsoap.org/wsdl/soap12/'
NS_WSDL11_HTTP = 'http://schemas.xmlsoap.org/wsdl/http/'
# Canonical prefix -> namespace-uri mapping used when emitting documents.
NSMAP = {
    'xml': NS_XML,
    'xs': NS_XSD,
    'xsi': NS_XSI,
    'plink': NS_PLINK,
    'wsdlsoap11': NS_WSDL11_SOAP,
    'wsdlsoap12': NS_WSDL11_SOAP12,
    'wsdl': NS_WSDL11,
    'soap11enc': NS_SOAP11_ENC,
    'soap11env': NS_SOAP11_ENV,
    'soap12env': NS_SOAP12_ENV,
    'soap12enc': NS_SOAP12_ENC,
    'wsa': NS_WSA,
    'xop': NS_XOP,
    'http': NS_WSDL11_HTTP,
}
# Reverse lookup (namespace uri -> canonical prefix); rebuilt from NSMAP.
PREFMAP = None


def _regen_prefmap():
    """Regenerate PREFMAP as the inverse of the NSMAP prefix map."""
    global PREFMAP
    PREFMAP = {uri: prefix for prefix, uri in NSMAP.items()}


_regen_prefmap()
# Well-known schema document locations, keyed by namespace uri.
schema_location = {
    NS_XSD: 'http://www.w3.org/2001/XMLSchema.xsd',
}
# Sentinel type used to mean "use the default namespace" in lookups.
class DEFAULT_NS(object): pass
def get_binding_ns(protocol_type):
    """Return the wsdl binding namespace for the given protocol type string.

    'soap12' wins over 'http' when both markers are present, matching the
    original precedence of the checks.
    """
    for marker, binding in (('soap12', WSDL11_SOAP12), ('http', WSDL11_HTTP)):
        if marker in protocol_type:
            return binding
    # Bind to Soap1.1 namespace by default for backwards compatibility.
    return WSDL11_SOAP
def Tnswrap(ns):
    """Return a callable wrapping a tag name in Clark notation: ``{ns}tag``."""
    def _wrap(tag):
        return "{%s}%s" % (ns, tag)
    return _wrap
# Convenience callables producing fully-qualified tag names, e.g.
# XSD('element') -> '{http://www.w3.org/2001/XMLSchema}element'.
XML = Tnswrap(NS_XML)
XSD = Tnswrap(NS_XSD)
XSI = Tnswrap(NS_XSI)
WSA = Tnswrap(NS_WSA)
XOP = Tnswrap(NS_XOP)
XHTML = Tnswrap(NS_XHTML)
PLINK = Tnswrap(NS_PLINK)
SOAP11_ENC = Tnswrap(NS_SOAP11_ENC)
SOAP11_ENV = Tnswrap(NS_SOAP11_ENV)
SOAP12_ENC = Tnswrap(NS_SOAP12_ENC)
SOAP12_ENV = Tnswrap(NS_SOAP12_ENV)
WSDL11 = Tnswrap(NS_WSDL11)
WSDL11_SOAP = Tnswrap(NS_WSDL11_SOAP)
WSDL11_SOAP12 = Tnswrap(NS_WSDL11_SOAP12)
WSDL11_HTTP = Tnswrap(NS_WSDL11_HTTP)
# names starting with underscore need () around to be used as proper regexps
# XML 1.0 "BaseChar" production: the alphabetic characters, script by script.
_PATT_BASE_CHAR = \
    u"[\u0041-\u005A]|[\u0061-\u007A]|[\u00C0-\u00D6]|[\u00D8-\u00F6]" \
    u"|[\u00F8-\u00FF]|[\u0100-\u0131]|[\u0134-\u013E]|[\u0141-\u0148]" \
    u"|[\u014A-\u017E]|[\u0180-\u01C3]|[\u01CD-\u01F0]|[\u01F4-\u01F5]" \
    u"|[\u01FA-\u0217]|[\u0250-\u02A8]|[\u02BB-\u02C1]|\u0386|[\u0388-\u038A]" \
    u"|\u038C|[\u038E-\u03A1]|[\u03A3-\u03CE]|[\u03D0-\u03D6]" \
    u"|\u03DA|\u03DC|\u03DE|\u03E0|[\u03E2-\u03F3]|[\u0401-\u040C]" \
    u"|[\u040E-\u044F]|[\u0451-\u045C]|[\u045E-\u0481]|[\u0490-\u04C4]" \
    u"|[\u04C7-\u04C8]|[\u04CB-\u04CC]|[\u04D0-\u04EB]|[\u04EE-\u04F5]" \
    u"|[\u04F8-\u04F9]|[\u0531-\u0556]|\u0559|[\u0561-\u0586]|[\u05D0-\u05EA]" \
    u"|[\u05F0-\u05F2]|[\u0621-\u063A]|[\u0641-\u064A]|[\u0671-\u06B7]" \
    u"|[\u06BA-\u06BE]|[\u06C0-\u06CE]|[\u06D0-\u06D3]|\u06D5|[\u06E5-\u06E6]" \
    u"|[\u0905-\u0939]|\u093D|[\u0958-\u0961]|[\u0985-\u098C]|[\u098F-\u0990]" \
    u"|[\u0993-\u09A8]|[\u09AA-\u09B0]|\u09B2|[\u09B6-\u09B9]|[\u09DC-\u09DD]" \
    u"|[\u09DF-\u09E1]|[\u09F0-\u09F1]|[\u0A05-\u0A0A]|[\u0A0F-\u0A10]" \
    u"|[\u0A13-\u0A28]|[\u0A2A-\u0A30]|[\u0A32-\u0A33]|[\u0A35-\u0A36]" \
    u"|[\u0A38-\u0A39]|[\u0A59-\u0A5C]|\u0A5E|[\u0A72-\u0A74]|[\u0A85-\u0A8B]" \
    u"|\u0A8D|[\u0A8F-\u0A91]|[\u0A93-\u0AA8]|[\u0AAA-\u0AB0]|[\u0AB2-\u0AB3]" \
    u"|[\u0AB5-\u0AB9]|\u0ABD|\u0AE0|[\u0B05-\u0B0C]|[\u0B0F-\u0B10]" \
    u"|[\u0B13-\u0B28]|[\u0B2A-\u0B30]|[\u0B32-\u0B33]|[\u0B36-\u0B39]|\u0B3D" \
    u"|[\u0B5C-\u0B5D]|[\u0B5F-\u0B61]|[\u0B85-\u0B8A]|[\u0B8E-\u0B90]" \
    u"|[\u0B92-\u0B95]|[\u0B99-\u0B9A]|\u0B9C|[\u0B9E-\u0B9F]|[\u0BA3-\u0BA4]" \
    u"|[\u0BA8-\u0BAA]|[\u0BAE-\u0BB5]|[\u0BB7-\u0BB9]|[\u0C05-\u0C0C]" \
    u"|[\u0C0E-\u0C10]|[\u0C12-\u0C28]|[\u0C2A-\u0C33]|[\u0C35-\u0C39]" \
    u"|[\u0C60-\u0C61]|[\u0C85-\u0C8C]|[\u0C8E-\u0C90]|[\u0C92-\u0CA8]" \
    u"|[\u0CAA-\u0CB3]|[\u0CB5-\u0CB9]|\u0CDE|[\u0CE0-\u0CE1]|[\u0D05-\u0D0C]" \
    u"|[\u0D0E-\u0D10]|[\u0D12-\u0D28]|[\u0D2A-\u0D39]|[\u0D60-\u0D61]" \
    u"|[\u0E01-\u0E2E]|\u0E30|[\u0E32-\u0E33]|[\u0E40-\u0E45]|[\u0E81-\u0E82]" \
    u"|\u0E84|[\u0E87-\u0E88]|\u0E8A|\u0E8D|[\u0E94-\u0E97]|[\u0E99-\u0E9F]" \
    u"|[\u0EA1-\u0EA3]|\u0EA5|\u0EA7|[\u0EAA-\u0EAB]|[\u0EAD-\u0EAE]|\u0EB0" \
    u"|[\u0EB2-\u0EB3]|\u0EBD|[\u0EC0-\u0EC4]|[\u0F40-\u0F47]|[\u0F49-\u0F69]" \
    u"|[\u10A0-\u10C5]|[\u10D0-\u10F6]|\u1100|[\u1102-\u1103]|[\u1105-\u1107]" \
    u"|\u1109|[\u110B-\u110C]|[\u110E-\u1112]|\u113C|\u113E|\u1140|\u114C" \
    u"|\u114E|\u1150|[\u1154-\u1155]|\u1159|[\u115F-\u1161]|\u1163|\u1165" \
    u"|\u1167|\u1169|[\u116D-\u116E]|[\u1172-\u1173]|\u1175|\u119E|\u11A8" \
    u"|\u11AB|[\u11AE-\u11AF]|[\u11B7-\u11B8]|\u11BA|[\u11BC-\u11C2]|\u11EB" \
    u"|\u11F0|\u11F9|[\u1E00-\u1E9B]|[\u1EA0-\u1EF9]|[\u1F00-\u1F15]" \
    u"|[\u1F18-\u1F1D]|[\u1F20-\u1F45]|[\u1F48-\u1F4D]|[\u1F50-\u1F57]|\u1F59" \
    u"|\u1F5B|\u1F5D|[\u1F5F-\u1F7D]|[\u1F80-\u1FB4]|[\u1FB6-\u1FBC]|\u1FBE" \
    u"|[\u1FC2-\u1FC4]|[\u1FC6-\u1FCC]|[\u1FD0-\u1FD3]|[\u1FD6-\u1FDB]" \
    u"|[\u1FE0-\u1FEC]|[\u1FF2-\u1FF4]|[\u1FF6-\u1FFC]|\u2126|[\u212A-\u212B]" \
    u"|\u212E|[\u2180-\u2182]|[\u3041-\u3094]|[\u30A1-\u30FA]|[\u3105-\u312C]" \
    u"|[\uAC00-\uD7A3]"
# XML 1.0 "Ideographic" production.
_PATT_IDEOGRAPHIC = u"[\u4E00-\u9FA5]|\u3007|[\u3021-\u3029]"
# XML 1.0 "CombiningChar" production: marks that attach to a base character.
_PATT_COMBINING_CHAR = u"[\u0300-\u0345]|[\u0360-\u0361]|[\u0483-\u0486]" \
    u"|[\u0591-\u05A1]|[\u05A3-\u05B9]|[\u05BB-\u05BD]|\u05BF|[\u05C1-\u05C2]" \
    u"|\u05C4|[\u064B-\u0652]|\u0670|[\u06D6-\u06DC]|[\u06DD-\u06DF]" \
    u"|[\u06E0-\u06E4]|[\u06E7-\u06E8]|[\u06EA-\u06ED]|[\u0901-\u0903]|\u093C" \
    u"|[\u093E-\u094C]|\u094D|[\u0951-\u0954]|[\u0962-\u0963]|[\u0981-\u0983]" \
    u"|\u09BC|\u09BE|\u09BF|[\u09C0-\u09C4]|[\u09C7-\u09C8]|[\u09CB-\u09CD]" \
    u"|\u09D7|[\u09E2-\u09E3]|\u0A02|\u0A3C|\u0A3E|\u0A3F|[\u0A40-\u0A42]" \
    u"|[\u0A47-\u0A48]|[\u0A4B-\u0A4D]|[\u0A70-\u0A71]|[\u0A81-\u0A83]|\u0ABC" \
    u"|[\u0ABE-\u0AC5]|[\u0AC7-\u0AC9]|[\u0ACB-\u0ACD]|[\u0B01-\u0B03]|\u0B3C" \
    u"|[\u0B3E-\u0B43]|[\u0B47-\u0B48]|[\u0B4B-\u0B4D]|[\u0B56-\u0B57]" \
    u"|[\u0B82-\u0B83]|[\u0BBE-\u0BC2]|[\u0BC6-\u0BC8]|[\u0BCA-\u0BCD]|\u0BD7" \
    u"|[\u0C01-\u0C03]|[\u0C3E-\u0C44]|[\u0C46-\u0C48]|[\u0C4A-\u0C4D]" \
    u"|[\u0C55-\u0C56]|[\u0C82-\u0C83]|[\u0CBE-\u0CC4]|[\u0CC6-\u0CC8]" \
    u"|[\u0CCA-\u0CCD]|[\u0CD5-\u0CD6]|[\u0D02-\u0D03]|[\u0D3E-\u0D43]" \
    u"|[\u0D46-\u0D48]|[\u0D4A-\u0D4D]|\u0D57|\u0E31|[\u0E34-\u0E3A]" \
    u"|[\u0E47-\u0E4E]|\u0EB1|[\u0EB4-\u0EB9]|[\u0EBB-\u0EBC]|[\u0EC8-\u0ECD]" \
    u"|[\u0F18-\u0F19]|\u0F35|\u0F37|\u0F39|\u0F3E|\u0F3F|[\u0F71-\u0F84]" \
    u"|[\u0F86-\u0F8B]|[\u0F90-\u0F95]|\u0F97|[\u0F99-\u0FAD]|[\u0FB1-\u0FB7]" \
    u"|\u0FB9|[\u20D0-\u20DC]|\u20E1|[\u302A-\u302F]|\u3099|\u309A"
# XML 1.0 "Digit" production: decimal digits across scripts.
_PATT_DIGIT = u"[\u0030-\u0039]|[\u0660-\u0669]|[\u06F0-\u06F9]|[\u0966-\u096F]" \
    u"|[\u09E6-\u09EF]|[\u0A66-\u0A6F]|[\u0AE6-\u0AEF]|[\u0B66-\u0B6F]" \
    u"|[\u0BE7-\u0BEF]|[\u0C66-\u0C6F]|[\u0CE6-\u0CEF]|[\u0D66-\u0D6F]" \
    u"|[\u0E50-\u0E59]|[\u0ED0-\u0ED9]|[\u0F20-\u0F29]"
# XML 1.0 "Extender" production.
_PATT_EXTENDER = u"\u00B7|\u02D0|\u02D1|\u0387|\u0640|\u0E46|\u0EC6|\u3005" \
    u"|[\u3031-\u3035]|[\u309D-\u309E]|[\u30FC-\u30FE]"
# Letter ::= BaseChar | Ideographic
PATT_LETTER = u"(%s)" % u'|'.join([_PATT_BASE_CHAR, _PATT_IDEOGRAPHIC])
# NameChar ::= Letter | Digit | '.' | '-' | '_' | ':' | CombiningChar | Extender
# NOTE(review): '.' is unescaped here, so inside the alternation it matches
# any character, not just a literal dot -- confirm whether this is intended.
PATT_NAMECHAR = u"(%s)" % u'|'.join([PATT_LETTER, _PATT_DIGIT,
    u'.', u'-', u'_', u':', _PATT_COMBINING_CHAR, _PATT_EXTENDER])
# Name ::= (Letter | '_' | ':') (NameChar)*
# NOTE(review): the outer "+" around "((NameChar)*)" looks redundant with the
# inner "*" and forces a Name of at least two characters -- confirm.
PATT_NAME = u"(%s)(%s)+" % (u'|'.join([PATT_LETTER, u'_', u':']),
    u"(%s)*" % PATT_NAMECHAR)
# Nmtoken ::= (NameChar)+
PATT_NMTOKEN = u"(%s)+" % PATT_NAMECHAR
|
lgpl-2.1
|
darren-rogan/CouchPotatoServer
|
libs/subliminal/__init__.py
|
109
|
1366
|
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .api import list_subtitles, download_subtitles
from .async import Pool
from .core import (SERVICES, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE,
MATCHING_CONFIDENCE)
from .infos import __version__
import logging
try:
    from logging import NullHandler
except ImportError:
    # logging.NullHandler was added in Python 2.7; provide a no-op fallback
    # for older interpreters.
    class NullHandler(logging.Handler):
        def emit(self, record):
            # Deliberately discard every record.
            pass
# Public API of the package.
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE',
           'MATCHING_CONFIDENCE', 'list_subtitles', 'download_subtitles', 'Pool']
# Attach a do-nothing handler so applications that have not configured
# logging do not see "No handlers could be found" warnings.
logging.getLogger(__name__).addHandler(NullHandler())
|
gpl-3.0
|
mmauroy/SickRage
|
lib/guessit/transfo/guess_episodes_rexps.py
|
28
|
12285
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from guessit.patterns.list import list_parser, all_separators_re
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.patterns import sep, build_or_pattern
from guessit.containers import PropertiesContainer, WeakValidator, NoValidator, ChainedValidator, DefaultValidator, \
FormatterValidator
from guessit.patterns.numeral import numeral, digital_numeral, parse_numeral
class GuessEpisodesRexps(Transformer):
    """Transformer that extracts season/episode information from filenames
    using a large set of registered regular expressions.

    Runs at priority 20 and only on trees already typed as episodes.
    """

    def __init__(self):
        Transformer.__init__(self, 20)
        # Words/markers that separate "x of y" style counts.
        of_separators = ['of', 'sur', '/', '\\']
        of_separators_re = re.compile(build_or_pattern(of_separators, escape=True), re.IGNORECASE)
        season_words = ['seasons?', 'saisons?', 'series?']
        episode_words = ['episodes?']
        season_markers = ['s']
        episode_markers = ['e', 'ep']
        self.container = PropertiesContainer(enhance=False, canonical_from_pattern=False)
        season_words_re = re.compile(build_or_pattern(season_words), re.IGNORECASE)
        episode_words_re = re.compile(build_or_pattern(episode_words), re.IGNORECASE)
        season_markers_re = re.compile(build_or_pattern(season_markers), re.IGNORECASE)
        episode_markers_re = re.compile(build_or_pattern(episode_markers), re.IGNORECASE)

        # Parser for "1x02x03"-style episode lists (separator: 'x').
        def episode_parser_x(value):
            return list_parser(value, 'episodeList', discrete_separators_re=re.compile('x', re.IGNORECASE))

        # Parser for "e01e03"-style lists; fill_gaps expands e01-e03 to 1,2,3.
        def episode_parser_e(value):
            return list_parser(value, 'episodeList', discrete_separators_re=re.compile('e', re.IGNORECASE), fill_gaps=True)

        # Generic episode-list parser (default separators only).
        def episode_parser(value):
            return list_parser(value, 'episodeList')

        # Generic season-list parser.
        def season_parser(value):
            return list_parser(value, 'seasonList')

        class ResolutionCollisionValidator(object):
            """Rejects NxM matches that are really video resolutions (e.g. 720x480)."""
            @staticmethod
            def validate(prop, string, node, match, entry_start, entry_end):
                # Invalidate when season or episode is more than 100.
                try:
                    season_value = season_parser(match.group(2))
                    episode_value = episode_parser_x(match.group(3))
                    return season_value < 100 or episode_value < 100
                except:
                    # NOTE(review): bare except silently accepts on any parse
                    # failure. This may occur for 1xAll or patterns like this.
                    return True

        # -- "season 2" / "saison 2" style words around a numeral.
        self.container.register_property(None, r'(' + season_words_re.pattern + sep + '?(?P<season>' + numeral + ')' + sep + '?' + season_words_re.pattern + '?)', confidence=1.0, formatter=parse_numeral)
        self.container.register_property(None, r'(' + season_words_re.pattern + sep + '?(?P<season>' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*)' + sep + '?' + season_words_re.pattern + '?)' + sep, confidence=1.0, formatter={None: parse_numeral, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), FormatterValidator('season', lambda x: len(x) > 1 if hasattr(x, '__len__') else False)))
        # -- "S02E03" marker forms, with 'e'-joined or separator-joined episode lists.
        self.container.register_property(None, r'(' + season_markers_re.pattern + '(?P<season>' + digital_numeral + ')[^0-9]?' + sep + '?(?P<episodeNumber>(?:e' + digital_numeral + '(?:' + sep + '?[e-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_e, 'season': season_parser}, validator=NoValidator())
        self.container.register_property(None, r'(' + season_markers_re.pattern + '(?P<season>' + digital_numeral + ')[^0-9]?' + sep + '?(?P<episodeNumber>(?:e' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser, 'season': season_parser}, validator=NoValidator())
        # -- "2x03" cross forms (guarded against resolutions like 720x480).
        self.container.register_property(None, sep + r'((?P<season>' + digital_numeral + ')' + sep + '' + '(?P<episodeNumber>(?:x' + sep + digital_numeral + '(?:' + sep + '[x-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_x, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator()))
        self.container.register_property(None, r'((?P<season>' + digital_numeral + ')' + '(?P<episodeNumber>(?:x' + digital_numeral + '(?:[x-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_x, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator()))
        # -- Season-only marker ("S02-S03"), weaker confidence.
        self.container.register_property(None, r'(' + season_markers_re.pattern + '(?P<season>' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*))', confidence=0.6, formatter={None: parse_numeral, 'season': season_parser}, validator=NoValidator())
        # -- Version suffixes ("12v2", standalone "V2").
        self.container.register_property(None, r'((?P<episodeNumber>' + digital_numeral + ')' + sep + '?v(?P<version>\d+))', confidence=0.6, formatter=parse_numeral)
        self.container.register_property('version', sep + r'(V\d+)' + sep, confidence=0.6, formatter=parse_numeral, validator=NoValidator())
        # -- "ep 12" forms, optionally with a version.
        self.container.register_property(None, r'(ep' + sep + r'?(?P<episodeNumber>' + digital_numeral + ')' + sep + '?)', confidence=0.7, formatter=parse_numeral)
        self.container.register_property(None, r'(ep' + sep + r'?(?P<episodeNumber>' + digital_numeral + ')' + sep + '?v(?P<version>\d+))', confidence=0.7, formatter=parse_numeral)
        # -- Episode markers/words ("e12", "episode 12"), lists and versions.
        self.container.register_property(None, r'(' + episode_markers_re.pattern + '(?P<episodeNumber>' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*))', confidence=0.6, formatter={None: parse_numeral, 'episodeNumber': episode_parser})
        self.container.register_property(None, r'(' + episode_words_re.pattern + sep + '?(?P<episodeNumber>' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*)' + sep + '?' + episode_words_re.pattern + '?)', confidence=0.8, formatter={None: parse_numeral, 'episodeNumber': episode_parser})
        self.container.register_property(None, r'(' + episode_markers_re.pattern + '(?P<episodeNumber>' + digital_numeral + ')' + sep + '?v(?P<version>\d+))', confidence=0.6, formatter={None: parse_numeral, 'episodeNumber': episode_parser})
        self.container.register_property(None, r'(' + episode_words_re.pattern + sep + '?(?P<episodeNumber>' + digital_numeral + ')' + sep + '?v(?P<version>\d+))', confidence=0.8, formatter={None: parse_numeral, 'episodeNumber': episode_parser})
        # -- Weak guesses: bare 2-digit numbers at the start or end of the name.
        self.container.register_property('episodeNumber', r'^' + sep + '+(\d{2}' + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + '\d{2}' + ')*)' + sep, confidence=0.4, formatter=episode_parser)
        self.container.register_property('episodeNumber', r'^' + sep + '+0(\d{1,2}' + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + '0\d{1,2}' + ')*)' + sep, confidence=0.4, formatter=episode_parser)
        self.container.register_property('episodeNumber', sep + r'(\d{2}' + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + r'\d{2}' + ')*)' + sep + '+$', confidence=0.4, formatter=episode_parser)
        self.container.register_property('episodeNumber', sep + r'0(\d{1,2}' + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + r'0\d{1,2}' + ')*)' + sep + '+$', confidence=0.4, formatter=episode_parser)
        # -- "x of y" episode/season counts.
        self.container.register_property(None, r'((?P<episodeNumber>' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P<episodeCount>' + numeral + ')(?:' + sep + '?(?:episodes?|eps?))?)', confidence=0.7, formatter=parse_numeral)
        self.container.register_property(None, r'((?:episodes?|eps?)' + sep + '?(?P<episodeNumber>' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P<episodeCount>' + numeral + '))', confidence=0.7, formatter=parse_numeral)
        self.container.register_property(None, r'((?:seasons?|saisons?|s)' + sep + '?(?P<season>' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P<seasonCount>' + numeral + '))', confidence=0.7, formatter=parse_numeral)
        self.container.register_property(None, r'((?P<season>' + numeral + ')' + sep + '?' + of_separators_re.pattern + sep + '?(?P<seasonCount>' + numeral + ')' + sep + '?(?:seasons?|saisons?|s))', confidence=0.7, formatter=parse_numeral)
        # -- "FiNAL"/"Complete" keywords and "2xAll" (whole-season) forms.
        self.container.register_canonical_properties('other', 'FiNAL', 'Complete', validator=WeakValidator())
        self.container.register_property(None, r'[^0-9]((?P<season>' + digital_numeral + ')[^0-9 .-]?-?(?P<other>xAll))', confidence=1.0, formatter={None: parse_numeral, 'other': lambda x: 'Complete', 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator()))

    def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
        """Expose the -E/--episode-prefer-number command line option."""
        naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number', default=False,
                                 help='Guess "serie.213.avi" as the episodeNumber 213. Without this option, '
                                 'it will be guessed as season 2, episodeNumber 13')

    def supported_properties(self):
        """Properties this transformer may contribute to a guess."""
        return ['episodeNumber', 'season', 'episodeList', 'seasonList', 'episodeCount', 'seasonCount', 'version', 'other']

    def guess_episodes_rexps(self, string, node=None, options=None):
        """Match the registered patterns against *string* and merge the
        resulting guess with sibling guesses already on the node's group.
        """
        found = self.container.find_properties(string, node, options)
        guess = self.container.as_guess(found, string)
        if guess and node:
            if 'season' in guess and 'episodeNumber' in guess:
                # If two guesses contains both season and episodeNumber in same group, create an episodeList
                for existing_guess in node.group_node().guesses:
                    if 'season' in existing_guess and 'episodeNumber' in existing_guess:
                        if 'episodeList' not in existing_guess:
                            existing_guess['episodeList'] = [existing_guess['episodeNumber']]
                        existing_guess['episodeList'].append(guess['episodeNumber'])
                        existing_guess['episodeList'].sort()
                        # Keep the smaller episodeNumber authoritative.
                        if existing_guess['episodeNumber'] > guess['episodeNumber']:
                            existing_guess.set_confidence('episodeNumber', 0)
                        else:
                            guess.set_confidence('episodeNumber', 0)
                        guess['episodeList'] = list(existing_guess['episodeList'])
            elif 'episodeNumber' in guess:
                # If two guesses contains only episodeNumber in same group, remove the existing one.
                # NOTE(review): the comment says "remove the existing one" but
                # the code deletes overlapping keys from the NEW guess --
                # confirm which behavior is intended.
                for existing_guess in node.group_node().guesses:
                    if 'episodeNumber' in existing_guess:
                        for k, v in existing_guess.items():
                            if k in guess:
                                del guess[k]
        return guess

    def should_process(self, mtree, options=None):
        """Only run on filenames already typed as episodes."""
        return mtree.guess.get('type', '').startswith('episode')

    def process(self, mtree, options=None):
        """Apply guess_episodes_rexps to every still-unidentified leaf."""
        GuessFinder(self.guess_episodes_rexps, None, self.log, options).process_nodes(mtree.unidentified_leaves())
|
gpl-3.0
|
billy-inn/scikit-learn
|
sklearn/tests/test_cross_validation.py
|
27
|
41664
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: dummy hyper-parameter consumed by score();
        # allow_nd: whether fit/predict accept >2-dimensional X.
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            # NOTE(review): message reads 'X cannot be d' -- presumably a
            # typo for 'X cannot be 3d'; confirm before changing the string.
            raise ValueError('X cannot be d')
        # Each optional fit_param, when given, must match the shape of the
        # corresponding training-fold data passed by cross-validation.
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): compares against the module-level fixture `y`,
            # not the `Y` argument -- intentional coupling to the fixtures
            # defined below; confirm.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            # NOTE(review): compares against the module-level P_sparse fixture.
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # Trivial prediction: echo the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Deterministic score in (0, 1] controlled by the `a` parameter.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures used by the tests and by MockClassifier's assertions.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# Sparse column vector with a single non-zero entry.
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# Five balanced classes: [0, 0, 1, 1, 2, 2, 3, 3, 4, 4].
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert a (train, test) split is disjoint and, optionally, complete."""
    # Use python sets to get more informative assertion failure messages.
    train_set = set(train)
    test_set = set(test)
    # Train and test must not share any index.
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # Together, train and test must cover every sample index.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check every sample appears at least once in a valid test fold."""
    if expected_n_iter is None:
        expected_n_iter = len(cv)
    else:
        assert_equal(len(cv), expected_n_iter)
    seen_test_samples = set()
    n_splits = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_splits += 1
        seen_test_samples.update(test)
    # The iterator must yield exactly as many splits as advertised.
    assert_equal(n_splits, expected_n_iter)
    if n_samples is not None:
        # Accumulated test folds must cover the whole dataset.
        assert_equal(seen_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    """KFold/StratifiedKFold must reject impossible configurations."""
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 2]
    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    assert_raises(ValueError, cval.StratifiedKFold, y, 0)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1)
    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    """All indices must appear in the test folds, equal-sized or not."""
    # 300 splits evenly into 3 folds; 17 forces unequal fold sizes.
    for n_samples in (300, 17):
        folds = cval.KFold(n_samples, 3)
        check_cv_coverage(folds, expected_n_iter=3, n_samples=n_samples)
def test_kfold_no_shuffle():
    """Manually check that KFold preserves the data ordering on toy data."""
    splits = iter(cval.KFold(4, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1])
    assert_array_equal(train, [2, 3])
    train, test = next(splits)
    assert_array_equal(test, [2, 3])
    assert_array_equal(train, [0, 1])
    # With n=5 the first fold gets the extra sample.
    splits = iter(cval.KFold(5, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 2])
    assert_array_equal(train, [3, 4])
    train, test = next(splits)
    assert_array_equal(test, [3, 4])
    assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    """Manually check that StratifiedKFold preserves the data ordering as
    much as possible on toy datasets in order to avoid hiding sample
    dependencies when possible.
    """
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])
    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])
    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    """Check that StratifiedKFold preserves label ratios in each split,
    with shuffling turned off and on."""
    n_samples = 1000
    # Highly imbalanced labels: 10% / 89% / 1%.
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
                                2)
            assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
                                2)
            assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
                                2)
            assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    """Check that KFold returns folds with balanced sizes (max diff <= 1)."""
    for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
        sizes = []
        for _, test in kf:
            sizes.append(len(test))
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    """Check that StratifiedKFold returns folds with balanced sizes (only
    when stratification is possible), with shuffling turned off and on."""
    labels = [0] * 3 + [1] * 14
    for shuffle in [False, True]:
        for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
                    for i in range(11, 17)]:
            sizes = []
            for _, test in skf:
                sizes.append(len(test))
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    """Check that KFold(shuffle=True) really shuffles the indices while
    still returning every index across the different test folds."""
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)
    all_folds = None
    for train, test in kf:
        # Each test fold has 100 entries.  Without shuffling, each fold
        # would be exactly one of the three contiguous 100-sample chunks,
        # so a shuffled fold must differ from every chunk.
        # (Bug fix: the original compared 100-element chunks against the
        # 200-element *train* split -- a shape mismatch that degenerated
        # into a meaningless broadcast comparison -- and the second and
        # third chunks were also off by one: arange(101, 200) etc.)
        for start in (0, 100, 200):
            contiguous_chunk = np.arange(start, start + 100)
            assert_true(np.any(contiguous_chunk != ind[test]))
        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))
    # All indices must be returned exactly once across the test folds.
    all_folds.sort()
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    """Check that shuffling happens when requested (different seeds give
    different folds) and that sample coverage is preserved."""
    labels = [0] * 20 + [1] * 20
    kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
    kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
    for (_, test0), (_, test1) in zip(kf0, kf1):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """The digits samples are dependent: they are apparently grouped by
    authors although we don't have any information on the groups segment
    locations for this data. We can highlight this fact by computing k-fold
    cross-validation with and without shuffling: we observe that the
    shuffling case wrongly makes the IID assumption and is therefore too
    optimistic: it estimates a much higher accuracy (around 0.96) than the
    non-shuffling variant (around 0.86).
    """
    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)
    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_shuffle_split():
    """Equivalent float / int / np.int32 / six integer ``test_size`` values
    must produce identical splits for the same random_state."""
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
    for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
        assert_array_equal(t1[0], t2[0])
        assert_array_equal(t2[0], t3[0])
        assert_array_equal(t3[0], t4[0])
        assert_array_equal(t1[1], t2[1])
        assert_array_equal(t2[1], t3[1])
        assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    """Check the input validation of StratifiedShuffleSplit's constructor."""
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    """Splits must keep class proportions, be disjoint, and cover all
    samples, for a variety of label vectors."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]
    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """Test that StratifiedShuffleSplit draws indices with equal chance."""
    n_folds = 5
    n_iter = 1000
    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: ``n_splits`` is captured by closure and only assigned in the
        # loop below; this function is called after the splits are consumed.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")
    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)
        # Count how often each sample index lands in train/test.
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    """Check that PredefinedSplit can reproduce a split generated by KFold."""
    folds = -1 * np.ones(10)
    kf_train = []
    kf_test = []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        # Record each sample's fold id so PredefinedSplit can replay it.
        folds[test_ind] = i
    ps_train = []
    ps_test = []
    ps = cval.PredefinedSplit(folds)
    for train_ind, test_ind in ps:
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
    """Check that LeaveOneLabelOut and LeavePLabelOut work normally if
    the labels variable is changed before calling __iter__ (i.e. the CV
    objects must have copied the labels at construction time)."""
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    labels_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    """Smoke tests for cross_val_score with dense, sparse, list and 3d
    inputs, using the module-level MockClassifier fixture."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y
        # NOTE(review): duplicates the multioutput check above verbatim.
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # An unknown scoring string must be rejected.
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    """Check cross_val_score doesn't destroy pandas dataframes/series."""
    types = [(MockDataFrame, MockDataFrame)]
    try:
        # Only exercise real pandas types when pandas is installed.
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """Test that cross_val_score accepts boolean masks as CV splits and
    produces the same scores as the equivalent index arrays."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        # np.bool is a deprecated alias for the builtin bool.
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # Bug fix: append the boolean masks.  Previously the integer index
        # arrays were appended instead, so the mask code path was never
        # actually exercised by this test.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """Test cross_val_score for SVM with a precomputed linear kernel."""
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    # Precomputed Gram matrix must reproduce the linear-kernel scores.
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """Check that fit_params (arrays, sparse matrices and arbitrary
    objects) are forwarded unchanged to the estimator's fit method."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """Check that a custom score function is called once per fold and its
    return value is used as the fold score."""
    clf = MockClassifier()
    _score_func_args = []
    def score_func(y_test, y_predict):
        # Record the call so the number of invocations can be asserted.
        _score_func_args.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """An object without an estimator interface must raise a TypeError."""
    class BrokenEstimator:
        # Deliberately implements neither fit() nor score().
        pass

    broken = BrokenEstimator()
    assert_raises(TypeError, cval.cross_val_score, broken, X)
def test_train_test_split_errors():
    """Check the input validation performed by train_test_split."""
    assert_raises(ValueError, cval.train_test_split)
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    # train_size + test_size > 1.0
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # Integer sizes exceeding the number of samples
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # Arrays of inconsistent lengths
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """Exercise train_test_split on arrays, sparse matrices, lists,
    nd-arrays and the stratify option."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    """Check train_test_split doesn't destroy pandas dataframes."""
    # NOTE(review): this function lacks the ``test_`` prefix, so test
    # runners will not collect it -- confirm and rename if it should run.
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
    """Check train_test_split preserves MockDataFrame unless allow_lists
    is disabled."""
    # NOTE(review): this function lacks the ``test_`` prefix, so test
    # runners will not collect it -- confirm and rename if it should run.
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
    assert_true(isinstance(X_train_arr, np.ndarray))
    assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
    """Default, accuracy and f1_weighted scorings must agree on iris."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Default, r2, mean_squared_error and explained_variance scorings on
    a synthetic regression problem."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                      scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """Check permutation_test_score: significant score on real labels,
    non-significant on random labels, identical results with sparse input
    and with a custom scorer."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        # Accuracy minus error rate, in [-1, 1].
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """All CV generators must yield integer index arrays (not boolean
    masks) that can index X and y."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # Bug fix: the second assertion previously re-checked ``train``;
            # it now checks ``test`` as intended.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """Same as test_cross_val_generator_with_indices but relying on the
    default (index) behaviour of the CV generators."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # Bug fix: the second assertion previously re-checked ``train``;
            # it now checks ``test`` as intended.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    """Check the input validation of ShuffleSplit's constructor."""
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
    # train_size + test_size > 1.0
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
                  train_size=0.95)
    # Integer sizes out of range
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
    assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
                  train_size=None)
def test_shufflesplit_reproducible():
    """Iterating a seeded ShuffleSplit twice must give the same sequence
    of train sets."""
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice a precomputed Gram matrix on both axes so
    that train/test kernels match dot products of the sliced data."""
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    # Test kernel rows are indexed by te, columns by tr.
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    """Check that cross_val_score allows input data with NaNs when the
    pipeline starts with an imputer."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
    """Check that train_test_split allows input data with NaNs."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    """Check that permutation_test_score allows input data with NaNs when
    the pipeline starts with an imputer."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv must pick StratifiedKFold for classifiers with binary or
    multiclass y, and plain KFold otherwise (regression, multilabel,
    multioutput)."""
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    X = np.ones((5, 2))
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    """Check cross_val_score with multilabel targets and micro / macro /
    samples-averaged precision scorers."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict must match a naive per-fold fit/predict loop and
    handle LeaveOneOut, sparse input, unsupervised estimators, and reject
    a CV whose test folds are not a partition of the samples."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    # Bug fix: the lengths are plain ints, so use assert_equal as the
    # sibling checks do (assert_array_almost_equal was a misuse here).
    assert_equal(len(preds), len(y))
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    def bad_cv():
        # Test folds overlap / exceed the sample range: not a partition.
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """Smoke tests for cross_val_predict with dense, sparse, list and 3d
    inputs."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """Check cross_val_predict doesn't destroy pandas dataframes/series."""
    types = [(MockDataFrame, MockDataFrame)]
    try:
        # Only exercise real pandas types when pandas is installed.
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """Sparse matrices passed via fit_params must be indexed per fold
    without error."""
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))
def test_check_is_partition():
    """_check_is_partition accepts exactly the permutations of range(n)."""
    indices = np.arange(100)
    assert_true(cval._check_is_partition(indices, 100))
    # Removing one element leaves a hole in the coverage.
    assert_false(cval._check_is_partition(np.delete(indices, 23), 100))
    # Duplicating an element (and dropping index 0) is not a partition.
    indices[0] = 23
    assert_false(cval._check_is_partition(indices, 100))
|
bsd-3-clause
|
mdj2/django
|
tests/custom_columns/models.py
|
114
|
1396
|
"""
17. Custom column/table names
If your database column name is different than your model attribute, use the
``db_column`` parameter. Note that you'll use the field's name, not its column
name, in API usage.
If your database table name is different than your model name, use the
``db_table`` Meta attribute. This has no effect on the API used to
query the database.
If you need to use a table name for a many-to-many relationship that differs
from the default generated name, use the ``db_table`` parameter on the
``ManyToManyField``. This has no effect on the API for querying the database.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    # Attribute names differ from the DB column names ('firstname'/'last')
    # to exercise the ``db_column`` option.
    first_name = models.CharField(max_length=30, db_column='firstname')
    last_name = models.CharField(max_length=30, db_column='last')
    def __str__(self):
        return '%s %s' % (self.first_name, self.last_name)
    class Meta:
        # Custom table name instead of the default app_author.
        db_table = 'my_author_table'
        ordering = ('last_name','first_name')
@python_2_unicode_compatible
class Article(models.Model):
    headline = models.CharField(max_length=100)
    # Custom join-table name for the M2M relation instead of the generated
    # default.
    authors = models.ManyToManyField(Author, db_table='my_m2m_table')
    def __str__(self):
        return self.headline
    class Meta:
        ordering = ('headline',)
|
bsd-3-clause
|
sgh/vtk
|
Graphics/Testing/Python/tubeComb.py
|
1
|
2098
|
#!/usr/bin/env python
"""VTK regression test: streamlines seeded along a line in the PLOT3D
combustor dataset, rendered as tubes with the structured-grid outline."""
import sys
# Allow extra module search paths to be supplied via '-A <path>'.
for i in range(0, len(sys.argv)):
    if sys.argv[i] == '-A' and i < len(sys.argv)-1:
        sys.path = sys.path + [sys.argv[i+1]]
import vtk
from vtk.util.misc import vtkRegressionTestImage, vtkGetDataRoot
# create planes
# Create the RenderWindow, Renderer
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren )
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
# Read the PLOT3D combustor dataset (geometry + solution files).
pl3d = vtk.vtkPLOT3DReader()
pl3d.SetXYZFileName( vtkGetDataRoot() + '/Data/combxyz.bin' )
pl3d.SetQFileName( vtkGetDataRoot() + '/Data/combq.bin' )
pl3d.SetScalarFunctionNumber( 100 )
pl3d.SetVectorFunctionNumber( 202 )
pl3d.Update()
# Wireframe outline of the structured grid for spatial context.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(pl3d.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Seed points for the streamlines, distributed on a line segment.
seeds = vtk.vtkLineSource()
seeds.SetPoint1(15, -5, 32)
seeds.SetPoint2(15, 5, 32)
seeds.SetResolution(10)
# Integrate streamlines backward from the seeds with RK4.
integ = vtk.vtkRungeKutta4()
sl = vtk.vtkStreamLine()
sl.SetIntegrator(integ)
sl.SetInputConnection(pl3d.GetOutputPort())
sl.SetSource(seeds.GetOutput())
sl.SetMaximumPropagationTime(0.1)
sl.SetIntegrationStepLength(0.1)
sl.SetIntegrationDirectionToBackward()
sl.SetStepLength(0.001)
# Wrap the streamlines in tubes for display.
tube = vtk.vtkTubeFilter()
tube.SetInputConnection(sl.GetOutputPort())
tube.SetRadius(0.1)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Also render the seed line itself.
mmapper = vtk.vtkPolyDataMapper()
mmapper.SetInputConnection(seeds.GetOutputPort())
mactor = vtk.vtkActor()
mactor.SetMapper(mmapper)
ren.AddActor(mactor)
ren.AddActor(actor)
ren.AddActor(outlineActor)
# Fixed camera so the regression image is deterministic.
cam=ren.GetActiveCamera()
cam.SetClippingRange( 3.95297, 50 )
cam.SetFocalPoint( 8.88908, 0.595038, 29.3342 )
cam.SetPosition( -12.3332, 31.7479, 41.2387 )
cam.SetViewUp( 0.060772, -0.319905, 0.945498 )
renWin.Render()
retVal = vtkRegressionTestImage(renWin)
sys.exit( not retVal )
|
bsd-3-clause
|
DanteOnline/free-art
|
venv/lib/python3.4/site-packages/django/test/runner.py
|
60
|
26988
|
import collections
import ctypes
import itertools
import logging
import multiprocessing
import os
import pickle
import textwrap
import unittest
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment
from django.utils.datastructures import OrderedSet
from django.utils.six import StringIO
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
    """
    TextTestResult that captures SQL logged by the 'django.db.backends'
    logger during each test and appends it to that test's failure/error
    report.
    """
    def __init__(self, stream, descriptions, verbosity):
        self.logger = logging.getLogger('django.db.backends')
        self.logger.setLevel(logging.DEBUG)
        super(DebugSQLTextTestResult, self).__init__(stream, descriptions, verbosity)
    def startTest(self, test):
        # Fresh buffer per test; a temporary handler routes SQL logs into it.
        self.debug_sql_stream = StringIO()
        self.handler = logging.StreamHandler(self.debug_sql_stream)
        self.logger.addHandler(self.handler)
        super(DebugSQLTextTestResult, self).startTest(test)
    def stopTest(self, test):
        super(DebugSQLTextTestResult, self).stopTest(test)
        self.logger.removeHandler(self.handler)
        if self.showAll:
            # In verbose mode, echo the captured SQL for every test.
            self.debug_sql_stream.seek(0)
            self.stream.write(self.debug_sql_stream.read())
            self.stream.writeln(self.separator2)
    def addError(self, test, err):
        super(DebugSQLTextTestResult, self).addError(test, err)
        # Extend the (test, traceback) entry recorded by the base class
        # with the SQL captured for this test.
        self.debug_sql_stream.seek(0)
        self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
    def addFailure(self, test, err):
        super(DebugSQLTextTestResult, self).addFailure(test, err)
        self.debug_sql_stream.seek(0)
        self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
    def printErrorList(self, flavour, errors):
        # Like the base implementation, but each entry carries a third
        # element (the captured SQL) printed as a trailing section.
        for test, err, sql_debug in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % sql_debug)
class RemoteTestResult(object):
    """
    Record information about which tests have succeeded and which have failed.
    The sole purpose of this class is to record events in the child processes
    so they can be replayed in the master process. As a consequence it doesn't
    inherit unittest.TestResult and doesn't attempt to implement all its API.
    The implementation matches the unpythonic coding style of unittest2.
    """
    def __init__(self):
        # Chronological log of (method_name, test_index, *args) tuples; the
        # master process replays these against its real result object.
        self.events = []
        self.failfast = False
        self.shouldStop = False
        self.testsRun = 0
    @property
    def test_index(self):
        # testsRun is incremented in startTest, so the index of the test
        # currently running is always one less.
        return self.testsRun - 1
    def check_picklable(self, test, err):
        # Ensure that sys.exc_info() tuples are picklable. This displays a
        # clear multiprocessing.pool.RemoteTraceback generated in the child
        # process instead of a multiprocessing.pool.MaybeEncodingError, making
        # the root cause easier to figure out for users who aren't familiar
        # with the multiprocessing module. Since we're in a forked process,
        # our best chance to communicate with them is to print to stdout.
        try:
            pickle.dumps(err)
        except Exception as exc:
            original_exc_txt = repr(err[1])
            original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
            pickle_exc_txt = repr(exc)
            pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
            if tblib is None:
                print("""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
pip install tblib
""".format(test, original_exc_txt))
            else:
                print("""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test without the --parallel option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
            raise
    def stop_if_failfast(self):
        if self.failfast:
            self.stop()
    def stop(self):
        self.shouldStop = True
    def startTestRun(self):
        self.events.append(('startTestRun',))
    def stopTestRun(self):
        self.events.append(('stopTestRun',))
    def startTest(self, test):
        self.testsRun += 1
        self.events.append(('startTest', self.test_index))
    def stopTest(self, test):
        self.events.append(('stopTest', self.test_index))
    def addError(self, test, err):
        # err must survive a pickling round-trip to reach the master process.
        self.check_picklable(test, err)
        self.events.append(('addError', self.test_index, err))
        self.stop_if_failfast()
    def addFailure(self, test, err):
        self.check_picklable(test, err)
        self.events.append(('addFailure', self.test_index, err))
        self.stop_if_failfast()
    def addSubTest(self, test, subtest, err):
        raise NotImplementedError("subtests aren't supported at this time")
    def addSuccess(self, test):
        self.events.append(('addSuccess', self.test_index))
    def addSkip(self, test, reason):
        self.events.append(('addSkip', self.test_index, reason))
    def addExpectedFailure(self, test, err):
        # If tblib isn't installed, pickling the traceback will always fail.
        # However we don't want tblib to be required for running the tests
        # when they pass or fail as expected. Drop the traceback when an
        # expected failure occurs.
        if tblib is None:
            err = err[0], err[1], None
        self.check_picklable(test, err)
        self.events.append(('addExpectedFailure', self.test_index, err))
    def addUnexpectedSuccess(self, test):
        self.events.append(('addUnexpectedSuccess', self.test_index))
        self.stop_if_failfast()
class RemoteTestRunner(object):
    """
    Run tests and record everything but don't display anything.
    The implementation matches the unpythonic coding style of unittest2.
    """
    resultclass = RemoteTestResult
    def __init__(self, failfast=False, resultclass=None):
        self.failfast = failfast
        # Keep the class-level default unless the caller supplies an override.
        if resultclass is not None:
            self.resultclass = resultclass
    def run(self, test):
        # Record events silently; the master process replays them later.
        outcome = self.resultclass()
        unittest.registerResult(outcome)
        outcome.failfast = self.failfast
        test(outcome)
        return outcome
def default_test_processes():
    """Return the default worker count for the --parallel option."""
    # The parallel test runner requires multiprocessing to start its
    # subprocesses with fork(); without fork(), run serially.
    # (On Python 3.4+ this would be: multiprocessing.get_start_method() != 'fork'.)
    if not hasattr(os, 'fork'):
        return 1
    env_count = os.environ.get('DJANGO_TEST_PROCESSES')
    if env_count is None:
        return multiprocessing.cpu_count()
    return int(env_count)
# Per-process worker number: 0 in the master process, set to a positive
# value in each forked worker by _init_worker().
_worker_id = 0
def _init_worker(counter):
    """
    Switch to databases dedicated to this worker.
    This helper lives at module-level because of the multiprocessing module's
    requirements.
    """
    global _worker_id
    # Atomically claim the next worker number from the shared counter.
    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value
    # Point every connection at the clone created for this worker.
    for alias in connections:
        connection = connections[alias]
        settings_dict = connection.creation.get_test_db_clone_settings(_worker_id)
        # connection.settings_dict must be updated in place for changes to be
        # reflected in django.db.connections. If the following line assigned
        # connection.settings_dict = settings_dict, new threads would connect
        # to the default database instead of the appropriate clone.
        connection.settings_dict.update(settings_dict)
        connection.close()
def _run_subsuite(args):
    """
    Run one sub-suite with a RemoteTestRunner and return (index, events).
    This helper lives at module-level and its arguments are wrapped in a tuple
    because of the multiprocessing module's requirements.
    """
    index, tests, failfast = args
    outcome = RemoteTestRunner(failfast=failfast).run(tests)
    return index, outcome.events
class ParallelTestSuite(unittest.TestSuite):
    """
    Run a series of tests in parallel in several processes.
    While the unittest module's documentation implies that orchestrating the
    execution of tests is the responsibility of the test runner, in practice,
    it appears that TestRunner classes are more concerned with formatting and
    displaying test results.
    Since there are fewer use cases for customizing TestSuite than TestRunner,
    implementing parallelization at the level of the TestSuite improves
    interoperability with existing custom test runners. A single instance of a
    test runner can still collect results from all tests without being aware
    that they have been run in parallel.
    """
    # In case someone wants to modify these in a subclass.
    init_worker = _init_worker
    run_subsuite = _run_subsuite
    def __init__(self, suite, processes, failfast=False):
        # One sub-suite per TestCase class; each is dispatched to a worker
        # as a unit.
        self.subsuites = partition_suite_by_case(suite)
        self.processes = processes
        self.failfast = failfast
        super(ParallelTestSuite, self).__init__()
    def run(self, result):
        """
        Distribute test cases across workers.
        Return an identifier of each test case with its result in order to use
        imap_unordered to show results as soon as they're available.
        To minimize pickling errors when getting results from workers:
        - pass back numeric indexes in self.subsuites instead of tests
        - make tracebacks picklable with tblib, if available
        Even with tblib, errors may still occur for dynamically created
        exception classes such Model.DoesNotExist which cannot be unpickled.
        """
        if tblib is not None:
            tblib.pickling_support.install()
        # Shared counter from which _init_worker hands out worker ids.
        counter = multiprocessing.Value(ctypes.c_int, 0)
        pool = multiprocessing.Pool(
            processes=self.processes,
            initializer=self.init_worker.__func__,
            initargs=[counter])
        args = [
            (index, subsuite, self.failfast)
            for index, subsuite in enumerate(self.subsuites)
        ]
        test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
        while True:
            if result.shouldStop:
                pool.terminate()
                break
            try:
                # Poll with a short timeout so result.shouldStop (failfast)
                # is honoured promptly while workers are still busy.
                subsuite_index, events = test_results.next(timeout=0.1)
            except multiprocessing.TimeoutError:
                continue
            except StopIteration:
                pool.close()
                break
            tests = list(self.subsuites[subsuite_index])
            # Replay the recorded events against the real result object.
            for event in events:
                event_name = event[0]
                handler = getattr(result, event_name, None)
                if handler is None:
                    continue
                test = tests[event[1]]
                args = event[2:]
                handler(test, *args)
        pool.join()
        return result
class DiscoverRunner(object):
    """
    A Django test runner that uses unittest2 test discovery.
    """
    # Hooks that subclasses may override to customize behavior.
    test_suite = unittest.TestSuite
    parallel_test_suite = ParallelTestSuite
    test_runner = unittest.TextTestRunner
    test_loader = unittest.defaultTestLoader
    reorder_by = (TestCase, SimpleTestCase)
    def __init__(self, pattern=None, top_level=None, verbosity=1,
                 interactive=True, failfast=False, keepdb=False,
                 reverse=False, debug_sql=False, parallel=0,
                 **kwargs):
        self.pattern = pattern
        self.top_level = top_level
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast
        self.keepdb = keepdb
        self.reverse = reverse
        self.debug_sql = debug_sql
        self.parallel = parallel
    @classmethod
    def add_arguments(cls, parser):
        # Mirrors the __init__ keyword arguments as command-line options.
        parser.add_argument('-t', '--top-level-directory',
            action='store', dest='top_level', default=None,
            help='Top level of project for unittest discovery.')
        parser.add_argument('-p', '--pattern', action='store', dest='pattern',
            default="test*.py",
            help='The test matching pattern. Defaults to test*.py.')
        parser.add_argument('-k', '--keepdb', action='store_true', dest='keepdb',
            default=False,
            help='Preserves the test DB between runs.')
        parser.add_argument('-r', '--reverse', action='store_true', dest='reverse',
            default=False,
            help='Reverses test cases order.')
        parser.add_argument('-d', '--debug-sql', action='store_true', dest='debug_sql',
            default=False,
            help='Prints logged SQL queries on failure.')
        parser.add_argument(
            '--parallel', dest='parallel', nargs='?', default=1, type=int,
            const=default_test_processes(), metavar='N',
            help='Run tests using up to N parallel processes.')
    def setup_test_environment(self, **kwargs):
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()
    def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
        """
        Build a suite from dotted labels and/or filesystem paths, reorder it,
        and wrap it in a parallel suite when --parallel is in effect.
        """
        suite = self.test_suite()
        test_labels = test_labels or ['.']
        extra_tests = extra_tests or []
        discover_kwargs = {}
        if self.pattern is not None:
            discover_kwargs['pattern'] = self.pattern
        if self.top_level is not None:
            discover_kwargs['top_level_dir'] = self.top_level
        for label in test_labels:
            kwargs = discover_kwargs.copy()
            tests = None
            label_as_path = os.path.abspath(label)
            # if a module, or "module.ClassName[.method_name]", just run those
            if not os.path.exists(label_as_path):
                tests = self.test_loader.loadTestsFromName(label)
            elif os.path.isdir(label_as_path) and not self.top_level:
                # Try to be a bit smarter than unittest about finding the
                # default top-level for a given directory path, to avoid
                # breaking relative imports. (Unittest's default is to set
                # top-level equal to the path, which means relative imports
                # will result in "Attempted relative import in non-package.").
                # We'd be happy to skip this and require dotted module paths
                # (which don't cause this problem) instead of file paths (which
                # do), but in the case of a directory in the cwd, which would
                # be equally valid if considered as a top-level module or as a
                # directory path, unittest unfortunately prefers the latter.
                top_level = label_as_path
                while True:
                    init_py = os.path.join(top_level, '__init__.py')
                    if os.path.exists(init_py):
                        try_next = os.path.dirname(top_level)
                        if try_next == top_level:
                            # __init__.py all the way down? give up.
                            break
                        top_level = try_next
                        continue
                    break
                kwargs['top_level_dir'] = top_level
            if not (tests and tests.countTestCases()) and is_discoverable(label):
                # Try discovery if path is a package or directory
                tests = self.test_loader.discover(start_dir=label, **kwargs)
                # Make unittest forget the top-level dir it calculated from this
                # run, to support running tests from two different top-levels.
                self.test_loader._top_level_dir = None
            suite.addTests(tests)
        for test in extra_tests:
            suite.addTest(test)
        suite = reorder_suite(suite, self.reorder_by, self.reverse)
        if self.parallel > 1:
            parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
            # Since tests are distributed across processes on a per-TestCase
            # basis, there's no need for more processes than TestCases.
            parallel_units = len(parallel_suite.subsuites)
            if self.parallel > parallel_units:
                self.parallel = parallel_units
            # If there's only one TestCase, parallelization isn't needed.
            if self.parallel > 1:
                suite = parallel_suite
        return suite
    def setup_databases(self, **kwargs):
        return setup_databases(
            self.verbosity, self.interactive, self.keepdb, self.debug_sql,
            self.parallel, **kwargs
        )
    def get_resultclass(self):
        # SQL is captured per test only when --debug-sql was requested.
        return DebugSQLTextTestResult if self.debug_sql else None
    def run_suite(self, suite, **kwargs):
        resultclass = self.get_resultclass()
        return self.test_runner(
            verbosity=self.verbosity,
            failfast=self.failfast,
            resultclass=resultclass,
        ).run(suite)
    def teardown_databases(self, old_config, **kwargs):
        """
        Destroys all the non-mirror databases.
        """
        for connection, old_name, destroy in old_config:
            if destroy:
                if self.parallel > 1:
                    # Also drop the per-worker clones (numbered from 1).
                    for index in range(self.parallel):
                        connection.creation.destroy_test_db(
                            number=index + 1,
                            verbosity=self.verbosity,
                            keepdb=self.keepdb,
                        )
                connection.creation.destroy_test_db(old_name, self.verbosity, self.keepdb)
    def teardown_test_environment(self, **kwargs):
        unittest.removeHandler()
        teardown_test_environment()
    def suite_result(self, suite, result, **kwargs):
        # Non-zero when any test failed or errored.
        return len(result.failures) + len(result.errors)
    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Test labels should be dotted Python paths to test modules, test
        classes, or test methods.
        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.
        Returns the number of tests that failed (failures plus errors).
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)
def is_discoverable(label):
    """
    Check if a test label points to a python package or file directory.
    Relative labels like "." and ".." are seen as directories.
    """
    try:
        module = import_module(label)
    except (ImportError, TypeError):
        # Not importable as a dotted path; fall back to the directory check.
        return os.path.isdir(os.path.abspath(label))
    # Packages expose a __path__ attribute; plain modules do not.
    return hasattr(module, '__path__')
def dependency_ordered(test_databases, dependencies):
    """
    Reorder test_databases into an order that honors the dependencies
    described in TEST[DEPENDENCIES].
    """
    ordered = []
    resolved = set()
    # Dependencies of each database signature, aggregated over its aliases.
    deps_by_sig = {}
    # Sanity check - no database may depend on one of its own aliases.
    for sig, (_, aliases) in test_databases:
        combined_deps = set()
        for alias in aliases:
            combined_deps.update(dependencies.get(alias, []))
        if not combined_deps.isdisjoint(aliases):
            raise ImproperlyConfigured(
                "Circular dependency: databases %r depend on each other, "
                "but are aliases." % aliases)
        deps_by_sig[sig] = combined_deps
    remaining = list(test_databases)
    while remaining:
        deferred = []
        # Keep every database whose dependencies are already resolved;
        # defer the rest to a later pass.
        for sig, (db_name, aliases) in remaining:
            if deps_by_sig[sig].issubset(resolved):
                resolved.update(aliases)
                ordered.append((sig, (db_name, aliases)))
            else:
                deferred.append((sig, (db_name, aliases)))
        # No progress in a full pass means an unresolvable cycle.
        if len(deferred) == len(remaining):
            raise ImproperlyConfigured(
                "Circular dependency in TEST[DEPENDENCIES]")
        remaining = deferred
    return ordered
def reorder_suite(suite, classes, reverse=False):
    """
    Reorders a test suite by test type.
    `classes` is a sequence of types
    All tests of type classes[0] are placed first, then tests of type
    classes[1], etc. Tests with no match in classes are placed last.
    If `reverse` is True, tests within classes are sorted in opposite order,
    but test classes are not reversed.
    """
    suite_class = type(suite)
    # One bin per class, plus a trailing bin for unmatched tests.
    bins = [OrderedSet() for _ in range(len(classes) + 1)]
    partition_suite_by_type(suite, classes, bins, reverse=reverse)
    reordered = suite_class()
    for test_bin in bins:
        reordered.addTests(test_bin)
    return reordered
def partition_suite_by_type(suite, classes, bins, reverse=False):
    """
    Partitions a test suite by test type. Also prevents duplicated tests.
    classes is a sequence of types
    bins is a sequence of TestSuites, one more than classes
    reverse changes the ordering of tests within bins
    Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are place in bins[-1]
    """
    suite_class = type(suite)
    members = reversed(tuple(suite)) if reverse else suite
    for test in members:
        # Nested suites of the same type are flattened recursively.
        if isinstance(test, suite_class):
            partition_suite_by_type(test, classes, bins, reverse=reverse)
            continue
        for index, klass in enumerate(classes):
            if isinstance(test, klass):
                bins[index].add(test)
                break
        else:
            bins[-1].add(test)
def partition_suite_by_case(suite):
    """
    Partitions a test suite by test case, preserving the order of tests.
    """
    suite_class = type(suite)
    partitioned = []
    # Consecutive tests of the same TestCase class form one sub-suite.
    for member_type, members in itertools.groupby(suite, type):
        if issubclass(member_type, unittest.TestCase):
            partitioned.append(suite_class(members))
        else:
            # Anything else is a nested suite: recurse into each member.
            for nested in members:
                partitioned.extend(partition_suite_by_case(nested))
    return partitioned
def get_unique_databases_and_mirrors():
    """
    Figure out which databases actually need to be created.
    Deduplicate entries in DATABASES that correspond to the same database or
    are configured as test mirrors.
    Return two values:
    - test_databases: ordered mapping of signatures to (name, list of aliases)
      where all aliases share the same underlying database.
    - mirrored_aliases: mapping of mirror aliases to original aliases.
    """
    mirrored_aliases = {}
    test_databases = {}
    dependencies = {}
    default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
    for alias in connections:
        connection = connections[alias]
        test_settings = connection.settings_dict['TEST']
        if test_settings['MIRROR']:
            # If the database is marked as a test mirror, save the alias.
            mirrored_aliases[alias] = test_settings['MIRROR']
        else:
            # Store a tuple with DB parameters that uniquely identify it.
            # If we have two aliases with the same values for that tuple,
            # we only need to create the test database once.
            item = test_databases.setdefault(
                connection.creation.test_db_signature(),
                (connection.settings_dict['NAME'], set())
            )
            item[1].add(alias)
            if 'DEPENDENCIES' in test_settings:
                dependencies[alias] = test_settings['DEPENDENCIES']
            else:
                # Without an explicit DEPENDENCIES setting, non-default
                # databases implicitly depend on the default one, unless
                # they share its signature.
                if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
                    dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
    # Order creation so that dependencies come before their dependents.
    test_databases = dependency_ordered(test_databases.items(), dependencies)
    test_databases = collections.OrderedDict(test_databases)
    return test_databases, mirrored_aliases
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, **kwargs):
    """
    Creates the test databases.
    Returns a list of (connection, old_name, destroy) tuples, as consumed
    by DiscoverRunner.teardown_databases().
    """
    test_databases, mirrored_aliases = get_unique_databases_and_mirrors()
    old_names = []
    for signature, (db_name, aliases) in test_databases.items():
        first_alias = None
        for alias in aliases:
            connection = connections[alias]
            # Only the first alias of each signature is flagged for
            # destruction at teardown; the others share its database.
            old_names.append((connection, db_name, first_alias is None))
            # Actually create the database for the first connection
            if first_alias is None:
                first_alias = alias
                connection.creation.create_test_db(
                    verbosity=verbosity,
                    autoclobber=not interactive,
                    keepdb=keepdb,
                    serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
                )
                if parallel > 1:
                    # One clone per worker process, numbered from 1.
                    for index in range(parallel):
                        connection.creation.clone_test_db(
                            number=index + 1,
                            verbosity=verbosity,
                            keepdb=keepdb,
                        )
            # Configure all other connections as mirrors of the first one
            else:
                connections[alias].creation.set_as_test_mirror(
                    connections[first_alias].settings_dict)
    # Configure the test mirrors.
    for alias, mirror_alias in mirrored_aliases.items():
        connections[alias].creation.set_as_test_mirror(
            connections[mirror_alias].settings_dict)
    if debug_sql:
        for alias in connections:
            connections[alias].force_debug_cursor = True
    return old_names
|
gpl-3.0
|
mrkev/three.js
|
utils/exporters/blender/addons/io_three/exporter/io.py
|
201
|
2836
|
import os
import shutil
from .. import constants, logger
from . import _json
def copy_registered_textures(dest, registration):
    """Copy the registered textures to the destination (root) path
    :param dest: destination directory
    :param registration: registered textures
    :type dest: str
    :type registration: dict
    """
    logger.debug("io.copy_registered_textures(%s, %s)", dest, registration)
    # Make sure the destination directory exists before copying into it.
    os.makedirs(dest, exist_ok=True)
    for texture in registration.values():
        copy(texture['file_path'], dest)
def copy(src, dst):
    """Copy a file to a destination
    :param src: source file
    :param dst: destination file/path
    """
    # Lazy %-style logger arguments, consistent with the other log calls in
    # this module, so the message is only formatted when DEBUG is enabled.
    logger.debug("io.copy(%s, %s)", src, dst)
    # When given a directory, copy into it under the source's base name.
    if os.path.isdir(dst):
        file_name = os.path.basename(src)
        dst = os.path.join(dst, file_name)
    # No-op when source and destination are the same path.
    if src != dst:
        shutil.copy(src, dst)
def dump(filepath, data, options=None):
    """Dump the output to disk (JSON, msgpack, etc)
    :param filepath: output file path
    :param data: serializable data to write to disk
    :param options: (Default value = None)
    :type options: dict
    """
    options = options or {}
    logger.debug("io.dump(%s, data, options=%s)", filepath, options)
    compress = options.get(constants.COMPRESSION, constants.NONE)
    if compress == constants.MSGPACK:
        try:
            import msgpack
        except ImportError:
            logger.error("msgpack module not found")
            raise
        logger.info("Dumping to msgpack")
        mode = 'wb'
        def writer(payload, stream):
            msgpack.dump(payload, stream)
    else:
        # Optional float rounding, controlled through the options dict.
        if options.get(constants.ENABLE_PRECISION):
            _json.ROUND = options[constants.PRECISION]
        else:
            _json.ROUND = None
        indent = 4 if options.get(constants.INDENT, True) else None
        logger.info("Dumping to JSON")
        mode = 'w'
        def writer(payload, stream):
            _json.json.dump(payload, stream, indent=indent)
    logger.info("Writing to %s", filepath)
    with open(filepath, mode=mode) as stream:
        writer(data, stream)
def load(filepath, options):
    """Load the contents of the file path with the correct parser
    :param filepath: input file path
    :param options:
    :type options: dict
    """
    logger.debug("io.load(%s, %s)", filepath, options)
    compress = options.get(constants.COMPRESSION, constants.NONE)
    if compress == constants.MSGPACK:
        try:
            import msgpack
        except ImportError:
            logger.error("msgpack module not found")
            raise
        parser = msgpack
        mode = 'rb'
    else:
        logger.info("Loading JSON")
        parser = _json.json
        mode = 'r'
    with open(filepath, mode=mode) as stream:
        return parser.load(stream)
|
mit
|
caphrim007/ansible-modules-core
|
database/mysql/mysql_user.py
|
12
|
19403
|
#!/usr/bin/python
# (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mysql_user
short_description: Adds or removes a user from a MySQL database.
description:
- Adds or removes a user from a MySQL database.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
default: null
password:
description:
- set the user's password
required: false
default: null
host:
description:
- the 'host' part of the MySQL username
required: false
default: localhost
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
login_port:
description:
- Port of the MySQL server
required: false
default: 3306
version_added: '1.4'
login_unix_socket:
description:
- The path to a Unix domain socket for local connections
required: false
default: null
priv:
description:
- "MySQL privileges string in the format: C(db.table:priv1,priv2)"
required: false
default: null
append_privs:
description:
- Append the privileges defined by priv to the existing ones for this
user instead of overwriting existing ones.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.4"
state:
description:
- Whether the user should exist. When C(absent), removes
the user.
required: false
default: present
choices: [ "present", "absent" ]
check_implicit_admin:
description:
- Check if mysql allows login as root/nopassword before trying supplied credentials.
required: false
default: false
version_added: "1.3"
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb.
- Both C(login_password) and C(login_username) are required when you are
passing credentials. If none are present, the module will attempt to read
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of 'root' with no password.
- "MySQL server installs with default login_user of 'root' and no password. To secure this user
as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
requirements: [ "ConfigParser", "MySQLdb" ]
author: Mark Theunissen
'''
# Usage examples rendered by ``ansible-doc``; kept as a module-level string
# per Ansible module convention. ("Modifiy" typo fixed below.)
EXAMPLES = """
# Create database user with name 'bob' and password '12345' with all database privileges
- mysql_user: name=bob password=12345 priv=*.*:ALL state=present
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present
# Ensure no user named 'sally' exists, also passing in the auth credentials.
- mysql_user: login_user=root login_password=123456 name=sally state=absent
# Specify grants composed of more than one word
- mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present
# Revoke all privileges for user 'bob' and password '12345'
- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present
# Example privileges string format
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
# Example using login_unix_socket to connect to server
- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock
# Example .my.cnf file for setting the root password
# Note: don't use quotes around the password, because the mysql_user module
# will include them in the password but the mysql client will not
[client]
user=root
password=n<_665{vS43y
"""
import ConfigParser
import getpass
import tempfile
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
# Known MySQL privilege keywords, plus the REQUIRESSL pseudo-privilege
# (see the DOCUMENTATION notes above). Presumably used to validate the
# ``priv`` option before issuing GRANT statements — confirm against the
# privilege-parsing helpers further down.
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
                         'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
                         'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
                         'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
                         'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
                         'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
                         'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
                         'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
                         'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
    """Module-specific error type for invalid privilege specifications."""
    pass
# ===========================================
# MySQL module specific support methods.
#
def user_exists(cursor, user, host):
    """Return True if user@host is present in the mysql ``user`` table."""
    cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))
    row = cursor.fetchone()
    return row[0] > 0
def user_add(cursor, user, host, password, new_priv):
    """Create user@host with the given password and grant any requested
    privileges. Always returns True so the caller records a change."""
    cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user, host, password))
    if new_priv is None:
        return True
    for db_table, priv in new_priv.iteritems():
        privileges_grant(cursor, user, host, db_table, priv)
    return True
def user_mod(cursor, user, host, password, new_priv, append_privs):
    """Reconcile an existing user's password and privileges with the request.

    new_priv is a dict mapping a db.table spec to a list of privilege names
    (the format produced by privileges_unpack()). When append_privs is true,
    privileges not mentioned in new_priv are left untouched. Returns True
    when anything was changed.
    """
    changed = False
    grant_option = False
    # Handle passwords
    if password is not None:
        # Compare hashes server-side so the current plaintext is never needed.
        cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host))
        current_pass_hash = cursor.fetchone()
        cursor.execute("SELECT PASSWORD(%s)", (password,))
        new_pass_hash = cursor.fetchone()
        if current_pass_hash[0] != new_pass_hash[0]:
            cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password))
            changed = True
    # Handle privileges
    if new_priv is not None:
        curr_priv = privileges_get(cursor, user,host)
        # If the user has privileges on a db.table that doesn't appear at all in
        # the new specification, then revoke all privileges on it.
        for db_table, priv in curr_priv.iteritems():
            # If the user has the GRANT OPTION on a db.table, revoke it first.
            if "GRANT" in priv:
                grant_option = True
            if db_table not in new_priv:
                # NOTE(review): "root" is never revoked and PROXY grants are
                # skipped — presumably to avoid locking out the admin account;
                # confirm before changing.
                if user != "root" and "PROXY" not in priv and not append_privs:
                    privileges_revoke(cursor, user,host,db_table,grant_option)
                    changed = True
        # If the user doesn't currently have any privileges on a db.table, then
        # we can perform a straight grant operation.
        for db_table, priv in new_priv.iteritems():
            if db_table not in curr_priv:
                privileges_grant(cursor, user,host,db_table,priv)
                changed = True
        # If the db.table specification exists in both the user's current privileges
        # and in the new privileges, then we need to see if there's a difference.
        db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
        for db_table in db_table_intersect:
            # Symmetric difference: any privilege present on one side only.
            priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
            if (len(priv_diff) > 0):
                if not append_privs:
                    privileges_revoke(cursor, user,host,db_table,grant_option)
                privileges_grant(cursor, user,host,db_table,new_priv[db_table])
                changed = True
    return changed
def user_delete(cursor, user, host):
    """Drop the user@host account; always reports a change."""
    drop_stmt = "DROP USER %s@%s"
    cursor.execute(drop_stmt, (user, host))
    return True
def privileges_get(cursor, user, host):
    """Run SHOW GRANTS for user@host and parse the output.

    Returns a dict mapping a quoted db.table spec to a list of privilege
    names — the same shape produced by privileges_unpack() below. The
    pseudo-privileges 'GRANT' and 'REQUIRESSL' are appended when the grant
    carries WITH GRANT OPTION / REQUIRE SSL. Raises InvalidPrivsError when a
    grant line cannot be parsed. Example input line:

        GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
    """
    cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
    parsed = {}
    for row in cursor.fetchall():
        grant_line = row[0]
        found = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant_line)
        if found is None:
            raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant_line)
        # Normalize 'ALL PRIVILEGES' to 'ALL' so comparisons with the
        # user-supplied spec work.
        privs = [('ALL' if p == 'ALL PRIVILEGES' else p)
                 for p in found.group(1).split(", ")]
        trailer = found.group(4)
        if "WITH GRANT OPTION" in trailer:
            privs.append('GRANT')
        if "REQUIRE SSL" in trailer:
            privs.append('REQUIRESSL')
        parsed[found.group(2)] = privs
    return parsed
def privileges_unpack(priv):
    """ Take a privileges string, typically passed as a parameter, and unserialize
    it into a dictionary, the same format as privileges_get() above. We have this
    custom format to avoid using YAML/JSON strings inside YAML playbooks. Example
    of a privileges string:
        mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL
    The privilege USAGE stands for no privileges, so we add that in on *.* if it's
    not specified in the string, as MySQL will always provide this by default.
    Raises InvalidPrivsError when a privilege is not in VALID_PRIVS.
    """
    output = {}
    for item in priv.strip().split('/'):
        # Each item is "<db>.<table>:<PRIV,PRIV,...>".
        pieces = item.strip().split(':')
        if '.' in pieces[0]:
            # Backtick-quote each component of the db.table spec, leaving
            # bare '*' wildcards unquoted.
            pieces[0] = pieces[0].split('.')
            # NOTE(review): enumerate(pieces) is used to index pieces[0];
            # this only works because both lists happen to have exactly two
            # elements (spec/privs and db/table) — confirm before changing.
            for idx, piece in enumerate(pieces):
                if pieces[0][idx] != "*":
                    pieces[0][idx] = "`" + pieces[0][idx] + "`"
            pieces[0] = '.'.join(pieces[0])
        output[pieces[0]] = pieces[1].upper().split(',')
        new_privs = frozenset(output[pieces[0]])
        if not new_privs.issubset(VALID_PRIVS):
            raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
    if '*.*' not in output:
        output['*.*'] = ['USAGE']
    # if we are only specifying something like REQUIRESSL in *.* we still need
    # to add USAGE as a privilege to avoid syntax errors
    if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']:
        output['*.*'].append('USAGE')
    return output
def privileges_revoke(cursor, user, host, db_table, grant_option):
    """Revoke all privileges on db_table from user@host, revoking the
    GRANT OPTION first when the user holds it."""
    # Escape '%' since mysql db.execute() uses a format string and db/table
    # specs commonly contain SQL wildcards.
    db_table = db_table.replace('%', '%%')
    quoted = mysql_quote_identifier(db_table, 'table')
    if grant_option:
        cursor.execute(("REVOKE GRANT OPTION ON %s" % quoted) + " FROM %s@%s",
                       (user, host))
    cursor.execute(("REVOKE ALL PRIVILEGES ON %s" % quoted) + " FROM %s@%s",
                   (user, host))
def privileges_grant(cursor, user, host, db_table, priv):
    """Grant the privileges in *priv* on db_table to user@host.

    'GRANT' and 'REQUIRESSL' are pseudo-privileges: they are stripped from
    the privilege list and rendered as the WITH GRANT OPTION / REQUIRE SSL
    clauses instead.
    """
    # Escape '%' since mysql db.execute uses a format string and the
    # specification of db and table often use a % (SQL wildcard)
    db_table = db_table.replace('%', '%%')
    priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
    parts = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table')),
             "TO %s@%s"]
    if 'GRANT' in priv:
        parts.append("WITH GRANT OPTION")
    if 'REQUIRESSL' in priv:
        parts.append("REQUIRE SSL")
    cursor.execute(' '.join(parts), (user, host))
def strip_quotes(s):
    """ Strip matching single or double quotes surrounding *s*.

    Quotes are removed only when the string both starts and ends with the
    same quote character; otherwise the string is returned unchanged.
    >>> print strip_quotes('hello')
    hello
    >>> print strip_quotes('"hello"')
    hello
    >>> print strip_quotes("'hello'")
    hello
    >>> print strip_quotes("'hello")
    'hello
    """
    for quote in ("'", '"'):
        if s.startswith(quote) and s.endswith(quote):
            return s.strip(quote)
    return s
def config_get(config, section, option):
    """ Read *option* from *section* of a ConfigParser and strip surrounding
    quotes, since MySQL option files may quote values.
    See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
    """
    raw_value = config.get(section, option)
    return strip_quotes(raw_value)
def _safe_cnf_load(config, path):
data = {'user':'', 'password':''}
# read in user/pass
f = open(path, 'r')
for line in f.readlines():
line = line.strip()
if line.startswith('user='):
data['user'] = line.split('=', 1)[1].strip()
if line.startswith('password=') or line.startswith('pass='):
data['password'] = line.split('=', 1)[1].strip()
f.close()
# write out a new cnf file with only user/pass
fh, newpath = tempfile.mkstemp(prefix=path + '.')
f = open(newpath, 'wb')
f.write('[client]\n')
f.write('user=%s\n' % data['user'])
f.write('password=%s\n' % data['password'])
f.close()
config.readfp(open(newpath))
os.remove(newpath)
return config
def load_mycnf():
    """Load credentials from ~/.my.cnf.

    Returns a dict with 'user' and 'passwd' keys, or False when the file is
    missing, unreadable, or contains no password.
    """
    mycnf = os.path.expanduser('~/.my.cnf')
    if not os.path.exists(mycnf):
        return False

    config = ConfigParser.RawConfigParser()
    try:
        config.readfp(open(mycnf))
    except (IOError):
        return False
    except:
        # The file exists but ConfigParser cannot parse it; retry with the
        # line-based fallback that extracts only user/password.
        config = _safe_cnf_load(config, mycnf)

    # We support two forms of passwords in .my.cnf, both pass= and password=,
    # as these are both supported by MySQL.
    try:
        passwd = config_get(config, 'client', 'password')
    except (ConfigParser.NoOptionError):
        try:
            passwd = config_get(config, 'client', 'pass')
        except (ConfigParser.NoOptionError):
            return False

    # If .my.cnf doesn't specify a user, default to user login name
    try:
        user = config_get(config, 'client', 'user')
    except (ConfigParser.NoOptionError):
        user = getpass.getuser()

    return dict(user=user, passwd=passwd)
def connect(module, login_user, login_password):
    """Open a connection to the local 'mysql' schema and return a cursor.

    Uses the unix socket when login_unix_socket is set, TCP host/port
    otherwise.
    """
    kwargs = dict(host=module.params["login_host"], user=login_user,
                  passwd=login_password, db="mysql")
    if module.params["login_unix_socket"]:
        kwargs["unix_socket"] = module.params["login_unix_socket"]
    else:
        kwargs["port"] = module.params["login_port"]
    return MySQLdb.connect(**kwargs).cursor()
# ===========================================
# Module execution.
#
def main():
    """Module entry point: ensure the MySQL user matches the requested state."""
    module = AnsibleModule(
        argument_spec = dict(
            login_user=dict(default=None),
            login_password=dict(default=None),
            login_host=dict(default="localhost"),
            login_port=dict(default=3306, type='int'),
            login_unix_socket=dict(default=None),
            user=dict(required=True, aliases=['name']),
            password=dict(default=None),
            host=dict(default="localhost"),
            state=dict(default="present", choices=["absent", "present"]),
            priv=dict(default=None),
            append_privs=dict(type="bool", default="no"),
            check_implicit_admin=dict(default=False),
        )
    )
    user = module.params["user"]
    password = module.params["password"]
    host = module.params["host"]
    state = module.params["state"]
    priv = module.params["priv"]
    check_implicit_admin = module.params['check_implicit_admin']
    append_privs = module.boolean(module.params["append_privs"])
    if not mysqldb_found:
        module.fail_json(msg="the python mysqldb module is required")
    if priv is not None:
        # Parse the "db.table:PRIV,PRIV/..." string early so a bad spec fails
        # before any connection is attempted.
        try:
            priv = privileges_unpack(priv)
        except Exception, e:
            module.fail_json(msg="invalid privileges string: %s" % str(e))
    # Either the caller passes both a username and password with which to connect to
    # mysql, or they pass neither and allow this module to read the credentials from
    # ~/.my.cnf.
    login_password = module.params["login_password"]
    login_user = module.params["login_user"]
    if login_user is None and login_password is None:
        mycnf_creds = load_mycnf()
        if mycnf_creds is False:
            login_user = "root"
            login_password = ""
        else:
            login_user = mycnf_creds["user"]
            login_password = mycnf_creds["passwd"]
    elif login_password is None or login_user is None:
        module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
    cursor = None
    try:
        # check_implicit_admin: first try a passwordless root login before
        # falling back to the supplied credentials.
        if check_implicit_admin:
            try:
                cursor = connect(module, 'root', '')
            except:
                pass
        if not cursor:
            cursor = connect(module, login_user, login_password)
    except Exception, e:
        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
    if state == "present":
        if user_exists(cursor, user, host):
            try:
                changed = user_mod(cursor, user, host, password, priv, append_privs)
            except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
                module.fail_json(msg=str(e))
        else:
            # Creating a new user requires an explicit password.
            if password is None:
                module.fail_json(msg="password parameter required when adding a user")
            try:
                changed = user_add(cursor, user, host, password, priv)
            except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
                module.fail_json(msg=str(e))
    elif state == "absent":
        if user_exists(cursor, user, host):
            changed = user_delete(cursor, user, host)
        else:
            changed = False
    module.exit_json(changed=changed, user=user)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
k0ste/ansible
|
lib/ansible/modules/iptables.py
|
20
|
28339
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible metadata describing the module's maturity and support owner.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: iptables
short_description: Modify iptables rules
version_added: "2.0"
author:
- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
- Sébastien DA ROCHA (@sebastiendarocha)
description:
- C(iptables) is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel.
- This module does not handle the saving and/or loading of rules, but rather
only manipulates the current rules that are present in memory. This is the
same as the behaviour of the C(iptables) and C(ip6tables) command which
this module uses internally.
notes:
  - This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command should operate on.
- If the kernel is configured with automatic module loading, an attempt will be made
to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
state:
description:
- Whether the rule should be absent or present.
type: str
choices: [ absent, present ]
default: present
action:
description:
- Whether the rule should be appended at the bottom or inserted at the top.
- If the rule already exists the chain will not be modified.
type: str
choices: [ append, insert ]
default: append
version_added: "2.2"
rule_num:
description:
- Insert the rule as the given rule number.
- This works only with C(action=insert).
type: str
version_added: "2.5"
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
type: str
choices: [ ipv4, ipv6 ]
default: ipv4
chain:
description:
- Specify the iptables chain to modify.
- This could be a user-defined chain or one of the standard iptables chains, like
C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
type: str
protocol:
description:
- The protocol of the rule or of the packet to check.
- The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(esp),
C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
representing one of these protocols or a different one.
- A protocol name from I(/etc/protocols) is also allowed.
- A C(!) argument before the protocol inverts the test.
- The number zero is equivalent to all.
- C(all) will match with all protocols and is taken as default when this option is omitted.
type: str
source:
description:
- Source specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
destination:
description:
- Destination specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
tcp_flags:
description:
- TCP flags specification.
- C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
type: dict
default: {}
version_added: "2.4"
suboptions:
flags:
description:
- List of flags you want to examine.
type: list
flags_set:
description:
- Flags to be set.
type: list
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property.
- The set of matches make up the condition under which a target is invoked.
- Matches are evaluated first to last if specified as an array and work in short-circuit
fashion, i.e. if one extension yields false, evaluation will stop.
type: list
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet matches it.
- The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
below).
- If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
type: str
gateway:
description:
- This specifies the IP address of host to send the cloned packets.
- This option is only valid when C(jump) is set to C(TEE).
type: str
version_added: "2.8"
log_prefix:
description:
- Specifies a log text for the rule. Only make sense with a LOG jump.
type: str
version_added: "2.5"
log_level:
description:
- Logging level according to the syslogd-defined priorities.
- The value can be strings or numbers from 1-8.
- This parameter is only applicable if C(jump) is set to C(LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
goto:
description:
- This specifies that the processing should continue in a user specified chain.
- Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
type: str
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins with
this name will match.
- If this option is omitted, any interface name will match.
type: str
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins
with this name will match.
- If this option is omitted, any interface name will match.
type: str
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets.
- Since there is no way to tell the source or destination ports of such
a packet (or ICMP type), such a packet will not match any rules which specify them.
- When the "!" argument precedes fragment argument, the rule will only match head fragments,
or unfragmented packets.
type: str
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during C(INSERT), C(APPEND), C(REPLACE) operations).
type: str
source_port:
description:
- Source port or port range specification.
- This can either be a service name or a port number.
- An inclusive range can also be specified, using the format C(first:last).
- If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
- If the first port is greater than the second one they will be swapped.
type: str
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped.
This is only valid if the rule also specifies one of the following
protocols: tcp, udp, dccp or sctp."
type: str
to_ports:
description:
- This specifies a destination port or range of ports to use, without
this, the destination port is never altered.
- This is only valid if the rule also specifies one of the protocol
C(tcp), C(udp), C(dccp) or C(sctp).
type: str
to_destination:
description:
- This specifies a destination address to use with C(DNAT).
- Without this, the destination address is never altered.
type: str
version_added: "2.1"
to_source:
description:
- This specifies a source address to use with C(SNAT).
- Without this, the source address is never altered.
type: str
version_added: "2.2"
syn:
description:
- This allows matching packets that have the SYN bit set and the ACK
and RST bits unset.
- When negated, this matches all packets with the RST or the ACK bits set.
type: str
choices: [ ignore, match, negate ]
default: ignore
version_added: "2.5"
set_dscp_mark:
description:
- This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value.
- Mutually exclusive with C(set_dscp_mark_class).
type: str
version_added: "2.1"
set_dscp_mark_class:
description:
- This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark.
- Mutually exclusive with C(set_dscp_mark).
type: str
version_added: "2.1"
comment:
description:
- This specifies a comment that will be added to the rule.
type: str
ctstate:
description:
- C(ctstate) is a list of the connection states to match in the conntrack module.
- Possible states are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT)
type: list
default: []
src_range:
description:
- Specifies the source IP range to match in the iprange module.
type: str
version_added: "2.8"
dst_range:
description:
- Specifies the destination IP range to match in the iprange module.
type: str
version_added: "2.8"
limit:
description:
- Specifies the maximum average number of matches to allow per second.
- The number can specify units explicitly, using `/second', `/minute',
`/hour' or `/day', or parts of them (so `5/second' is the same as
`5/s').
type: str
limit_burst:
description:
- Specifies the maximum burst before the above limit kicks in.
type: str
version_added: "2.1"
uid_owner:
description:
- Specifies the UID or username to use in match by owner rule.
      - From Ansible 2.6 when the C(!) argument is prepended then it inverts
the rule to apply instead to all users except that one specified.
type: str
version_added: "2.1"
gid_owner:
description:
- Specifies the GID or group to use in match by owner rule.
type: str
version_added: "2.9"
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
"jump: REJECT"'
type: str
version_added: "2.1"
icmp_type:
description:
- This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
command 'iptables -p icmp -h'
type: str
version_added: "2.2"
flush:
description:
- Flushes the specified table and chain of all rules.
- If no chain is specified then the entire table is purged.
- Ignores all other parameters.
type: bool
version_added: "2.2"
policy:
description:
- Set the policy for the chain to the given target.
- Only built-in chains can have policies.
- This parameter requires the C(chain) parameter.
- Ignores all other parameters.
type: str
choices: [ ACCEPT, DROP, QUEUE, RETURN ]
version_added: "2.2"
wait:
description:
- Wait N seconds for the xtables lock to prevent multiple instances of
the program from running concurrently.
type: str
version_added: "2.10"
'''
EXAMPLES = r'''
- name: Block specific IP
iptables:
chain: INPUT
source: 8.8.8.8
jump: DROP
become: yes
- name: Forward port 80 to 8600
iptables:
table: nat
chain: PREROUTING
in_interface: eth0
protocol: tcp
match: tcp
destination_port: 80
jump: REDIRECT
to_ports: 8600
comment: Redirect web traffic to port 8600
become: yes
- name: Allow related and established connections
iptables:
chain: INPUT
ctstate: ESTABLISHED,RELATED
jump: ACCEPT
become: yes
- name: Allow new incoming SYN packets on TCP port 22 (SSH).
iptables:
chain: INPUT
protocol: tcp
destination_port: 22
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new SSH connections.
- name: Match on IP ranges
iptables:
chain: FORWARD
src_range: 192.168.1.100-192.168.1.199
dst_range: 10.0.0.1-10.0.0.50
jump: ACCEPT
- name: Tag all outbound tcp packets with DSCP mark 8
iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark: 8
protocol: tcp
- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark_class: CS1
protocol: tcp
- name: Insert a rule on line 5
iptables:
chain: INPUT
protocol: tcp
destination_port: 8080
jump: ACCEPT
action: insert
rule_num: 5
- name: Set the policy for the INPUT chain to DROP
iptables:
chain: INPUT
policy: DROP
- name: Reject tcp with tcp-reset
iptables:
chain: INPUT
protocol: tcp
reject_with: tcp-reset
ip_version: ipv4
- name: Set tcp flags
iptables:
chain: OUTPUT
jump: DROP
protocol: tcp
tcp_flags:
flags: ALL
flags_set:
- ACK
- RST
- SYN
- FIN
- name: iptables flush filter
iptables:
chain: "{{ item }}"
flush: yes
with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
- name: iptables flush nat
iptables:
table: nat
chain: '{{ item }}'
flush: yes
with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
- name: Log packets arriving into an user-defined chain
iptables:
chain: LOGGING
action: append
state: present
limit: 2/second
limit_burst: 20
log_prefix: "IPTABLES:INFO: "
log_level: info
'''
import re
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
# iptables version thresholds for the -w (xtables lock wait) flag and for
# -w taking a seconds argument.
IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
# Binary name per IP protocol version.
BINS = dict(
    ipv4='iptables',
    ipv6='ip6tables',
)
# ICMP type option spelling per IP protocol version.
ICMP_TYPE_OPTIONS = dict(
    ipv4='--icmp-type',
    ipv6='--icmpv6-type',
)
def append_param(rule, param, flag, is_list):
    """Append "flag param" token(s) to *rule*.

    A leading '!' on the value is iptables negation and is emitted as a
    separate '!' token before the flag. When *is_list* is true, *param* is a
    list and each element is appended in turn. None is ignored.
    """
    if param is None:
        # Guard here so the list branch no longer raises TypeError on None.
        return
    if is_list:
        for item in param:
            append_param(rule, item, flag, False)
    elif param.startswith('!'):
        # startswith instead of param[0] so an empty string cannot raise
        # IndexError; '' is emitted as an (empty) value like any other.
        rule.extend(['!', flag, param[1:]])
    else:
        rule.extend([flag, param])
def append_tcp_flags(rule, param, flag):
    """Append "--tcp-flags <examined> <set>" when both flag lists are given."""
    if not param:
        return
    if 'flags' not in param or 'flags_set' not in param:
        return
    examined = ','.join(param['flags'])
    must_be_set = ','.join(param['flags_set'])
    rule.extend([flag, examined, must_be_set])
def append_match_flag(rule, param, flag, negatable):
    """Append *flag* when param is 'match', or '!' + flag for 'negate'
    (only when *negatable* allows it); any other value appends nothing."""
    if param == 'match':
        rule.append(flag)
    elif param == 'negate' and negatable:
        rule.extend(['!', flag])
def append_csv(rule, param, flag):
    """Append *flag* with the comma-joined values of *param* (skip if empty)."""
    if not param:
        return
    rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
    """Load iptables extension *match* ("-m <match>") when *param* is truthy."""
    if not param:
        return
    rule += ['-m', match]
def append_jump(rule, param, jump):
    """Append "-j <jump>" when *param* is truthy (used for the implied REJECT)."""
    if not param:
        return
    rule += ['-j', jump]
def append_wait(rule, param, flag):
    """Append the xtables-lock wait flag ("-w <seconds>") when configured."""
    if not param:
        return
    rule += [flag, param]
def construct_rule(params):
    """Translate module params into the argument list for one iptables rule.

    The emission order below is significant: check_present() relies on
    `iptables -C`, which compares the rule as given, so reordering arguments
    would stop existing rules from being recognized.
    """
    rule = []
    append_wait(rule, params['wait'], '-w')
    append_param(rule, params['protocol'], '-p', False)
    append_param(rule, params['source'], '-s', False)
    append_param(rule, params['destination'], '-d', False)
    append_param(rule, params['match'], '-m', True)
    append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
    append_param(rule, params['jump'], '-j', False)
    # TEE clones packets to a gateway; --gateway only applies to that target.
    if params.get('jump') and params['jump'].lower() == 'tee':
        append_param(rule, params['gateway'], '--gateway', False)
    append_param(rule, params['log_prefix'], '--log-prefix', False)
    append_param(rule, params['log_level'], '--log-level', False)
    append_param(rule, params['to_destination'], '--to-destination', False)
    append_param(rule, params['to_source'], '--to-source', False)
    append_param(rule, params['goto'], '-g', False)
    append_param(rule, params['in_interface'], '-i', False)
    append_param(rule, params['out_interface'], '-o', False)
    append_param(rule, params['fragment'], '-f', False)
    append_param(rule, params['set_counters'], '-c', False)
    append_param(rule, params['source_port'], '--source-port', False)
    append_param(rule, params['destination_port'], '--destination-port', False)
    append_param(rule, params['to_ports'], '--to-ports', False)
    append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
    append_param(
        rule,
        params['set_dscp_mark_class'],
        '--set-dscp-class',
        False)
    append_match_flag(rule, params['syn'], '--syn', True)
    append_match(rule, params['comment'], 'comment')
    append_param(rule, params['comment'], '--comment', False)
    # conntrack and the older 'state' match take the same state list but spell
    # the option differently; load conntrack implicitly when ctstate is given
    # without either match listed.
    if 'conntrack' in params['match']:
        append_csv(rule, params['ctstate'], '--ctstate')
    elif 'state' in params['match']:
        append_csv(rule, params['ctstate'], '--state')
    elif params['ctstate']:
        append_match(rule, params['ctstate'], 'conntrack')
        append_csv(rule, params['ctstate'], '--ctstate')
    # iprange is likewise loaded implicitly when src/dst ranges are used
    # without listing the match explicitly.
    if 'iprange' in params['match']:
        append_param(rule, params['src_range'], '--src-range', False)
        append_param(rule, params['dst_range'], '--dst-range', False)
    elif params['src_range'] or params['dst_range']:
        append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
        append_param(rule, params['src_range'], '--src-range', False)
        append_param(rule, params['dst_range'], '--dst-range', False)
    append_match(rule, params['limit'] or params['limit_burst'], 'limit')
    append_param(rule, params['limit'], '--limit', False)
    append_param(rule, params['limit_burst'], '--limit-burst', False)
    append_match(rule, params['uid_owner'], 'owner')
    append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
    append_param(rule, params['uid_owner'], '--uid-owner', False)
    append_match(rule, params['gid_owner'], 'owner')
    append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
    append_param(rule, params['gid_owner'], '--gid-owner', False)
    # reject_with implies "-j REJECT" when no explicit jump was given.
    if params['jump'] is None:
        append_jump(rule, params['reject_with'], 'REJECT')
    append_param(rule, params['reject_with'], '--reject-with', False)
    append_param(
        rule,
        params['icmp_type'],
        ICMP_TYPE_OPTIONS[params['ip_version']],
        False)
    return rule
def push_arguments(iptables_path, action, params, make_rule=True):
    """Build the iptables command line for *action* on the configured
    table/chain; the full rule spec is appended when *make_rule* is true."""
    cmd = [iptables_path, '-t', params['table'], action, params['chain']]
    # A rule number only makes sense for insertion (-I).
    if action == '-I' and params['rule_num']:
        cmd.append(params['rule_num'])
    if make_rule:
        cmd += construct_rule(params)
    return cmd
def check_present(iptables_path, module, params):
    """Return True when the rule already exists (iptables -C exits 0)."""
    cmd = push_arguments(iptables_path, '-C', params)
    rc, dummy_out, dummy_err = module.run_command(cmd, check_rc=False)
    return rc == 0
def append_rule(iptables_path, module, params):
    """Append the rule to the chain (iptables -A); failures abort the module."""
    module.run_command(push_arguments(iptables_path, '-A', params), check_rc=True)
def insert_rule(iptables_path, module, params):
    """Insert the rule (iptables -I, honoring rule_num); failures abort."""
    module.run_command(push_arguments(iptables_path, '-I', params), check_rc=True)
def remove_rule(iptables_path, module, params):
    """Delete the rule from the chain (iptables -D); failures abort."""
    module.run_command(push_arguments(iptables_path, '-D', params), check_rc=True)
def flush_table(iptables_path, module, params):
    """Flush the chain (iptables -F); no rule specification is built."""
    module.run_command(push_arguments(iptables_path, '-F', params, make_rule=False),
                       check_rc=True)
def set_chain_policy(iptables_path, module, params):
    """Set a built-in chain's policy: iptables -P <chain> <policy>."""
    cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
    cmd += [params['policy']]
    module.run_command(cmd, check_rc=True)
def get_chain_policy(iptables_path, module, params):
    """Return the chain's current policy parsed from 'iptables -L', or None.

    The policy appears in the listing's first header line, e.g.
    "Chain INPUT (policy DROP)".
    """
    cmd = push_arguments(iptables_path, '-L', params)
    rc, out, err = module.run_command(cmd, check_rc=True)
    header = out.split("\n")[0]
    found = re.search(r'\(policy ([A-Z]+)\)', header)
    return found.group(1) if found else None
def get_iptables_version(iptables_path, module):
    """Return the binary's version string, e.g. '1.4.21' from 'iptables v1.4.21'."""
    rc, out, err = module.run_command([iptables_path, '--version'], check_rc=True)
    # Take everything after the first 'v' and drop the trailing newline.
    return out.split('v')[1].rstrip('\n')
def main():
    """Ansible module entry point.

    Parses the module arguments, then performs exactly one of three actions:
    flush a table/chain (-F), set a chain policy (-P), or converge a single
    rule to the requested present/absent state (-A/-I/-D). Exits via
    module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            action=dict(type='str', default='append', choices=['append', 'insert']),
            ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
            chain=dict(type='str'),
            rule_num=dict(type='str'),
            protocol=dict(type='str'),
            wait=dict(type='str'),
            source=dict(type='str'),
            to_source=dict(type='str'),
            destination=dict(type='str'),
            to_destination=dict(type='str'),
            match=dict(type='list', default=[]),
            tcp_flags=dict(type='dict',
                           options=dict(
                               flags=dict(type='list'),
                               flags_set=dict(type='list'))
                           ),
            jump=dict(type='str'),
            gateway=dict(type='str'),
            log_prefix=dict(type='str'),
            log_level=dict(type='str',
                           choices=['0', '1', '2', '3', '4', '5', '6', '7',
                                    'emerg', 'alert', 'crit', 'error',
                                    'warning', 'notice', 'info', 'debug'],
                           default=None,
                           ),
            goto=dict(type='str'),
            in_interface=dict(type='str'),
            out_interface=dict(type='str'),
            fragment=dict(type='str'),
            set_counters=dict(type='str'),
            source_port=dict(type='str'),
            destination_port=dict(type='str'),
            to_ports=dict(type='str'),
            set_dscp_mark=dict(type='str'),
            set_dscp_mark_class=dict(type='str'),
            comment=dict(type='str'),
            ctstate=dict(type='list', default=[]),
            src_range=dict(type='str'),
            dst_range=dict(type='str'),
            limit=dict(type='str'),
            limit_burst=dict(type='str'),
            uid_owner=dict(type='str'),
            gid_owner=dict(type='str'),
            reject_with=dict(type='str'),
            icmp_type=dict(type='str'),
            syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
            flush=dict(type='bool', default=False),
            policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
        ),
        mutually_exclusive=(
            ['set_dscp_mark', 'set_dscp_mark_class'],
            ['flush', 'policy'],
        ),
        required_if=[
            ['jump', 'TEE', ['gateway']],
            ['jump', 'tee', ['gateway']],
        ]
    )
    # Result payload returned to Ansible; 'rule' is the rendered rule spec.
    args = dict(
        changed=False,
        failed=False,
        ip_version=module.params['ip_version'],
        table=module.params['table'],
        chain=module.params['chain'],
        flush=module.params['flush'],
        rule=' '.join(construct_rule(module.params)),
        state=module.params['state'],
    )
    ip_version = module.params['ip_version']
    iptables_path = module.get_bin_path(BINS[ip_version], True)
    # Check if chain option is required
    if args['flush'] is False and args['chain'] is None:
        module.fail_json(msg="Either chain or flush parameter must be specified.")
    # Logging options imply the LOG target; any other explicit jump conflicts.
    if module.params.get('log_prefix', None) or module.params.get('log_level', None):
        if module.params['jump'] is None:
            module.params['jump'] = 'LOG'
        elif module.params['jump'] != 'LOG':
            module.fail_json(msg="Logging options can only be used with the LOG jump target.")
    # Check if wait option is supported
    iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
    if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
        if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
            # -w is supported but without a seconds argument: use the flag alone.
            module.params['wait'] = ''
    else:
        # -w not supported at all: drop the option entirely.
        module.params['wait'] = None
    # Flush the table
    if args['flush'] is True:
        args['changed'] = True
        if not module.check_mode:
            flush_table(iptables_path, module, module.params)
    # Set the policy
    elif module.params['policy']:
        current_policy = get_chain_policy(iptables_path, module, module.params)
        if not current_policy:
            module.fail_json(msg='Can\'t detect current policy')
        changed = current_policy != module.params['policy']
        args['changed'] = changed
        if changed and not module.check_mode:
            set_chain_policy(iptables_path, module, module.params)
    else:
        # Converge a single rule to present/absent.
        insert = (module.params['action'] == 'insert')
        rule_is_present = check_present(iptables_path, module, module.params)
        should_be_present = (args['state'] == 'present')
        # Check if target is up to date
        args['changed'] = (rule_is_present != should_be_present)
        if args['changed'] is False:
            # Target is already up to date
            module.exit_json(**args)
        # Check only; don't modify
        if not module.check_mode:
            if should_be_present:
                if insert:
                    insert_rule(iptables_path, module, module.params)
                else:
                    append_rule(iptables_path, module, module.params)
            else:
                remove_rule(iptables_path, module, module.params)
    module.exit_json(**args)
# Run the module entry point only when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
celebdor/python-midonetclient
|
src/midonetclient/topology/hosts.py
|
2
|
1390
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2015 Midokura Europe SARL, All Rights Reserved.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import uuid
from ..protobuf import utils
from . import get as base_get
from . import get_all as base_get_all
from . import TYPE
from . import msg_type_map
# Specialize the generic topology getters to the HOST object type.
get = functools.partial(base_get, kind=TYPE['HOST'])
get_all = functools.partial(base_get_all, kind=TYPE['HOST'])
def get_dict(sock, obj_uuid):
    """Returns the Host dict that corresponds to the specified uuid string"""
    response = get(sock, obj_uuid=uuid.UUID(hex=obj_uuid))
    return utils.proto_to_dict(response.update.host,
                               message_type_map=msg_type_map)
def get_all_dict(sock):
    """Yield every known Host converted to a plain dict."""
    for update_response in get_all(sock):
        host_proto = update_response.update.host
        yield utils.proto_to_dict(host_proto, message_type_map=msg_type_map)
|
apache-2.0
|
chillbu/cblib
|
thirdparty/protobuf-2.5.0/python/google/protobuf/service_reflection.py
|
601
|
11010
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains metaclasses used to create protocol service and service stub
classes from ServiceDescriptor objects at runtime.
The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to
inject all useful functionality into the classes output by the protocol
compiler at compile-time.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class GeneratedServiceType(type):

  """Metaclass for service classes created at runtime from ServiceDescriptors.

  All methods described by the Service interface are generated and injected
  here, driven by the ServiceDescriptor stored in the class dictionary under
  the DESCRIPTOR key. The protocol compiler relies on this metaclass, and
  clients may also use it directly:

    mydescriptor = ServiceDescriptor(.....)
    class MyProtoService(service.Service):
      __metaclass__ = GeneratedServiceType
      DESCRIPTOR = mydescriptor
    myservice_instance = MyProtoService()
    ...
  """

  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __init__(cls, name, bases, dictionary):
    """Generates the service implementation for a freshly created class.

    Args:
      name: Name of the class (unused; required by the metaclass protocol).
      bases: Base classes of the class being constructed.
      dictionary: Class dictionary; must map 'DESCRIPTOR' to a
        ServiceDescriptor for generation to take place.
    """
    # Subclasses of a generated service carry no descriptor of their own,
    # so there is nothing to build for them.
    if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
      return
    service_descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
    _ServiceBuilder(service_descriptor).BuildService(cls)
class GeneratedServiceStubType(GeneratedServiceType):

  """Metaclass for service stub classes created from ServiceDescriptors.

  Mirrors GeneratedServiceType, but generates client-side stub classes
  instead of service implementations.
  """

  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __init__(cls, name, bases, dictionary):
    """Generates the stub implementation for a freshly created class.

    Args:
      name: Name of the class (unused here).
      bases: Base classes of the class being constructed.
      dictionary: Class dictionary; must map 'DESCRIPTOR' to a
        ServiceDescriptor for stub generation to take place.
    """
    super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
    # Subclasses of a generated stub carry no descriptor of their own,
    # so there is nothing to build for them.
    if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
      return
    service_descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
    _ServiceStubBuilder(service_descriptor).BuildServiceStub(cls)
class _ServiceBuilder(object):
"""This class constructs a protocol service class using a service descriptor.
Given a service descriptor, this class constructs a class that represents
the specified service descriptor. One service builder instance constructs
exactly one service class. That means all instances of that class share the
same builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
service class.
"""
self.descriptor = service_descriptor
def BuildService(self, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
"""
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
cls.GetRequestClass = self._GetRequestClass
cls.GetResponseClass = self._GetResponseClass
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback)
def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class
def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback))
def _NonImplementedMethod(self, method_name, rpc_controller, callback):
"""The body of all methods in the generated service class.
Args:
method_name: Name of the method being executed.
rpc_controller: RPC controller used to execute this method.
callback: A callback which will be invoked when the method finishes.
"""
rpc_controller.SetFailed('Method %s not implemented.' % method_name)
callback(None)
class _ServiceStubBuilder(object):
"""Constructs a protocol service stub class using a service descriptor.
Given a service descriptor, this class constructs a suitable stub class.
A stub is just a type-safe wrapper around an RpcChannel which emulates a
local implementation of the service.
One service stub builder instance constructs exactly one class. It means all
instances of that class share the same service stub builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service stub class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
stub class.
"""
self.descriptor = service_descriptor
def BuildServiceStub(self, cls):
"""Constructs the stub class.
Args:
cls: The class that will be constructed.
"""
def _ServiceStubInit(stub, rpc_channel):
stub.rpc_channel = rpc_channel
self.cls = cls
cls.__init__ = _ServiceStubInit
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateStubMethod(method))
def _GenerateStubMethod(self, method):
return (lambda inst, rpc_controller, request, callback=None:
self._StubMethod(inst, method, rpc_controller, request, callback))
def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
"""
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback)
|
apache-2.0
|
adamchainz/ansible
|
lib/ansible/plugins/test/files.py
|
62
|
1339
|
# (c) 2015, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os.path import isdir, isfile, isabs, exists, lexists, islink, samefile, ismount
from ansible import errors
class TestModule(object):
    ''' Ansible file jinja2 tests '''

    def tests(self):
        # Predicates about a concrete filesystem entry.
        file_tests = {
            'is_dir': isdir,
            'is_file': isfile,
            'is_link': islink,
            'exists': exists,
            'link_exists': lexists,
        }
        # Predicates about the path string / mount status itself.
        path_tests = {
            'is_abs': isabs,
            'is_same_file': samefile,
            'is_mount': ismount,
        }
        combined = {}
        combined.update(file_tests)
        combined.update(path_tests)
        return combined
|
gpl-3.0
|
zhoulingjun/django
|
tests/gis_tests/geoapp/test_serializers.py
|
245
|
3731
|
from __future__ import unicode_literals
import json
from django.contrib.gis.geos import LinearRing, Point, Polygon
from django.core import serializers
from django.test import TestCase, mock, skipUnlessDBFeature
from django.utils import six
from .models import City, MultiFields, PennsylvaniaCity
@skipUnlessDBFeature("gis_enabled")
class GeoJSONSerializerTests(TestCase):
    # Loads the City/PennsylvaniaCity data used by every test below.
    fixtures = ['initial']

    def test_builtin_serializers(self):
        """
        'geojson' should be listed in available serializers.
        """
        all_formats = set(serializers.get_serializer_formats())
        public_formats = set(serializers.get_public_serializer_formats())
        self.assertIn('geojson', all_formats),  # NOTE(review): stray trailing comma — harmless but likely unintended
        self.assertIn('geojson', public_formats)

    def test_serialization_base(self):
        """Serializing a queryset yields valid GeoJSON with one feature per row."""
        geojson = serializers.serialize('geojson', City.objects.all().order_by('name'))
        try:
            geodata = json.loads(geojson)
        except Exception:
            self.fail("Serialized output is not valid JSON")
        self.assertEqual(len(geodata['features']), len(City.objects.all()))
        self.assertEqual(geodata['features'][0]['geometry']['type'], 'Point')
        # Ordered by name, so Chicago is first in the 'initial' fixture.
        self.assertEqual(geodata['features'][0]['properties']['name'], 'Chicago')

    def test_geometry_field_option(self):
        """
        When a model has several geometry fields, the 'geometry_field' option
        can be used to specify the field to use as the 'geometry' key.
        """
        MultiFields.objects.create(
            city=City.objects.first(), name='Name', point=Point(5, 23),
            poly=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))
        geojson = serializers.serialize('geojson', MultiFields.objects.all())
        geodata = json.loads(geojson)
        # Default: the first geometry field ('point') is used.
        self.assertEqual(geodata['features'][0]['geometry']['type'], 'Point')
        geojson = serializers.serialize('geojson', MultiFields.objects.all(),
            geometry_field='poly')
        geodata = json.loads(geojson)
        self.assertEqual(geodata['features'][0]['geometry']['type'], 'Polygon')

    def test_fields_option(self):
        """
        The fields option allows to define a subset of fields to be present in
        the 'properties' of the generated output.
        """
        PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
        geojson = serializers.serialize('geojson', PennsylvaniaCity.objects.all(),
            fields=('county', 'point'))
        geodata = json.loads(geojson)
        self.assertIn('county', geodata['features'][0]['properties'])
        self.assertNotIn('founded', geodata['features'][0]['properties'])

    def test_srid_option(self):
        """The srid option reprojects output coordinates into that SRID."""
        geojson = serializers.serialize('geojson', City.objects.all().order_by('name'), srid=2847)
        geodata = json.loads(geojson)
        self.assertEqual(
            [int(c) for c in geodata['features'][0]['geometry']['coordinates']],
            [1564802, 5613214])

    @mock.patch('django.contrib.gis.serializers.geojson.HAS_GDAL', False)
    def test_without_gdal(self):
        # Without coordinate transformation, the serialization should succeed:
        serializers.serialize('geojson', City.objects.all())
        with six.assertRaisesRegex(self, serializers.base.SerializationError, '.*GDAL is not installed'):
            # Coordinate transformations need GDAL
            serializers.serialize('geojson', City.objects.all(), srid=2847)

    def test_deserialization_exception(self):
        """
        GeoJSON cannot be deserialized.
        """
        with self.assertRaises(serializers.base.SerializerDoesNotExist):
            serializers.deserialize('geojson', '{}')
|
bsd-3-clause
|
LittlePeng/redis-monitor
|
src/api/util/RDP.py
|
14
|
1160
|
"""
The Ramer-Douglas-Peucker algorithm roughly ported from the pseudo-code provided
by http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
"""
from math import sqrt
def distance(a, b):
    """Euclidean distance between the 2-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return sqrt(dx * dx + dy * dy)
def point_line_distance(point, start, end):
    """Perpendicular distance from *point* to the line through *start*/*end*."""
    if start == end:
        # Degenerate segment: fall back to plain point-to-point distance.
        return distance(point, start)
    dx = end[0] - start[0]
    dy = end[1] - start[1]
    numerator = abs(dx * (start[1] - point[1]) - (start[0] - point[0]) * dy)
    denominator = sqrt(dx ** 2 + dy ** 2)
    return numerator / denominator
def rdp(points, epsilon):
    """
    Reduces a series of points to a simplified version that loses detail, but
    maintains the general shape of the series.
    """
    # Find the point farthest from the chord joining the two endpoints.
    farthest_index = 0
    farthest_distance = 0.0
    for position in range(1, len(points) - 1):
        deviation = point_line_distance(points[position], points[0], points[-1])
        if deviation > farthest_distance:
            farthest_distance = deviation
            farthest_index = position
    if farthest_distance < epsilon:
        # Everything lies close enough to the chord: keep only the endpoints.
        return [points[0], points[-1]]
    # Recurse on both halves, dropping the duplicated split point.
    left = rdp(points[:farthest_index + 1], epsilon)
    right = rdp(points[farthest_index:], epsilon)
    return left[:-1] + right
|
mit
|
SKIRT/PTS
|
modeling/maps/selector.py
|
1
|
26666
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.maps.selector Contains the ComponentMapsSelector class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.log import log
from .selectioncomponent import MapsSelectionComponent
from ...core.tools.utils import lazyproperty
from ...core.tools import filesystem as fs
from ...core.tools import numbers
from ...core.tools import sequences
from ...magic.core.mask import Mask
from ...core.filter.filter import parse_filter
from ...core.tools.parsing import real
from ...magic.core.image import Image
from ...magic.tools import colours
from ...magic.basics.mask import Mask as oldMask
from ...core.basics.containers import ordered_by_key
from ...core.tools import formatting as fmt
from ...core.tools.serialization import write_dict
# -----------------------------------------------------------------
class ComponentMapsSelector(MapsSelectionComponent):
"""
This class...
"""
    def __init__(self, *args, **kwargs):
        """
        The constructor: forwards all arguments to MapsSelectionComponent.
        :param args: positional arguments passed through to the base class
        :param kwargs: keyword arguments passed through to the base class
        :return:
        """
        # Call the constructor of the base class
        super(ComponentMapsSelector, self).__init__(*args, **kwargs)
# -----------------------------------------------------------------
    def _run(self, **kwargs):
        """
        Run the selector: optionally auto-select maps, then prompt the user
        and write the results.
        :param kwargs: unused here (setup has already consumed the input)
        :return:
        """
        # 2. Auto-select
        if self.config.auto: self.auto_select()
        # 3. Prompt
        self.prompt()
        # 4. Writing
        self.write()
# -----------------------------------------------------------------
    def setup(self, **kwargs):
        """
        Prepare the selector: propagate the shorthand 'random'/'all' config
        flags to each component, then build the four map-name selections
        (old, young, ionizing, dust) from the configuration.
        :param kwargs: forwarded to the base class setup
        :return:
        """
        # Call the setup function of the base class
        super(ComponentMapsSelector, self).setup(**kwargs)
        # Set random
        if self.config.random: self.config.random_old = self.config.random_young = self.config.random_ionizing = self.config.random_dust = self.config.random
        # Set all
        if self.config.all: self.config.all_old = self.config.all_young = self.config.all_ionizing = self.config.all_dust = True
        # Make selections
        self.old_selection = sequences.make_selection(self.old_map_names, self.config.old, self.config.not_old, nrandom=self.config.random_old, all=self.config.all_old, indices=self.config.old_indices, not_indices=self.config.not_old_indices)
        self.young_selection = sequences.make_selection(self.young_map_names, self.config.young, self.config.not_young, nrandom=self.config.random_young, all=self.config.all_young, indices=self.config.young_indices, not_indices=self.config.not_young_indices)
        self.ionizing_selection = sequences.make_selection(self.ionizing_map_names, self.config.ionizing, self.config.not_ionizing, nrandom=self.config.random_ionizing, all=self.config.all_ionizing, indices=self.config.ionizing_indices, not_indices=self.config.not_ionizing_indices)
        self.dust_selection = sequences.make_selection(self.dust_map_names, self.config.dust, self.config.not_dust, nrandom=self.config.random_dust, all=self.config.all_dust, indices=self.config.dust_indices, not_indices=self.config.not_dust_indices)
# -----------------------------------------------------------------
@lazyproperty
def old_selection_origins(self):
"""
This function ...
:return:
"""
origins = set()
for name in self.old_selection: origins.update(self.old_map_origins[name])
return list(origins)
# -----------------------------------------------------------------
@lazyproperty
def young_selection_origins(self):
"""
This function ...
:return:
"""
origins = set()
for name in self.young_selection: origins.update(self.young_map_origins[name])
return list(origins)
# -----------------------------------------------------------------
@lazyproperty
def ionizing_selection_origins(self):
"""
This function ...
:return:
"""
origins = set()
for name in self.ionizing_selection: origins.update(self.ionizing_map_origins[name])
return list(origins)
# -----------------------------------------------------------------
@lazyproperty
def dust_selection_origins(self):
"""
This function ...
:return:
"""
origins = set()
for name in self.dust_selection: origins.update(self.dust_map_origins[name])
return list(origins)
# -----------------------------------------------------------------
@lazyproperty
def selection_origins(self):
"""
This function ...
:return:
"""
origins = set()
origins.update(self.old_selection_origins)
origins.update(self.young_selection_origins)
origins.update(self.ionizing_selection_origins)
origins.update(self.dust_selection_origins)
return list(origins)
# -----------------------------------------------------------------
def auto_select(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Automatically selecting appropriate maps ...")
# Make selection
old_name, young_name, ionizing_name, dust_name = self.auto_select_maps()
# Show selections
log.info("Selected the following maps automaticaly:")
log.info("")
log.info(" - " + fmt.bold + "old stellar disk: " + fmt.reset + old_name)
log.info(" - " + fmt.bold + "young stellar disk: " + fmt.reset + young_name)
log.info(" - " + fmt.bold + "ionizing stellar disk: " + fmt.reset + ionizing_name)
log.info(" - " + fmt.bold + "dust disk: " + fmt.reset + dust_name)
log.info("")
# Set selections
self.old_selection = [old_name]
self.young_selection = [young_name]
self.ionizing_selection = [ionizing_name]
self.dust_selection = [dust_name]
# -----------------------------------------------------------------
    def auto_select_maps(self):
        """
        Automatically choose one map per component, resolving the dependency
        chain: sSFR and TIR feed the attenuation map, attenuation feeds the
        dust and young maps, the old map feeds hot dust, and hot dust feeds
        the ionizing map.
        :return: tuple of (old, young, ionizing, dust) full map names
        """
        # sSFR
        ssfr_method, ssfr_name = self.auto_select_ssfr_map()
        # TIR
        tir_method, tir_name = self.auto_select_tir_map()
        # Attenuation
        attenuation_method, attenuation_name = self.auto_select_attenuation_map(tir_method, tir_name, ssfr_method, ssfr_name)
        # Old
        old_method, old_name = self.auto_select_old_map()
        # Dust
        dust_method, dust_name = self.auto_select_dust_map(attenuation_method, attenuation_name)
        # Young
        young_name = self.auto_select_young_map(attenuation_method, attenuation_name, old_name)
        # Hot dust
        hot_dust_method, hot_dust_name = self.auto_select_hot_dust_map(old_name)
        # Ionizing
        ionizing_name = self.auto_select_ionizing_map(hot_dust_name)
        # Make full names
        #full_old_name = old_method + "_" + old_name
        full_old_name = old_name # because we only consider disk maps
        full_young_name = young_name
        full_ionizing_name = ionizing_name
        # Only the dust map name needs its method as a prefix.
        full_dust_name = dust_method + "_" + dust_name
        # Return the full names
        return full_old_name, full_young_name, full_ionizing_name, full_dust_name
# -----------------------------------------------------------------
    def auto_select_ssfr_map(self):
        """
        Pick the best colour-based sSFR map, walking a fixed preference list
        of UV-optical/NIR colours and taking the first one available.
        :return: ("colours", map name)
        :raises IOError: when no sSFR maps (or no method directories) exist
        :raises RuntimeError: when no colour-based map matches the preferences
        """
        # Inform the user
        log.info("Automatically selecting appropriate sSFR map ...")
        preferred_method = "colours"
        # Preference order: first match wins.
        preferred_colours = ["FUV-r", "FUV-i", "FUV-H", "FUV-g"]
        # Select sSFR
        if not self.has_ssfr_maps: raise IOError("No sSFR maps are present")
        if not self.ssfr_has_methods: raise IOError("Place the contents of the sSFR maps inside a 'colours' directory")
        # Get sSFR method names
        ssfr_methods = self.ssfr_map_methods
        if preferred_method not in ssfr_methods: raise RuntimeError("Cannot make automatic choice when sSFR maps are not based on colours")
        # Get the sSFR colour map names
        map_names = self.ssfr_map_names_for_method("colours")
        # Select the preferred sSFR colour map name
        ssfr_map_name = None
        # Loop over the preferred colours
        for colour in preferred_colours:
            # find_matching_colour_map_name is a module-level helper (defined elsewhere in this file)
            map_name = find_matching_colour_map_name(colour, map_names)
            if map_name is not None:
                ssfr_map_name = map_name
                break
        # Check
        if ssfr_map_name is None: raise RuntimeError("Cannot make automatic choice: none of the expected sSFR colour maps are present")
        # Return method and map name
        return preferred_method, ssfr_map_name
# -----------------------------------------------------------------
    def auto_select_tir_map(self):
        """
        Pick the best TIR map: prefer the 'multi'-filter method over 'single',
        prefer maps built from more filters, and on a tie prefer maps that do
        NOT use SPIRE filters.
        :return: (method, map name) of the best TIR map
        :raises IOError: when no TIR maps or no method directories exist
        """
        # Inform the user
        log.info("Automatically selecting appropriate TIR map ...")
        # Preferred methods
        preferred_methods = ["multi", "single"]
        # Check
        if not self.has_tir_maps: raise IOError("No TIR maps are present")
        if not self.tir_has_methods: raise IOError("No methods for the TIR maps")
        # Get TIR method names
        tir_methods = self.tir_map_methods
        # The best combination
        best = None
        best_nfilters = None
        best_has_spire = None
        # Loop over the preferred methods
        for method in preferred_methods:
            # No maps for this method
            if method not in tir_methods: continue
            # Get the map names for this method
            map_names = self.tir_map_names_for_method(method)
            # Loop over the maps
            for name in map_names:
                # Get the filters (origins) for this map
                filters = self.tir_origins[method][name]
                nfilters = len(filters)
                has_spire = sequences.contains_any(filters, self.spire_filters)
                if best is None:
                    # First candidate seen.
                    best = (method, name)
                    best_nfilters = nfilters
                    best_has_spire = has_spire
                elif nfilters > best_nfilters:
                    # More filters beats fewer.
                    best = (method, name)
                    best_nfilters = nfilters
                    best_has_spire = has_spire
                elif nfilters == best_nfilters and best_has_spire and not has_spire:
                    # Tie on filter count: prefer the SPIRE-free map.
                    best = (method, name)
                    best_nfilters = nfilters
                    best_has_spire = has_spire
        # Return the best map
        return best
# -----------------------------------------------------------------
def auto_select_attenuation_map(self, tir_method, tir_name, ssfr_method, ssfr_name):
"""
This function ...
:param tir_method:
:param tir_name:
:param ssfr_method:
:param ssfr_name:
:return:
"""
# Inform the user
log.info("Automatically selecting appropriate FUV attenuation map ...")
# Check
if not self.has_attenuation_maps: raise IOError("No atttenuation maps are present")
if not self.attenuation_has_methods: raise IOError("No methods for the attenuation maps")
# Get attenuation method names
attenuation_methods = self.attenuation_map_methods
# Prefer Cortese, Buat otherwise
if "cortese" in attenuation_methods: return self.auto_select_cortese_attenuation_map(tir_method, tir_name, ssfr_method, ssfr_name)
elif "buat" in attenuation_methods: return self.auto_select_buat_attenuation_map(tir_method, tir_name)
else: raise ValueError("Cannot find a proper attenuation map method")
# -----------------------------------------------------------------
def auto_select_cortese_attenuation_map(self, tir_method, tir_name, ssfr_method, ssfr_name):

    """
    Automatically pick the Cortese attenuation map that was made with the
    given TIR and sSFR maps.
    :param tir_method: method name of the selected TIR map
    :param tir_name: name of the selected TIR map
    :param ssfr_method: method name of the selected sSFR map
    :param ssfr_name: name of the selected sSFR map
    :return: tuple of ("cortese", map name)
    """

    # Inform the user
    log.debug("Automatically selecting appropriate Cortese attenuation map ...")

    # Take the first Cortese map whose name references both the TIR and the sSFR map
    candidates = self.attenuation_map_names_for_method("cortese")
    map_name = next((candidate for candidate in candidates if tir_name in candidate and ssfr_name in candidate), None)

    # Not found?
    if map_name is None: raise RuntimeError("Something went wrong finding the required map")

    # Return
    return "cortese", map_name
# -----------------------------------------------------------------
def auto_select_buat_attenuation_map(self, tir_method, tir_name):

    """
    Automatically pick the Buat attenuation map made with the given TIR map,
    preferring a FUV-based map over a NUV-based one.
    :param tir_method: method name of the selected TIR map
    :param tir_name: name of the selected TIR map
    :return: tuple of ("buat", map name)
    """

    # Inform the user
    log.debug("Automatically selecting appropriate Buat attenuation map ...")

    # UV filters in order of preference
    preferred_filters = ["FUV", "NUV"]
    map_name = None

    # Loop over the preferred filters
    for uv_filter_name in preferred_filters:

        # Loop over the maps for this UV filter
        for name in self.attenuation_map_names_for_method("buat"):
            if not name.startswith(uv_filter_name): continue
            if tir_name in name:
                map_name = name
                break

        # BUGFIX: stop at the first (most preferred) UV filter that yields a match;
        # previously only the inner loop broke, so a later, less preferred filter
        # (NUV) could overwrite a FUV match
        if map_name is not None: break

    # Not found?
    if map_name is None: raise RuntimeError("Something went wrong finding the required map")

    # Return
    return "buat", map_name
# -----------------------------------------------------------------
def auto_select_old_map(self):

    """
    Automatically select the old stellar disk map, preferring the listed
    filters in order.
    :return: tuple of (method name, map name)
    """

    # Inform the user
    log.info("Automatically selecting appropriate old stellar map ...")

    # Filters in order of preference
    preferred_filters = ["IRAC I1", "IRAC I2", "2MASS H", "2MASS K", "WISE W1", "WISE W2"]

    # Check
    if not self.has_old_maps: raise IOError("No old stellar maps are present")
    if not self.old_has_methods: raise IOError("No methods for the old stellar maps")

    method_name = "disk"
    if method_name not in self.old_map_methods: raise ValueError("'disk' method is not present among the old stellar maps")

    # Loop over the preferred filters, most preferred first
    map_name = None
    for filter_name in preferred_filters:

        fltr = parse_filter(filter_name)

        # Loop over the maps
        for name in self.old_map_names_for_method(method_name):
            map_filter = parse_filter(name)
            if map_filter == fltr:
                map_name = name
                break

        # BUGFIX: stop at the first (most preferred) filter with a match; previously
        # only the inner loop broke, so a later, less preferred filter could
        # overwrite an earlier match
        if map_name is not None: break

    # No map?
    if map_name is None: raise ValueError("No appropriate old stellar disk map was found")

    # Return
    return method_name, map_name
# -----------------------------------------------------------------
def auto_select_dust_map(self, attenuation_method, attenuation_name):

    """
    Automatically select the dust map corresponding to the chosen attenuation map.
    :param attenuation_method: method name of the selected attenuation map
    :param attenuation_name: name of the selected attenuation map
    :return: tuple of (method name, map name)
    """

    # Inform the user
    log.info("Automatically selecting appropriate dust map ...")

    preferred_method = "attenuation"

    # Sanity checks
    if not self.has_dust_maps: raise IOError("No dust maps are present")
    if not self.dust_has_methods: raise IOError("No methods for the dust maps")
    if preferred_method not in self.dust_map_methods: raise ValueError("'attenuation' method is not present among the dust maps")

    # Take the first attenuation dust map referencing both the method and the name
    map_name = next((candidate for candidate in self.dust_map_names_for_method(preferred_method)
                     if attenuation_method in candidate and attenuation_name in candidate), None)

    # Check if found
    if map_name is None: raise RuntimeError("Appropriate dust map not found")

    # Return
    return preferred_method, map_name
# -----------------------------------------------------------------
def auto_select_young_map(self, attenuation_method, attenuation_name, old_name):

    """
    Automatically select the young stellar map made with the given attenuation
    map and old stellar filter, picking the highest factor whose relative
    number of negative pixels stays below the configured limit.
    :param attenuation_method: method name of the selected attenuation map
    :param attenuation_name: name of the selected attenuation map
    :param old_name: name of the selected old stellar map
    :return: the name of the selected young stellar map
    """

    # Inform the user
    log.info("Automatically selecting appropriate young stellar map ...")

    # Check
    if not self.has_young_maps: raise IOError("No young stellar maps are present")
    if self.young_has_methods: raise IOError("Didn't expect different methods for the young stellar maps")

    # Get the paths to the young stellar maps
    paths = self.get_young_map_paths()

    # factor -> relative number of negatives, factor -> map name
    nnegatives_dict = dict()
    names_dict = dict()

    # Loop over the young stellar maps made with the specific attenuation map
    # AND WITH THE SPECIFIC OLD STELLAR FILTER
    for name in self.young_map_names_no_methods:

        if not (attenuation_method in name and attenuation_name in name): continue
        if not old_name in name: continue

        # Get the factor (encoded as the last '__'-separated token of the name)
        factor = real(name.split("__")[-1])

        # Open the map to get the number of negatives in a central ellipse
        map_path = paths[name]
        image = Image.from_file(map_path)
        if "negatives" not in image.masks:
            log.warning("Negatives mask not present in the '" + name + "' young stellar map image: skipping ...")
            continue

        # Count the number of negatives
        mask = image.masks["negatives"]
        if isinstance(mask, oldMask): mask = Mask(mask, wcs=image.wcs) # Fix type
        nnegatives = mask.relative_nmasked_in(self.central_ellipse)

        # Add to dictionary
        nnegatives_dict[factor] = nnegatives
        names_dict[factor] = name

    # Check if anything is found
    if len(nnegatives_dict) == 0: raise RuntimeError("No appropriate young stellar maps were found")

    # If only one is found
    if len(nnegatives_dict) == 1:
        log.warning("Map was found for only one factor")
        # BUGFIX: dict.keys() is not subscriptable on Python 3; take the single
        # key with next(iter(...)) instead of names_dict.keys()[0]
        factor = next(iter(names_dict))
        return names_dict[factor]

    # Sort by increasing factor
    nnegatives_dict = ordered_by_key(nnegatives_dict)

    # Find the factor
    factor = find_factor_max_nnegatives(nnegatives_dict, self.config.young_max_nnegatives)

    # Get the corresponding map name
    map_name = names_dict[factor]

    # Return the map name
    return map_name
# -----------------------------------------------------------------
def auto_select_hot_dust_map(self, old_name):

    """
    Automatically select the hot dust map corrected with the same old stellar
    filter as the old stellar disk map, picking the highest factor whose
    relative number of negative pixels stays below the configured limit.
    :param old_name: name of the selected old stellar map
    :return: tuple of (method name, map name), or (None, None) when no hot dust maps exist
    """

    # Inform the user
    log.info("Automatically selecting appropriate hot dust map ...")

    method_name = "hot"

    # Check
    if not self.has_dust_maps: raise IOError("No dust maps are present")
    if not self.dust_has_methods: raise IOError("No methods for the dust maps")

    # No hot dust maps
    if method_name not in self.dust_map_methods:
        log.warning("No hot dust maps are present")
        return None, None

    # Get the paths to the hot dust maps
    paths = self.get_dust_map_paths(flatten=False, method=method_name)

    # Initialize dictionaries: factor -> relative number of negatives, factor -> map name
    nnegatives_dict = dict()
    names_dict = dict()

    # Loop over the hot dust maps corrected with the same old stellar filter as the old stellar disk map
    for name in self.dust_map_names_for_method(method_name):

        if not name.startswith(old_name): continue

        # Get the factor (encoded as the second '__'-separated token of the name)
        factor = real(name.split("__")[1])

        # Open the map to get the number of negatives in a central ellipse
        map_path = paths[name]
        image = Image.from_file(map_path)
        if "negatives" not in image.masks:
            log.warning("Negatives mask not present in the '" + name + "' hot dust map image: skipping ...")
            continue

        # Count the number of negatives
        mask = image.masks["negatives"]
        if isinstance(mask, oldMask): mask = Mask(mask, wcs=image.wcs) # Fix type
        nnegatives = mask.relative_nmasked_in(self.central_ellipse)

        # Add to dictionary
        nnegatives_dict[factor] = nnegatives
        names_dict[factor] = name

    # Check
    if len(nnegatives_dict) == 0: raise RuntimeError("No appropriate hot dust maps were found")

    # Only one is found
    if len(nnegatives_dict) == 1:
        log.warning("Map was found for only one factor")
        # BUGFIX: dict.keys() is not subscriptable on Python 3; take the single
        # key with next(iter(...)) instead of names_dict.keys()[0]
        factor = next(iter(names_dict))
        return method_name, names_dict[factor]

    # Sort by increasing factor
    nnegatives_dict = ordered_by_key(nnegatives_dict)

    # Find the factor
    factor = find_factor_max_nnegatives(nnegatives_dict, self.config.hot_dust_max_nnegatives)

    # Get the corresponding map name
    map_name = names_dict[factor]

    # Return
    return method_name, map_name
# -----------------------------------------------------------------
def auto_select_ionizing_map(self, hot_dust_name):

    """
    Automatically pick the ionizing stellar map matching the selected hot dust
    map, or fall back on the H-alpha based map when no hot dust map exists.
    :param hot_dust_name: name of the selected hot dust map, or None
    :return: the name of the selected ionizing stellar map
    """

    # Inform the user
    log.info("Automatically selecting appropriate ionizing stellar map ...")

    # Sanity checks
    if not self.has_ionizing_maps: raise IOError("No ionizing stellar maps are present")
    if self.ionizing_has_methods: raise IOError("Didn't expect different methods for the ionizing stellar maps")

    # No hot dust maps could be made: fall back on the H-alpha based map
    if hot_dust_name is None:
        name = "halpha"
        if name not in self.ionizing_map_names_no_methods: raise RuntimeError("Could not find appropriate ionizing stellar map: no hot dust and no halpha map")
        return name

    # A hot dust map was found: pick the first ionizing map that references it
    match = next((candidate for candidate in self.ionizing_map_names_no_methods if hot_dust_name in candidate), None)

    # Check
    if match is None: raise RuntimeError("Could not find the appropriate ionizing stellar map")

    # Return
    return match
# -----------------------------------------------------------------
def prompt(self):

    """
    Prompt the user for each map type for which no selection was made yet.
    The order (old, young, ionizing, dust) mirrors the selection order used
    elsewhere in this component.
    :return:
    """

    # Inform the user
    log.info("Prompting for user input ...")

    # Old stellar map
    if not self.has_old_selection: self.prompt_old()

    # Young stellar map
    if not self.has_young_selection: self.prompt_young()

    # Ionizing stellar map
    if not self.has_ionizing_selection: self.prompt_ionizing()

    # Dust map
    if not self.has_dust_selection: self.prompt_dust()
# -----------------------------------------------------------------
def write(self):

    """
    Write the output of this component (currently only the map selection).
    :return:
    """

    # Inform the user
    log.info("Writing ...")

    # Write the selection
    self.write_selection()
# -----------------------------------------------------------------
def write_selection(self):

    """
    Write the current map selection to a new, uniquely indexed
    'selection_<index>.dat' file in the maps components directory.
    :return:
    """

    # Inform the user
    log.info("Writing the selection ...")

    # Make single selection dictionary
    selection = dict()

    # Set the selected map names
    selection["old"] = self.old_selection
    selection["young"] = self.young_selection
    selection["ionizing"] = self.ionizing_selection
    selection["dust"] = self.dust_selection

    # Determine path for the selection file: collect the indices already in use
    # (parsed from the 'selection_<index>.dat' filenames) ...
    current_indices = fs.files_in_path(self.maps_components_path, extension="dat", returns="name", startswith="selection", convert=int, convert_split_pattern="_", convert_split_index=1)
    # ... and take the lowest index not yet used, so previous selections are never overwritten
    index = numbers.lowest_missing_integer(current_indices)
    selection_path = fs.join(self.maps_components_path, "selection_" + str(index) + ".dat")

    # Write the selection
    write_dict(selection, selection_path)
# -----------------------------------------------------------------
@property
def maps_sub_path(self):

    """
    The maps sub-directory path for this component.
    NOTE(review): deliberately returns None — presumably this component does
    not operate within a single maps sub-directory; confirm against the base class.
    :return:
    """

    return None
# -----------------------------------------------------------------
def find_matching_colour_map_name(colour, map_names):

    """
    Return the first map name that corresponds to the given colour, or None
    when no name matches.
    :param colour: the colour to match
    :param map_names: iterable of map names
    :return: the first matching map name, or None
    """

    # Take the first name recognized as the same colour; None when exhausted
    return next((name for name in map_names if colours.same_colour(colour, name)), None)
# -----------------------------------------------------------------
def find_factor_max_nnegatives(nnegatives, max_nnegatives):

    """
    Find the highest factor whose relative number of negative pixels is below
    the given limit.
    :param nnegatives: mapping of factor -> relative number of negatives,
        ordered by increasing factor (callers sort it with ordered_by_key)
    :param max_nnegatives: maximum allowed relative number of negatives
    :return: the selected factor
    :raises ValueError: when no factor satisfies the limit
    """

    # Loop over the factors from highest to lowest
    # BUGFIX: wrap in list() so this works on every Python version
    # (dict views are not reversible before Python 3.8)
    for factor in reversed(list(nnegatives)):

        # Get the number of negatives
        negatives = nnegatives[factor]

        # Succes?
        if negatives < max_nnegatives: return factor

    # Error
    raise ValueError("None of the maps have a relative number of negatives lower than the limit of " + str(max_nnegatives*100) + "%")
# -----------------------------------------------------------------
|
agpl-3.0
|
virtualopensystems/nova
|
nova/openstack/common/report/views/text/header.py
|
79
|
1534
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Text Views With Headers
This package defines several text views with headers
"""
class HeaderView(object):
    """A Text View With a Header

    This view simply serializes the model and places the given
    header on top.

    :param header: the header (can be anything on which str() can be called)
    """

    def __init__(self, header):
        # Kept as-is; stringified only at render time
        self.header = header

    def __call__(self, model):
        # One header line, then the serialized model
        return "\n".join([str(self.header), str(model)])
class TitledView(HeaderView):
    """A Text View With a Title

    This view simply serializes the model, and places
    a preformatted header containing the given title
    text on top. The title text can be up to 64 characters
    long.

    :param str title: the title of the view
    """

    # Banner layout: a 72-char '=' rule, the title centered in a 64-char
    # field flanked by '====' on each side, then another 72-char rule
    FORMAT_STR = ('=' * 72) + "\n===={0: ^64}====\n" + ('=' * 72)

    def __init__(self, title):
        # Pre-render the banner once and hand it to HeaderView as the header
        super(TitledView, self).__init__(self.FORMAT_STR.format(title))
|
apache-2.0
|
cherrydocker/minos
|
supervisor/superlance/crashmailbatch.py
|
5
|
4173
|
#!/usr/bin/env python -u
##############################################################################
#
# Copyright (c) 2007 Agendaless Consulting and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the BSD-like license at
# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
# this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
# FITNESS FOR A PARTICULAR PURPOSE
#
##############################################################################
# A event listener meant to be subscribed to PROCESS_STATE_CHANGE
# events. It will send mail when processes that are children of
# supervisord transition unexpectedly to the EXITED state.
# A supervisor config snippet that tells supervisor to use this script
# as a listener is below.
#
# [eventlistener:crashmailbatch]
# command=python crashmailbatch --toEmail=you@bar.com --fromEmail=me@bar.com
# events=PROCESS_STATE,TICK_60
doc = """\
crashmailbatch.py [--interval=<batch interval in minutes>]
[--toEmail=<email address>]
[--fromEmail=<email address>]
[--subject=<email subject>]
[--smtpHost=<hostname or address>]
Options:
--interval - batch cycle length (in minutes). The default is 1.0 minute.
This means that all events in each cycle are batched together
and sent as a single email
--toEmail - the email address to send alerts to
--fromEmail - the email address to send alerts from
--password - the password of the from mail user
--subject - the email subject line
--smtpHost - the SMTP server's hostname or address (defaults to 'localhost')
--supervisordPort - the supervisord server's listening port
A sample invocation:
crashmailbatch.py --toEmail="you@bar.com" --fromEmail="me@bar.com"
"""
import ConfigParser
import os
import socket
from supervisor import childutils
from superlance.process_state_email_monitor import ProcessStateEmailMonitor
class CrashMailBatch(ProcessStateEmailMonitor):
    """Batches unexpected PROCESS_STATE_EXITED events and mails one alert per cycle."""

    # Only subscribe to exit transitions; expected exits are filtered out below
    process_state_events = ['PROCESS_STATE_EXITED']

    def load_alert_config_file(self):
        """Parse the optional alert.cfg located one directory above this module.

        Returns a ConfigParser instance (empty when the file does not exist).
        """
        alert_config_path = '%s/../alert.cfg' % os.path.dirname(__file__)
        parser = ConfigParser.SafeConfigParser()
        if os.path.exists(alert_config_path):
            parser.read([alert_config_path])
        return parser

    def add_customized_mail_list(self, pheaders):
        """Build the per-service extra recipient list from alert.cfg.

        The process group name must look like 'service--cluster--job'; the
        '[service--cluster]' section's 'to_emails' option lists additional
        recipients not already on the global list.
        Raises ValueError for group names that do not have three parts.
        """
        self.customized_mail_list = []
        name_list = pheaders['groupname'].split('--')
        alert_section = str()
        if len(name_list) == 3:
            service, cluster, job = name_list
            alert_section = service + "--" + cluster
        else:
            raise ValueError("Invalid cluster name: %s" % pheaders['groupname'])
        if self.alert_config_parser.has_option(alert_section, 'to_emails'):
            mail_list = [mail.strip()
                         for mail in self.alert_config_parser.get(alert_section, 'to_emails').split(",")]
            for mail in mail_list:
                # Only add addresses not already configured globally
                if mail not in self.to_emails:
                    self.customized_mail_list.append(mail)

    def __init__(self, **kwargs):
        ProcessStateEmailMonitor.__init__(self, **kwargs)
        self.hostname = socket.gethostname()
        self.local_ip = socket.gethostbyname(self.hostname)
        self.subject = 'Crash alert from supervisord on %s' % self.hostname
        # 'now' can be injected for testing; None means use the real clock
        self.now = kwargs.get('now', None)
        self.alert_config_parser = self.load_alert_config_file()

    def get_process_state_change_msg(self, headers, payload):
        """Return the alert line for an exit event, or None when the exit was expected."""
        pheaders, pdata = childutils.eventdata(payload+'\n')
        if int(pheaders['expected']):
            return None
        self.add_customized_mail_list(pheaders)
        txt = 'Process %(groupname)s:%(processname)s (pid %(pid)s) died \
unexpectedly' % pheaders
        # NOTE(review): self.supervisord_port is presumably set by
        # ProcessStateEmailMonitor from the command line — confirm
        return '%s -- http://%s:%d -- %s' % (childutils.get_asctime(self.now),
                                             self.local_ip, self.supervisord_port, txt)
def main():
    # Build the monitor from command-line options (see module docstring)
    # and enter its supervisor event loop
    crash = CrashMailBatch.create_from_cmd_line()
    crash.run()


if __name__ == '__main__':
    main()
|
apache-2.0
|
ebifrier/saya_chan
|
third-party/gtest-1.7.0/test/gtest_break_on_failure_unittest.py
|
2140
|
7339
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  process = gtest_test_utils.Subprocess(command, env=environ)
  # Signal-terminated maps to 1, normal exit (any code) maps to 0.
  return 1 if process.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    # Build a human-readable description of the env var state for the
    # failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Translate the requested flag value into the actual command-line flag.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Restore the environment so later tests start clean.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # This test only makes sense on Windows, where the exception-catching
  # machinery could swallow the deliberate crash.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
  gtest_test_utils.Main()  # delegate to gtest's test-runner wrapper
|
gpl-3.0
|
kei-yamazaki/cf-php-build-pack
|
lib/build_pack_utils/process.py
|
3
|
8083
|
from __future__ import print_function
import signal
import subprocess
import sys
import logging
from datetime import datetime
from threading import Thread
from Queue import Queue, Empty
#
# This code comes from Honcho. Didn't need the whole Honcho
# setup, so I just swiped this part which is what the build
# pack utils library needs.
#
# https://github.com/nickstenning/honcho
#
# I've modified parts to fit better with this module.
#
# Copyright (c) 2012 Nick Stenning, http://whiteink.com/
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def _enqueue_output(proc, queue):
if not proc.quiet:
for line in iter(proc.stdout.readline, b''):
try:
line = line.decode('utf-8')
except UnicodeDecodeError as e:
queue.put((proc, e))
continue
if not line.endswith('\n'):
line += '\n'
queue.put((proc, line))
proc.stdout.close()
class Process(subprocess.Popen):
    """A Popen subclass run through the shell with stdout piped and stderr
    merged into it, carrying display metadata for the ProcessManager."""

    def __init__(self, cmd, name=None, quiet=False, *args, **kwargs):
        self.name = name
        self.quiet = quiet
        self.reader = None    # reader thread, attached later
        self.printer = None   # Printer, attached later
        self.dead = False     # set once the manager notices the exit
        if self.quiet:
            self.name = "{0} (quiet)".format(self.name)
        # Manager defaults; caller-supplied kwargs win
        options = dict(stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT,
                       shell=True,
                       bufsize=1,
                       close_fds=True)
        options.update(kwargs)
        super(Process, self).__init__(cmd, *args, **options)
class ProcessManager(object):
    """
    Here's where the business happens. The ProcessManager multiplexes and
    pretty-prints the output from a number of Process objects, typically added
    using the add_process() method.

    Example:

        pm = ProcessManager()
        pm.add_process('name', 'ruby server.rb')
        pm.add_process('name', 'python worker.py')
        pm.loop()
    """

    def __init__(self):
        self.processes = []         # all managed Process objects
        self.queue = Queue()        # (process, line) tuples from the reader threads
        self.returncode = None      # returncode of the first process to exit
        self._terminating = False   # guards against running terminate() twice
        self._log = logging.getLogger('process')

    def add_process(self, name, cmd, quiet=False):
        """
        Add a process to this manager instance:

        Arguments:
        name        - a human-readable identifier for the process
                      (e.g. 'worker'/'server')
        cmd         - the command-line used to run the process
                      (e.g. 'python run.py')
        quiet       - suppress the process's output lines entirely
        """
        self._log.debug("Adding process [%s] with cmd [%s]", name, cmd)
        self.processes.append(Process(cmd, name=name, quiet=quiet))

    def loop(self):
        """
        Enter the main loop of the program. This will print the multiplexed
        output of all the processes in this ProcessManager to sys.stdout, and
        will block until all the processes have completed.

        If one process terminates, all the others will be terminated
        and loop() will return.

        Returns: the returncode of the first process to exit, or 130 if
        interrupted with Ctrl-C (SIGINT)
        """
        self._init_readers()
        self._init_printers()
        for proc in self.processes:
            self._log.info("Started [%s] with pid [%s]", proc.name, proc.pid)
        while True:
            try:
                # Short timeout so dead processes are detected promptly
                proc, line = self.queue.get(timeout=0.1)
            except Empty:
                pass
            except KeyboardInterrupt:
                self._log.exception("SIGINT received")
                self.returncode = 130
                self.terminate()
            else:
                self._print_line(proc, line)
            # Check for newly exited processes; the first exit sets the
            # manager's returncode and triggers termination of the rest
            for proc in self.processes:
                if not proc.dead and proc.poll() is not None:
                    self._log.info('process [%s] with pid [%s] terminated',
                                   proc.name, proc.pid)
                    proc.dead = True
                    # Set the returncode of the ProcessManager instance if not
                    # already set.
                    if self.returncode is None:
                        self.returncode = proc.returncode
                    self.terminate()
            if not self._process_count() > 0:
                break
        # Drain any output still queued after all processes have exited
        while True:
            try:
                proc, line = self.queue.get(timeout=0.1)
            except Empty:
                break
            else:
                self._print_line(proc, line)
        return self.returncode

    def terminate(self):
        """
        Terminate all the child processes of this ProcessManager, bringing the
        loop() to an end. SIGTERM first; anything still alive after 5 seconds
        is SIGKILLed via an alarm handler.
        """
        if self._terminating:
            return False
        self._terminating = True
        self._log.info("sending SIGTERM to all processes")
        for proc in self.processes:
            if proc.poll() is None:
                self._log.info("sending SIGTERM to pid [%d]", proc.pid)
                proc.terminate()

        def kill(signum, frame):
            # If anything is still alive, SIGKILL it
            for proc in self.processes:
                if proc.poll() is None:
                    self._log.info("sending SIGKILL to pid [%d]", proc.pid)
                    proc.kill()
        signal.signal(signal.SIGALRM, kill)  # @UndefinedVariable
        signal.alarm(5)  # @UndefinedVariable

    def _process_count(self):
        # Number of managed processes that are still running
        return [p.poll() for p in self.processes].count(None)

    def _init_readers(self):
        # One daemon thread per process feeds its output into self.queue
        for proc in self.processes:
            self._log.debug("Starting [%s]", proc.name)
            t = Thread(target=_enqueue_output, args=(proc, self.queue))
            t.daemon = True  # thread dies with the program
            t.start()

    def _init_printers(self):
        # Column width = longest non-quiet process name
        # NOTE(review): max() raises ValueError when every process is quiet —
        # confirm callers never configure the manager that way
        width = max(len(p.name) for p in
                    filter(lambda x: not x.quiet, self.processes))
        for proc in self.processes:
            proc.printer = Printer(sys.stdout,
                                   name=proc.name,
                                   width=width)

    def _print_line(self, proc, line):
        # Reader threads forward UnicodeDecodeError objects instead of lines
        if isinstance(line, UnicodeDecodeError):
            self._log.error(
                "UnicodeDecodeError while decoding line from process [%s]",
                proc.name)
        else:
            print(line, end='', file=proc.printer)
class Printer(object):
    """File-like wrapper that prefixes every non-empty line written to it
    with a timestamp and a fixed-width process name."""

    def __init__(self, output=sys.stdout, name='unknown', width=0):
        self.output = output
        self.name = name
        self.width = width
        self._write_prefix = True

    def write(self, *args, **kwargs):
        prefixed = []
        for chunk in args:
            pieces = chunk.split('\n')
            # Empty pieces (e.g. after a trailing newline) stay unprefixed
            pieces = [self._prefix() + piece if piece else piece for piece in pieces]
            prefixed.append('\n'.join(pieces))
        self.output.write(*prefixed, **kwargs)

    def _prefix(self):
        stamp = datetime.now().strftime('%H:%M:%S')
        padded = self.name.ljust(self.width)
        return '{time} {name} | '.format(time=stamp, name=padded)
|
apache-2.0
|
NaPs/Kolekto
|
kolekto/commands/flags.py
|
1
|
2010
|
from kolekto.commands import Command
from kolekto.printer import printer
from kolekto.helpers import get_hash
class FlagCommand(Command):
    """Base command that sets/unsets boolean flags on a movie's metadata.

    Subclasses declare which flags this command manipulates via the three
    class attributes below.
    """

    flags = []  # Flags to set when this flag is set
    unset_flags = []  # Flags to unset when this flag is set
    unflag_unset_flags = []  # Flags to unset when this flag is unset

    def prepare(self):
        # One or more movie hashes or file paths; --unflag removes instead of sets
        self.add_arg('input', metavar='movie-hash-or-file', nargs='+')
        self.add_arg('--unflag', '-u', action='store_true', default=False)

    def run(self, args, config):
        """Apply (or remove, with --unflag) the command's flags on each input movie."""
        mdb = self.get_metadata_db(args.tree)
        for movie_input in args.input:
            movie_hash = get_hash(movie_input)
            try:
                movie = mdb.get(movie_hash)
            except KeyError:
                printer.p('Unknown movie hash.')
                # NOTE(review): a single bad hash aborts the remaining inputs
                # (return, not continue) — confirm this is intended
                return
            if args.unflag:
                for flag in self.unflag_unset_flags:
                    try:
                        del movie[flag]
                    except KeyError:
                        pass
            else:
                for flag in self.flags:
                    movie[flag] = True
                for flag in self.unset_flags:
                    try:
                        del movie[flag]
                    except KeyError:
                        pass
            mdb.save(movie_hash, movie)
class Watch(FlagCommand):
    """ Flag a movie as watched.
    """
    flags = ['watched']
    # Unwatching also clears favorite/crap, since both imply watched
    unflag_unset_flags = ['watched', 'favorite', 'crap']
    help = 'flag a movie as watched'


class Favorite(FlagCommand):
    """ Flag a movie as favorite.
    """
    flags = ['favorite', 'watched']
    # Favorite and crap are mutually exclusive
    unset_flags = ['crap']
    unflag_unset_flags = ['favorite']
    help = 'flag a movie as favorite (and also set it as watched)'


class Crap(FlagCommand):
    """ Flag a movie as crap.
    """
    flags = ['crap', 'watched']
    # Crap and favorite are mutually exclusive
    unset_flags = ['favorite']
    unflag_unset_flags = ['crap']
    help = 'flag a movie as crap (and also set it as watched)'
|
mit
|
helldorado/ansible
|
lib/ansible/modules/system/debconf.py
|
44
|
5675
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections.
- Or just query existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements:
- debconf
- debconf-utils
options:
name:
description:
- Name of package to configure.
type: str
required: true
aliases: [ pkg ]
question:
description:
- A debconf configuration setting.
type: str
aliases: [ selection, setting ]
vtype:
description:
- The type of the value supplied.
- C(seen) was added in Ansible 2.2.
type: str
choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
value:
description:
- Value to set the configuration to.
type: str
aliases: [ answer ]
unseen:
description:
- Do not set 'seen' flag when pre-seeding.
type: bool
default: no
author:
- Brian Coca (@bcoca)
'''
EXAMPLES = r'''
- name: Set default locale to fr_FR.UTF-8
debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
- name: set to generate locales
debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
- name: Accept oracle license
debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: 'true'
vtype: select
- name: Specifying package you can register/return the list of questions and current values
debconf:
name: tzdata
'''
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
def get_selections(module, pkg):
    """Return the current debconf selections for *pkg* as a dict.

    Runs ``debconf-show <pkg>`` and parses each ``question: value`` line
    into a mapping.  Fails the module when the command exits non-zero.

    :arg module: AnsibleModule instance (command lookup/execution, failure
        reporting)
    :arg pkg: name of the package to query
    """
    cmd = [module.get_bin_path('debconf-show', True), pkg]
    # Pass the argv as a list: run_command then executes it without a shell
    # and without re-splitting, so unusual characters in the package name
    # cannot break the command line.
    rc, out, err = module.run_command(cmd)

    if rc != 0:
        module.fail_json(msg=err)

    selections = {}

    for line in out.splitlines():
        (key, value) = line.split(':', 1)
        # debconf-show prefixes already-seen questions with '*'
        selections[key.strip('*').strip()] = value.strip()

    return selections
def set_selection(module, pkg, question, vtype, value, unseen):
    """Preseed a single debconf answer via ``debconf-set-selections``.

    Python-style booleans ('True'/'False') are normalised to the lowercase
    strings debconf expects.  The selection line is fed on stdin.

    :returns: the ``(rc, stdout, stderr)`` triple from ``run_command``.
    """
    cmd = [module.get_bin_path('debconf-set-selections', True)]
    if unseen:
        # -u: do not mark the question as seen when preseeding.
        cmd.append('-u')

    if vtype == 'boolean':
        value = {'True': 'true', 'False': 'false'}.get(value, value)

    selection = ' '.join([pkg, question, vtype, value])
    return module.run_command(cmd, data=selection)
def main():
    """Module entry point: apply (or just report) a debconf selection.

    Queries the current debconf database for the package, compares the
    requested value against it, and only calls debconf-set-selections when
    a change is actually needed.  Honours check mode and emits a diff when
    requested; when no question is given, just reports current selections.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['pkg']),
            question=dict(type='str', aliases=['selection', 'setting']),
            vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
            value=dict(type='str', aliases=['answer']),
            unseen=dict(type='bool'),
        ),
        required_together=(['question', 'vtype', 'value'],),
        supports_check_mode=True,
    )
    # TODO: enable passing array of options and/or debconf file from get-selections dump
    pkg = module.params["name"]
    question = module.params["question"]
    vtype = module.params["vtype"]
    value = module.params["value"]
    unseen = module.params["unseen"]
    # Snapshot of the selections currently recorded by debconf.
    prev = get_selections(module, pkg)
    changed = False
    msg = ""
    if question is not None:
        if vtype is None or value is None:
            module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
        # if question doesn't exist, value cannot match
        if question not in prev:
            changed = True
        else:
            existing = prev[question]
            # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
            if vtype == 'boolean':
                value = to_text(value).lower()
                existing = to_text(prev[question]).lower()
            if value != existing:
                changed = True
        if changed:
            # Only touch the system outside of check mode.
            if not module.check_mode:
                rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
                if rc:
                    module.fail_json(msg=e)
            curr = {question: value}
            # Narrow 'prev' to just the affected question for reporting.
            if question in prev:
                prev = {question: prev[question]}
            else:
                prev[question] = ''
            if module._diff:
                after = prev.copy()
                after.update(curr)
                diff_dict = {'before': prev, 'after': after}
            else:
                diff_dict = {}
            # exit_json raises SystemExit, so execution stops here on change.
            module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
    # NOTE(review): reached only when nothing changed (or no question was
    # given); the changed-path exit above never falls through to here.
    module.exit_json(changed=changed, msg=msg, current=prev)
if __name__ == '__main__':
    main()
|
gpl-3.0
|
pixlra/HARP-fork
|
PythonLib/Encoder/Base_Encoder.py
|
1
|
9022
|
#!/usr/bin/env python
# coding: utf8
# (c) 2014 Dominic Springer
# File licensed under GNU GPL (see HARP_License.txt)
from Imports_Basic import *
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class Base_Encoder(object):
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#Name = None
#EncBin = None
#DecBin = res("~/lib_Codecs/HM_13.0_Std/bin/TAppDecoder")
#Ini = self.get_INI_Content(SrcDir + "/HM_Encoder.ini")
#PSNR = PSNRTool
#Override = "ReservedForOverride"
#InfoStr = "ReservedForInfoString"
#==========================================
def __init__(self, OutputDir, Passport,
Name, InfoStr,
EncBin, DecBin, INI_FN, OverrideStr,
Sequ, QP,
PSNR_Tool):
#==========================================
#self.ScriptDir = os.path.dirname(os.path.abspath(__file__)) # location of script
#self.TmpDir = os.path.abspath(self.ScriptDir + "/../../tmp")
self.OutputDir = OutputDir
self.Passport = Passport
self.Name = Name
self.InfoStr = InfoStr
self.EncBin = EncBin
self.DecBin = DecBin
self.INI_FN = INI_FN
self.OverrideStr = OverrideStr
self.Sequ = Sequ
self.QP = QP
self.PSNR_Tool = PSNR_Tool
self.Config = load_INI(INI_FN);
self.Prefix = os.path.expandvars(self.Config.get("General", "Prefix")) #expand $HOME
assertFileExists(EncBin, "EncBin missing")
#assertFileExists(DecBin, "DecBin missing")
assertFileExists(INI_FN, "INI_FN missing")
assertFileExists(Sequ.AbsFN, "Sequ.AbsFN missing")
#assertFileExists(PSNR_Tool, "PSNR_Tool missing")
#==========================================
def set_Sequence(Sequ):
#==========================================
self.Sequ = Sequ
#==========================================
def set_QP(QP):
#==========================================
self.QP = QP
#==========================================
def printEncoderInfo(self):
#==========================================
assert Sequ != None and QP != None
print "\n----------------------------------"
print self.Name + "-Encoding of " + basename(self.Sequ.BN)
print "QP = " + self.QP
print "----------------------------------"
print "Encoder: using enc binary " + self.Codec.EncBin
print "Encoder: using dec binary " + self.Codec.DecBin
print "Encoder: OverrideStr " + (self.OverrideStr if self.OverrideStr != "" else "off")
print "Encoder: ParamName = " + self.ParamName
print "Encoder: ParamValue = " + str(self.ParamValue)
#==========================================
def get_INI_Arguments(self):
#==========================================
Call = ""
# Section
Section = "Args_Sequence"
List = self.Config.options(Section)
for Arg in List:
Value = self.Config.get(Section, Arg)
if Value != None: #if no value present
Final = (" %s%s=%s" %(self.Prefix, Arg, Value))
else:
Final = (" %s%s" %(self.Prefix, Arg))
Call += Final
# Section
Section = "Args_RDO"
List = self.Config.options(Section)
for Arg in List:
Value = self.Config.get(Section, Arg)
if Value != None: #if no value present
Final = (" %s%s=%s" %(self.Prefix, Arg, Value))
else:
Final = (" %s%s" %(self.Prefix, Arg))
Call += Final
# Section
Section = "Args_Encoding"
List = self.Config.options(Section)
#print List
for Arg in List:
Value = self.Config.get(Section, Arg)
if Value != None: #if no value present
Final = (" %s%s=%s" %(self.Prefix, Arg, Value))
else:
Final = (" %s%s" %(self.Prefix, Arg))
Call += Final
# Section
Section = "Args_Other"
List = self.Config.options(Section)
for Arg in List:
Value = self.Config.get(Section, Arg)
if Value != None: #if no value present
Final = (" %s%s=%s" %(self.Prefix, Arg, Value))
else:
Final = (" %s%s" %(self.Prefix, Arg))
Call += Final
# setting override argument
if self.OverrideStr != "":
print "Overriding with: " + self.OverrideStr
Call += " " + self.OverrideStr
return Call
#==========================================
def calculate_PSNR_SSIM(self, recyuv):
#==========================================
#PSNR
Call = self.Codec.PSNR + " -psnr -s " + self.DimX + " " + self.DimY + " " + recyuv + " \"" + self.SequName + "\""
process = subprocess.Popen(Call, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
print "PSNR tool returned:\n" + out
assert(process.returncode == 0)
#REGEX SEARCH
res1 = re.search(r"Avg:\s*\d*\.\d*\t", out)
assert res1 != None, "Regex not found"
res2 = re.search(r"\d*\.\d*", res1.group())
assert res2 != None, "Regex not found"
result = float(res2.group())
PSNR = result
#SSIM
Call = self.Codec.PSNR + " -ssim -s " + self.DimX + " " + self.DimY + " " + recyuv + " \"" + self.SequName + "\""
process = subprocess.Popen(Call, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
assert(process.returncode == 0)
print "SSIM tool returned:\n" + out
#REGEX SEARCH
res1 = re.search(r"Avg:\s*\d*\.\d*\t", out)
assert res1 != None, "Regex not found"
res2 = re.search(r"\d*\.\d*", res1.group())
assert res2 != None, "Regex not found"
result = float(res2.group())
SSIM = result
print "Avg Y-PSNR: " + str(PSNR)
print "Avg Y-SSIM: " + str(SSIM)
return PSNR, SSIM
#==========================================
def calculateResults(self, recyuv):
#==========================================
PSNR, SSIM = self.calculate_PSNR_SSIM(recyuv)
# calculating bitrate
FilesizeBytes = float(os.path.getsize(self.bitstream))
BitrateMBits = (FilesizeBytes * 8 * float(self.Fps)) / (float(self.NumFrames) * 1000 * 1000)
print("Size of %s: %.4f KB" % (self.bitstream, FilesizeBytes/1000))
print("Bitrate: %.8f MBit/s" % BitrateMBits)
#str(self.ParamValue) + "\t\t" +
Result = str(self.ParamValue) + "\t\t" + \
"{:.4f}".format(PSNR) + "\t\t" + "{:.4f}".format(SSIM) + \
"\t\t" + "{:.4f}".format(BitrateMBits) + "\n"
return Result
#==========================================
def appendToLog(self, Call, out, err):
#==========================================
#we keep this call for later
self.ThreadLock.acquire()
LogFileName = "./tmp/Log_" + self.Codec.Name + "_CalledCommands.txt"
Log = open(LogFileName, "a")
Log.write(Call + "\n\n")
Log.close()
LogFileName = "./tmp/Log_" + self.Codec.Name + "_Stdout.txt"
Log = open(LogFileName, "a")
Log.write("=========================================\n")
Log.write("Call:\n" + Call + "\n")
Log.write("=========================================\n")
Log.write("Stdout:\n" + out + "\nStderr:\n" + err + "\n\n\n")
Log.close()
self.ThreadLock.release()
#==========================================
def execute(self, Call):
#==========================================
print "Calling (one liner): "
print Call
print "Calling (human readable): "
print Call.replace(" ", "\n")
# Calling!
#assert os.system(Call) == 0, "Encoder call returned error"
process = subprocess.Popen(Call, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# activate real-time output on stdout
for line in iter(process.stdout.readline, b''):
print line,
out, err = process.communicate()
self.appendToLog(Call, out, err)
ErrorStr = "Error: Call returned -1\n" + Call + "\nStdout:\n" + out + "\nStderr:\n" + err
assert(process.returncode == 0), ErrorStr
#print "Stdout:\n" + out + "\nStderr:\n" + err
|
gpl-3.0
|
ouijan/rabbit
|
tests/test_app.py
|
1
|
3766
|
import unittest
from mock import *
from rabbit import settings
from rabbit.app import App
from rabbit.config import Config
from rabbit.command import Command
from rabbit.group import Group
class TestApp(unittest.TestCase):
    """Unit tests for rabbit.app.App.

    Note: several methods originally shared the same name (duplicate
    definitions of the loadLocalComfig/loadLocalConfig tests), so only
    the *last* definition of each name was ever collected and the earlier
    ones silently never ran.  They have been renamed to be unique.
    """

    def test_it_creates_CONFIG_FILE_global(self):
        self.assertNotEqual(settings.CONFIG_FILE, None)

    def test_it_sets_CONFIG_FILE_to_rabbit_yaml(self):
        self.assertEqual(settings.CONFIG_FILE, 'rabbit.yaml')

    def test_it_creates_a_new_object(self):
        app = App()
        self.assertTrue(isinstance(app, (App)))

    def test_it_sets_config_property(self):
        app = App()
        self.assertTrue(isinstance(app.config, (Config)))

    def test_it_sets_baseGroup_property(self):
        app = App()
        self.assertTrue(isinstance(app.baseGroup, (Group)))

    @patch('rabbit.app.App.bootstrap')
    def test_it_runs_bootstrap_on_init(self, bootstrap):
        app = App()
        bootstrap.assert_called_with()

    """
    Bootstrap Tests
    - Runs the loadHomeConfig Method
    - Runs the loadLocalConfig Method
    - Runs the loadCommands Method
    """

    @patch('rabbit.app.App.loadHomeConfig')
    def test_bootstrap_runs_loadHomeConfig(self, loadHomeConfig):
        app = App()
        app.bootstrap()
        loadHomeConfig.assert_called_with()

    @patch('rabbit.app.App.loadLocalConfig')
    def test_bootstrap_runs_loadLocalConfig(self, loadLocalConfig):
        app = App()
        app.bootstrap()
        loadLocalConfig.assert_called_with()

    @patch('rabbit.app.App.loadCommands')
    def test_bootstrap_runs_loadCommands(self, loadCommands):
        app = App()
        app.bootstrap()
        loadCommands.assert_called_with()

    """
    loadHomeConfig Tests
    - it runs the app.config.load on the correct path
    """

    @patch('rabbit.config.Config.load')
    @patch('os.path.expanduser')
    def test_loadHomeConfig_loads_from_home_directory(self, expanduser_mock, config_load):
        app = App()
        expanduser_mock.return_value = 'test'
        homepath = "test/" + settings.CONFIG_FILE
        app.loadHomeConfig()
        config_load.assert_called_with(homepath)

    """
    loadLocalConfig Tests
    - it runs the app.config.load on the correct path
    """

    @patch('rabbit.config.Config.load')
    def test_loadLocalConfig_loads_from_current_directory(self, config_load):
        app = App()
        localpath = "./" + settings.CONFIG_FILE
        app.loadLocalConfig()
        config_load.assert_called_with(localpath)

    """
    loadCommands Tests
    - It doesnt call addCommand when no commands are present
    - It creates a new command with given data
    - It calls addCommand for each command
    """

    @patch('rabbit.config.Config.get')
    def test_it_doesnt_call_addCommand_when_no_commands_are_present(self, config_get):
        app = App()
        config_get.return_value = None
        result = app.loadCommands()
        self.assertFalse(result)

    @patch('rabbit.config.Config.get')
    @patch('rabbit.app.App.createCommand')
    def test_it_creates_a_new_command_with_given_data(self, create_command, config_get):
        app = App()
        config_get.return_value = [1]
        result = app.loadCommands()
        create_command.assert_called_with(1)
        self.assertTrue(result)

    @patch('rabbit.config.Config.get')
    @patch('rabbit.app.App.createCommand')
    @patch('rabbit.app.App.addCommand')
    def test_it_calls_addCommand_for_each_command(self, add_command, create_command, config_get):
        app = App()
        config_get.return_value = [1]
        create_command.return_value = 'test'
        result = app.loadCommands()
        add_command.assert_called_with('test')
        self.assertTrue(result)

    """
    addCommand Tests
    - It validates command object falsy
    - It finds the correct child group
    - It adds the command to the given child group
    """

    def test_it_validates_command_object_falsy(self):
        app = App()
        command = object
        result = app.addCommand(command)
        self.assertFalse(result)

    def test_it_finds_the_correct_child_group(self):
        app = App()
        command = Command()
        result = app.addCommand(command)
        self.assertTrue(result)
|
mit
|
museumsvictoria/nodel-recipes
|
(retired)/udp/script.py
|
2
|
1996
|
# Copyright (c) 2014 Museum Victoria
# This software is released under the MIT license (see license.txt for details)
'''This node demonstrates a simple udp controller.'''
import socket
# Functions used by this node
def send_udp_string(msg):
#open socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sock.sendto(msg, (param_ipAddress, param_port))
except socket.error, msg:
print "error: %s\n" % msg
local_event_Error.emit(msg)
finally:
if sock:
sock.close()
# Local actions this Node provides
def local_action_Start(arg = None):
    """{"title":"Turns on","desc":"Turns this node on.","group":"General"}"""
    # NOTE: the docstring above is parsed by the Nodel runtime as JSON
    # action metadata - do not edit it casually.
    print 'Action TurnOn requested'
    send_udp_string('start\n')
def local_action_Stop(arg = None):
    """{"title":"Turns off","desc":"Turns this node off.","group":"General"}"""
    # Sends the 'stop' command to the device over UDP.
    print 'Action TurnOff requested'
    send_udp_string('stop\n')
def local_action_SetLogging(arg = None):
    """{"title":"Set logging","desc":"Set logging level.","schema":{"title":"Level","type":"string","enum":["file","normal"],"required":"true"},"group":"General"}"""
    # arg is one of the enum values declared in the JSON schema above.
    print 'Action SetLogging requested - '+arg
    send_udp_string('logging:'+arg+'\n')
def local_action_SetVolume(arg = None):
    """{"title":"Set volume","desc":"Set volume.","schema":{"title":"Level","type":"integer","required":"true"},"group":"General"}"""
    # arg is an integer volume level (see schema above).
    print 'Action SetVolume requested - '+str(arg)
    send_udp_string('volume:'+str(arg)+'\n')
# Local events this Node provides
# Emitted whenever UDP communication with the device fails.
local_event_Error = LocalEvent('{"title":"Error","desc":"An error has occured while communicating with the device.","group":"General"}')
# local_event_Error.emit(arg)
# Parameters used by this Node
# Target host and UDP port, configurable from the Nodel admin UI.
param_ipAddress = Parameter('{"desc":"The IP address to connect to.","schema":{"type":"string"},"value":"192.168.100.1"}')
param_port = Parameter('{"desc":"The Port to connect to.","schema":{"type":"integer"},"value":"80"}')
def main(arg = None):
    """Entry point invoked by the Nodel runtime once the node is loaded."""
    # Start your script here.
    print 'Nodel script started.'
|
mit
|
tlodge/homehub.nox
|
src/nox/coreapps/examples/countdown.py
|
10
|
1618
|
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
# Trivial example using reactor timer method to countdown from three
from nox.lib.core import *
import logging
logger = logging.getLogger('nox.coreapps.examples.countdown')

# Words emitted one per second; 'index' tracks progress across timer
# callbacks (module-level because count_down mutates it via 'global').
numbers = ["one","two","three"]
index = 0
class countdown(Component):
    """NOX component that logs a three-word countdown, one word per second.

    Demonstrates the reactor timer: install() arms a 1-second callback,
    and count_down() re-arms itself until every word has been logged.
    """

    def __init__(self, ctxt):
        Component.__init__(self, ctxt)

    def install(self):
        # call every second
        self.post_callback(1, lambda : self.count_down())

    def getInterface(self):
        return str(countdown)

    def count_down(self):
        global index
        # No, this isn't mispelled:. If you're curious, see Farscape
        # episode 1.17
        logger.debug("%s %s" % (numbers[index], 'mippippi'))
        index+=1
        # Re-arm the one-second timer until all words are logged.
        if index < len(numbers):
            self.post_callback(1, lambda : self.count_down())
def getFactory():
    """Return the factory object NOX uses to instantiate this component."""
    class Factory:
        # NOX calls instance(ctxt) to build the component.
        def instance(self, ctxt):
            return countdown(ctxt)
    return Factory()
|
gpl-3.0
|
igraph/python-igraph
|
src/igraph/statistics.py
|
1
|
22508
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""
Statistics related stuff in igraph
"""
import math
__all__ = (
"FittedPowerLaw",
"Histogram",
"RunningMean",
"mean",
"median",
"percentile",
"quantile",
"power_law_fit",
)
class FittedPowerLaw(object):
    """Result of fitting a power-law distribution to a vector of samples.

    Instances expose the fit as plain attributes:

      - C{continuous}: whether the fit assumed a continuous input scale
      - C{alpha}: the fitted exponent
      - C{xmin}: the fitted lower cutoff
      - C{L}: the log-likelihood of the fit
      - C{D}: the Kolmogorov-Smirnov test statistic
      - C{p}: the p-value of the Kolmogorov-Smirnov test

    C{str()} renders a human-readable report at the default 0.05
    significance level; see L{summary} for other levels.
    """

    def __init__(self, continuous, alpha, xmin, L, D, p):
        self.continuous = continuous
        self.xmin = xmin
        self.alpha = alpha
        self.L = L
        self.D = D
        self.p = p

    def __repr__(self):
        cls_name = self.__class__.__name__
        return "%s(continuous=%r, alpha=%r, xmin=%r, L=%r, D=%r, p=%r)" % (
            cls_name,
            self.continuous,
            self.alpha,
            self.xmin,
            self.L,
            self.D,
            self.p,
        )

    def __str__(self):
        return self.summary(significance=0.05)

    def summary(self, significance=0.05):
        """Return a multi-line summary of the power-law fit.

        @param significance: significance level of the Kolmogorov-Smirnov
          test used to phrase the H0 verdict.
        @return: the summary as a string
        """
        scale = "continuous" if self.continuous else "discrete"
        if self.p < significance:
            verdict = "H0 rejected at significance level %g" % significance
        else:
            verdict = (
                "H0 could not be rejected at significance level %g" % significance
            )
        lines = [
            "Fitted power-law distribution on %s data" % scale,
            "",
            "Exponent (alpha) = %f" % self.alpha,
            "Cutoff (xmin) = %f" % self.xmin,
            "",
            "Log-likelihood = %f" % self.L,
            "",
            "H0: data was drawn from the fitted distribution",
            "",
            "KS test statistic = %f" % self.D,
            "p-value = %f" % self.p,
            "",
            verdict,
        ]
        return "\n".join(lines)
class Histogram(object):
    """Generic histogram class for real numbers

    Bins are laid out on a fixed-width grid anchored on the first value
    seen; the grid grows automatically in both directions as values are
    added.  Mean/sd/var are tracked incrementally via RunningMean.

    Example:

    >>> h = Histogram(5) # Initializing, bin width = 5
    >>> h << [2,3,2,7,8,5,5,0,7,9] # Adding more items
    >>> print(h)
    N = 10, mean +- sd: 4.8000 +- 2.9740
    [ 0, 5): **** (4)
    [ 5, 10): ****** (6)
    """

    def __init__(self, bin_width=1, data=None):
        """Initializes the histogram with the given data set.

        @param bin_width: the bin width of the histogram.
        @param data: the data set to be used. Must contain real numbers.
        """
        self._bin_width = float(bin_width)
        self._bins = None
        self._min, self._max = None, None
        self._running_mean = RunningMean()
        self.clear()
        if data:
            self.add_many(data)

    def _get_bin(self, num, create=False):
        """Returns the bin index corresponding to the given number.

        @param num: the number for which the bin is being sought
        @param create: whether to create a new bin if no bin exists yet.
        @return: the index of the bin or C{None} if no bin exists yet and
          {create} is C{False}."""
        if len(self._bins) == 0:
            # First value ever seen: anchor the bin grid on it.
            if not create:
                result = None
            else:
                self._min = int(num / self._bin_width) * self._bin_width
                self._max = self._min + self._bin_width
                self._bins = [0]
                result = 0
            return result
        if num >= self._min:
            binidx = int((num - self._min) / self._bin_width)
            if binidx < len(self._bins):
                return binidx
            if not create:
                return None
            # Value beyond the right edge: extend the bin list rightwards.
            extra_bins = binidx - len(self._bins) + 1
            self._bins.extend([0] * extra_bins)
            self._max = self._min + len(self._bins) * self._bin_width
            return binidx
        if not create:
            return None
        # Value below the current minimum: prepend bins on the left.
        extra_bins = int(math.ceil((self._min - num) / self._bin_width))
        self._bins[0:0] = [0] * extra_bins
        self._min -= extra_bins * self._bin_width
        self._max = self._min + len(self._bins) * self._bin_width
        return 0

    @property
    def n(self):
        """Returns the number of elements in the histogram"""
        return len(self._running_mean)

    @property
    def mean(self):
        """Returns the mean of the elements in the histogram"""
        return self._running_mean.mean

    @property
    def sd(self):
        """Returns the standard deviation of the elements in
        the histogram"""
        return self._running_mean.sd

    @property
    def var(self):
        """Returns the variance of the elements in the histogram"""
        return self._running_mean.var

    def add(self, num, repeat=1):
        """Adds a single number to the histogram.

        @param num: the number to be added
        @param repeat: number of repeated additions
        """
        num = float(num)
        binidx = self._get_bin(num, True)
        self._bins[binidx] += repeat
        self._running_mean.add(num, repeat)

    def add_many(self, data):
        """Adds a single number or the elements of an iterable to the histogram.

        @param data: the data to be added"""
        try:
            iterator = iter(data)
        except TypeError:
            # Not iterable: treat it as a single number.
            iterator = iter([data])
        for x in iterator:
            self.add(x)

    __lshift__ = add_many

    def clear(self):
        """Clears the collected data"""
        self._bins = []
        self._min, self._max = None, None
        self._running_mean = RunningMean()

    def bins(self):
        """Generator returning the bins of the histogram in increasing order

        @return: a tuple with the following elements: left bound, right bound,
          number of elements in the bin"""
        x = self._min
        for elem in self._bins:
            yield (x, x + self._bin_width, elem)
            x += self._bin_width

    def __plot__(self, context, bbox, _, **kwds):
        """Plotting support"""
        from igraph.drawing.coord import DescartesCoordinateSystem

        coord_system = DescartesCoordinateSystem(
            context,
            bbox,
            (
                kwds.get("min", self._min),
                0,
                kwds.get("max", self._max),
                kwds.get("max_value", max(self._bins)),
            ),
        )
        # Draw the boxes
        context.set_line_width(1)
        context.set_source_rgb(1.0, 0.0, 0.0)
        x = self._min
        for value in self._bins:
            top_left_x, top_left_y = coord_system.local_to_context(x, value)
            x += self._bin_width
            bottom_right_x, bottom_right_y = coord_system.local_to_context(x, 0)
            context.rectangle(
                top_left_x,
                top_left_y,
                bottom_right_x - top_left_x,
                bottom_right_y - top_left_y,
            )
            context.fill()
        # Draw the axes
        coord_system.draw()

    def to_string(self, max_width=78, show_bars=True, show_counts=True):
        """Returns the string representation of the histogram.

        @param max_width: the maximal width of each line of the string
          This value may not be obeyed if it is too small.
        @param show_bars: specify whether the histogram bars should be shown
        @param show_counts: specify whether the histogram counts should be
          shown. If both I{show_bars} and I{show_counts} are C{False},
          only a general descriptive statistics (number of elements, mean and
          standard deviation) is shown.
        """
        if self._min is None or self._max is None:
            return "N = 0"
        # Determine how many decimal digits should we use
        if int(self._min) == self._min and int(self._bin_width) == self._bin_width:
            number_format = "%d"
        else:
            number_format = "%.3f"
        num_length = max(len(number_format % self._min), len(number_format % self._max))
        number_format = "%" + str(num_length) + number_format[1:]
        format_string = "[%s, %s): %%s" % (number_format, number_format)
        # Calculate the scale of the bars on the histogram
        if show_bars:
            maxval = max(self._bins)
            if show_counts:
                maxval_length = len(str(maxval))
                # Budget: brackets, separators and the trailing " (count)".
                scale = maxval // (max_width - 2 * num_length - maxval_length - 9)
            else:
                scale = maxval // (max_width - 2 * num_length - 6)
            scale = max(scale, 1)
        result = ["N = %d, mean +- sd: %.4f +- %.4f" % (self.n, self.mean, self.sd)]
        if show_bars:
            # Print the bars
            if scale > 1:
                result.append("Each * represents %d items" % scale)
            if show_counts:
                format_string += " (%d)"
                for left, right, cnt in self.bins():
                    result.append(
                        format_string % (left, right, "*" * (cnt // scale), cnt)
                    )
            else:
                for left, right, cnt in self.bins():
                    result.append(format_string % (left, right, "*" * (cnt // scale)))
        elif show_counts:
            # Print the counts only
            for left, right, cnt in self.bins():
                result.append(format_string % (left, right, cnt))
        return "\n".join(result)

    def __str__(self):
        return self.to_string()
class RunningMean(object):
    """Online mean and standard deviation calculator.

    Values are folded in one at a time (or in batches) using Welford's
    update, so the mean, standard deviation and variance are available at
    any moment without storing the samples themselves.  Works for data
    sources of arbitrary length.
    """

    def __init__(self, items=None, n=0.0, mean=0.0, sd=0.0):
        """RunningMean(items=None, n=0.0, mean=0.0, sd=0.0)

        There are two mutually exclusive ways to initialise the
        calculator: either seed it from an iterable of items, or resume an
        interrupted calculation from a previously recorded state (item
        count, mean and standard deviation).

        @param items: iterable used to seed the calculator; when given,
          C{n}, C{mean} and C{sd} must all be zero.
        @param n: number of items already processed (resume mode).
        @param mean: mean of the items already processed (resume mode).
        @param sd: standard deviation of the items already processed
          (resume mode)."""
        if items is not None:
            if n != 0 or mean != 0 or sd != 0:
                raise ValueError("n, mean and sd must be zeros if items is not None")
            self.clear()
            self.add_many(items)
            return
        self._nitems = float(n)
        self._mean = float(mean)
        if n > 1:
            # Recover the running sum of squared differences from sd.
            self._sqdiff = float(sd) ** 2 * float(n - 1)
            self._sd = float(sd)
        else:
            self._sqdiff = 0.0
            self._sd = 0.0

    def add(self, value, repeat=1):
        """RunningMean.add(value, repeat=1)

        Fold C{value} into the running statistics.

        @param value: the element to be added
        @param repeat: number of repeated additions
        """
        repeat = int(repeat)
        self._nitems += repeat
        # Welford's update, generalised to weighted (repeated) samples.
        delta = value - self._mean
        self._mean += repeat * delta / self._nitems
        self._sqdiff += (repeat * delta) * (value - self._mean)
        if self._nitems > 1:
            self._sd = (self._sqdiff / (self._nitems - 1)) ** 0.5

    def add_many(self, values):
        """RunningMean.add_many(values)

        Add every element of an iterable; a single number is accepted as
        well.  Aliased to the left-shift operator, so this works:

        >>> rm=RunningMean()
        >>> rm << [1,2,3,4]
        >>> rm.result # doctest:+ELLIPSIS
        (2.5, 1.290994...)

        @param values: the element(s) to be added
        @type values: iterable"""
        try:
            iterator = iter(values)
        except TypeError:
            iterator = iter((values,))
        for item in iterator:
            self.add(item)

    def clear(self):
        """Forget everything and restart from an empty state."""
        self._nitems, self._mean = 0.0, 0.0
        self._sqdiff, self._sd = 0.0, 0.0

    @property
    def result(self):
        """The current (mean, standard deviation) pair."""
        return self._mean, self._sd

    @property
    def mean(self):
        """The current mean."""
        return self._mean

    @property
    def sd(self):
        """The current standard deviation."""
        return self._sd

    @property
    def var(self):
        """The current variance."""
        return self._sd ** 2

    def __repr__(self):
        return "%s(n=%r, mean=%r, sd=%r)" % (
            type(self).__name__,
            int(self._nitems),
            self._mean,
            self._sd,
        )

    def __str__(self):
        return "Running mean (N=%d, %f +- %f)" % (self._nitems, self._mean, self._sd)

    __lshift__ = add_many

    def __float__(self):
        return float(self._mean)

    def __int__(self):
        return int(self._mean)

    def __complex__(self):
        return complex(self._mean)

    def __len__(self):
        return int(self._nitems)
def mean(xs):
    """Returns the mean of an iterable.

    Example:

    >>> mean([1, 4, 7, 11])
    5.75

    @param xs: an iterable yielding numbers.
    @return: the mean of the numbers provided by the iterable.
    @see: RunningMean() if you also need the variance or the standard deviation
    """
    running = RunningMean()
    running.add_many(xs)
    return running.mean
def median(xs, sort=True):
    """Returns the median of an unsorted or sorted numeric vector.

    @param xs: the vector itself.
    @param sort: whether to sort the vector. If you know that the vector is
      sorted already, pass C{False} here.
    @return: the median, which will always be a float, even if the vector
      contained integers originally.
    """
    values = sorted(xs) if sort else xs
    half = len(values) // 2
    if len(values) % 2 == 0:
        # Even count: average the two middle elements.
        return (values[half - 1] + values[half]) / 2.0
    return float(values[half])
def percentile(xs, p=(25, 50, 75), sort=True):
    """Returns the pth percentile of an unsorted or sorted numeric vector.

    This is equivalent to calling quantile(xs, p/100.0); see L{quantile}
    for more details on the calculation.

    Example:

    >>> round(percentile([15, 20, 40, 35, 50], 40), 2)
    26.0
    >>> for perc in percentile([15, 20, 40, 35, 50], (0, 25, 50, 75, 100)):
    ...     print("%.2f" % perc)
    ...
    15.00
    17.50
    35.00
    45.00
    50.00

    @param xs: the vector itself.
    @param p: the percentile we are looking for. It may also be a list if you
      want to calculate multiple quantiles with a single call. The default
      value calculates the 25th, 50th and 75th percentile.
    @param sort: whether to sort the vector. If you know that the vector is
      sorted already, pass C{False} here.
    @return: the pth percentile, which will always be a float, even if the
      vector contained integers originally. If p is a list, the result will
      also be a list containing the percentiles for each item in the list.
    """
    # A bare number means a single percentile; anything iterable means many.
    if not hasattr(p, "__iter__"):
        return quantile(xs, p / 100.0, sort)
    return quantile(xs, (pct / 100.0 for pct in p), sort)
def power_law_fit(data, xmin=None, method="auto", return_alpha_only=False):
    """Fitting a power-law distribution to empirical data
    @param data: the data to fit, a list containing integer values
    @param xmin: the lower bound for fitting the power-law. If C{None},
      the optimal xmin value will be estimated as well. Zero means that
      the smallest possible xmin value will be used.
    @param method: the fitting method to use. The following methods are
      implemented so far:
      - C{continuous}, C{hill}: exact maximum likelihood estimation
        when the input data comes from a continuous scale. This is
        known as the Hill estimator. The statistical error of
        this estimator is M{(alpha-1) / sqrt(n)}, where alpha is the
        estimated exponent and M{n} is the number of data points above
        M{xmin}. The estimator is known to exhibit a small finite
        sample-size bias of order M{O(n^-1)}, which is small when
        M{n > 100}. igraph will try to compensate for the finite sample
        size if n is small.
      - C{discrete}: exact maximum likelihood estimation when the
        input comes from a discrete scale (see Clauset et al among the
        references).
      - C{auto}: exact maximum likelihood estimation where the continuous
        method is used if the input vector contains at least one fractional
        value and the discrete method is used if the input vector contains
        integers only.
    @return: a L{FittedPowerLaw} object. The fitted C{xmin} value and the
      power-law exponent can be queried from the C{xmin} and C{alpha}
      properties of the returned object.
    @newfield ref: Reference
    @ref: MEJ Newman: Power laws, Pareto distributions and Zipf's law.
      Contemporary Physics 46, 323-351 (2005)
    @ref: A Clauset, CR Shalizi, MEJ Newman: Power-law distributions
      in empirical data. E-print (2007). arXiv:0706.1062"""
    # Imported here (not at module level) to keep the import lazy.
    from igraph._igraph import _power_law_fit

    # The C layer interprets a negative xmin as "estimate xmin as well".
    if xmin is None or xmin < 0:
        xmin = -1
    method = method.lower()
    if method not in ("continuous", "hill", "discrete", "auto"):
        raise ValueError("unknown method: %s" % method)
    # "continuous"/"hill" force the continuous MLE regardless of the data;
    # "auto"/"discrete" let the C layer pick based on the input values.
    force_continuous = method in ("continuous", "hill")
    fit = FittedPowerLaw(*_power_law_fit(data, xmin, force_continuous))
    if return_alpha_only:
        # Deprecated escape hatch: return only the exponent.
        from igraph import deprecated
        deprecated(
            "The return_alpha_only keyword argument of power_law_fit is "
            "deprecated from igraph 0.7 and will be removed in igraph 0.8"
        )
        return fit.alpha
    else:
        return fit
def quantile(xs, q=(0.25, 0.5, 0.75), sort=True):
    """Returns the qth quantile of an unsorted or sorted numeric vector.
    There are a number of different ways to calculate the sample quantile. The
    method implemented by igraph is the one recommended by NIST. First we
    calculate a rank n as q(N+1), where N is the number of items in xs, then we
    split n into its integer component k and decimal component d. If k <= 1,
    we return the first element; if k >= N, we return the last element,
    otherwise we return the linear interpolation between xs[k-1] and xs[k]
    using a factor d.
    Example:
    >>> round(quantile([15, 20, 40, 35, 50], 0.4), 2)
    26.0
    @param xs: the vector itself.
    @param q: the quantile we are looking for. It may also be a list if you
      want to calculate multiple quantiles with a single call. The default
      value calculates the 25th, 50th and 75th percentile.
    @param sort: whether to sort the vector. If you know that the vector is
      sorted already, pass C{False} here.
    @return: the qth quantile, which will always be a float, even if the vector
      contained integers originally. If q is a list, the result will also be a
      list containing the quantiles for each item in the list.
    """
    if not xs:
        raise ValueError("xs must not be empty")
    if sort:
        xs = sorted(xs)

    def _single_quantile(frac):
        # NIST-recommended estimator: rank = q * (N + 1), then linear
        # interpolation between the neighboring order statistics.
        if frac < 0 or frac > 1:
            raise ValueError("q must be between 0 and 1")
        rank = float(frac) * (len(xs) + 1)
        k = int(rank)
        d = rank - k
        if k >= len(xs):
            return xs[-1]
        if k < 1:
            return xs[0]
        return (1 - d) * xs[k - 1] + d * xs[k]

    if hasattr(q, "__iter__"):
        return [_single_quantile(frac) for frac in q]
    return _single_quantile(q)
def sd(xs):
    """Returns the standard deviation of an iterable.
    Example:
    >>> sd([1, 4, 7, 11])  #doctest:+ELLIPSIS
    4.2720...
    @param xs: an iterable yielding numbers.
    @return: the standard deviation of the numbers provided by the iterable.
    @see: RunningMean() if you also need the mean
    """
    stats = RunningMean(xs)
    return stats.sd
def var(xs):
    """Returns the variance of an iterable.
    Example:
    >>> var([1, 4, 8, 11])  #doctest:+ELLIPSIS
    19.333333...
    @param xs: an iterable yielding numbers.
    @return: the variance of the numbers provided by the iterable.
    @see: RunningMean() if you also need the mean
    """
    stats = RunningMean(xs)
    return stats.var
|
gpl-2.0
|
ldirer/scikit-learn
|
sklearn/manifold/locally_linear.py
|
14
|
26498
|
"""Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from scipy.sparse.linalg import eigsh
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Y along the first axis
    We estimate the weights to assign to each point in Y[i] to recover
    the point X[i]. The barycenter weights sum to 1.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_dim)
    Z : array-like, shape (n_samples, n_neighbors, n_dim)
    reg : float, optional
        amount of regularization to add for the problem to be
        well-posed in the case of n_neighbors > n_dim
    Returns
    -------
    B : array-like, shape (n_samples, n_neighbors)
    Notes
    -----
    See developers note for more information.
    """
    X = check_array(X, dtype=FLOAT_DTYPES)
    Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
    n_samples, n_neighbors = X.shape[0], Z.shape[1]
    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
    # Right-hand side of the local linear systems; combined with the final
    # normalization it enforces that each weight row sums to one.
    v = np.ones(n_neighbors, dtype=X.dtype)
    # this might raise a LinalgError if G is singular and has trace
    # zero
    for i, A in enumerate(Z.transpose(0, 2, 1)):
        C = A.T - X[i]  # broadcasting
        # Local Gram matrix of the neighbors centered on X[i].
        G = np.dot(C, C.T)
        trace = np.trace(G)
        # Tikhonov regularization scaled by the trace keeps G invertible
        # when there are more neighbors than input dimensions.
        if trace > 0:
            R = reg * trace
        else:
            R = reg
        # Add R to the diagonal in place (strided flat-view trick).
        G.flat[::Z.shape[1] + 1] += R
        w = solve(G, v, sym_pos=True)
        # Normalize into barycentric coordinates (weights sum to 1).
        B[i, :] = w / np.sum(w)
    return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
    """Computes the barycenter weighted graph of k-Neighbors for points in X
    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.
    n_neighbors : int
        Number of neighbors for each sample.
    reg : float, optional
        Amount of regularization when solving the least-squares
        problem. Only relevant if mode='barycenter'. If None, use the
        default.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
    See also
    --------
    sklearn.neighbors.kneighbors_graph
    sklearn.neighbors.radius_neighbors_graph
    """
    # n_neighbors + 1 because each sample is returned as its own neighbor.
    knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
    X = knn._fit_X
    n_samples = X.shape[0]
    # Drop the first neighbor column: it is each point itself.
    ind = knn.kneighbors(X, return_distance=False)[:, 1:]
    data = barycenter_weights(X, X[ind], reg=reg)
    # Each CSR row holds exactly n_neighbors weights.
    indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    return csr_matrix((data.ravel(), ind.ravel(), indptr),
                      shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.
    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite
    k : integer
        Number of eigenvalues/vectors to return
    k_skip : integer, optional
        Number of low eigenvalues to skip.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition. For this method, M must be an array
            or matrix type. This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.
    max_iter : maximum number of iterations for 'arpack' method
        not used if eigen_solver=='dense'
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``solver`` == 'arpack'.
    """
    if eigen_solver == 'auto':
        # Heuristic: shift-invert ARPACK pays off only for larger matrices
        # when few eigenvectors are requested.
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'
    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        # initialize with [-1,1] as in ARPACK
        v0 = random_state.uniform(-1, 1, M.shape[0])
        try:
            # sigma=0.0 puts eigsh in shift-invert mode around zero, i.e.
            # it targets the eigenvalues closest to the null space.
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                tol=tol, maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved. method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)
        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            M = M.toarray()
        # eigvals selects the (k_skip..k+k_skip-1)-th smallest eigenvalues.
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
        X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
        max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
        random_state=None, n_jobs=1):
    """Perform a Locally Linear Embedding analysis on the data.
    Read more in the :ref:`User Guide <locally_linear_embedding>`.
    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold.
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition. For this method, M must be an array
            or matrix type. This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian : use the Hessian eigenmap method. This method requires
                  n_neighbors > n_components * (1 + (n_components + 1) / 2.
                  see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa : use local tangent space alignment algorithm
               see reference [4]_
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'
    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``solver`` == 'arpack'.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.
    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding. Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A. 100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ. 8:406 (2004)`
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)
    # n_neighbors + 1 because each point is returned as its own neighbor
    # and is stripped off below.
    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
    nbrs.fit(X)
    X = nbrs._fit_X
    N, d_in = X.shape
    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("n_neighbors must be less than number of points")
    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")
    M_sparse = (eigen_solver != 'dense')
    if method == 'standard':
        W = barycenter_kneighbors_graph(
            nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # W = W - I = W - I
    elif method == 'hessian':
        # dp = number of entries in the upper triangle of the Hessian.
        dp = n_components * (n_components + 1) // 2
        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
        Yi[:, 0] = 1
        M = np.zeros((N, N), dtype=np.float64)
        # With more neighbors than input dims the SVD route is cheaper.
        use_svd = (n_neighbors > d_in)
        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)
            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]
            Yi[:, 1:1 + n_components] = U[:, :n_components]
            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
                                                 U[:, k:n_components])
                j += n_components - k
            Q, R = qr(Yi)
            w = Q[:, n_components + 1:]
            S = w.sum(0)
            # Avoid division by (near-)zero column sums.
            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)
        if M_sparse:
            M = csr_matrix(M)
    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])
        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)
        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs,
                                        full_matrices=True)
            # Singular values squared are the covariance eigenvalues.
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                # eigh returns ascending order; reverse to descending.
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]
        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve
        reg = 1E-3 * evals.sum(1)
        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]
        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]
        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points. This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)
        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = stable_cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues
        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=np.float64)
        for i in range(N):
            s_i = s_range[i]
            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h
            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
                  (1 - alpha_i) * w_reg[i, :, None])
            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i
        if M_sparse:
            M = csr_matrix(M)
    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        M = np.zeros((N, N))
        use_svd = (n_neighbors > d_in)
        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)
            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]
            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)
            GiGiT = np.dot(Gi, Gi.T)
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1
    # k_skip=1 drops the trivial constant eigenvector.
    return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
                      tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
    """Locally Linear Embedding
    Read more in the :ref:`User Guide <locally_linear_embedding>`.
    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition. For this method, M must be an array
            or matrix type. This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
        Not used if eigen_solver=='dense'.
    method : string ('standard', 'hessian', 'modified' or 'ltsa')
        standard : use the standard locally linear embedding algorithm. see
                   reference [1]
        hessian : use the Hessian eigenmap method. This method requires
                  ``n_neighbors > n_components * (1 + (n_components + 1) / 2``
                  see reference [2]
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]
        ltsa : use local tangent space alignment algorithm
               see reference [4]
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if ``method == 'hessian'``
    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if ``method == 'modified'``
    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``eigen_solver`` == 'arpack'.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    embedding_vectors_ : array-like, shape [n_components, n_samples]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_vectors_`
    nbrs_ : NearestNeighbors object
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.
    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding. Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A. 100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ. 8:406 (2004)`
    """

    def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
                 eigen_solver='auto', tol=1E-6, max_iter=100,
                 method='standard', hessian_tol=1E-4, modified_tol=1E-12,
                 neighbors_algorithm='auto', random_state=None, n_jobs=1):
        # Only store parameters here; all validation and computation happens
        # in fit (scikit-learn estimator convention).
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.reg = reg
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.method = method
        self.hessian_tol = hessian_tol
        self.modified_tol = modified_tol
        self.random_state = random_state
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs

    def _fit_transform(self, X):
        # Shared implementation behind fit() and fit_transform(): builds the
        # neighbors index and computes embedding_ / reconstruction_error_.
        self.nbrs_ = NearestNeighbors(self.n_neighbors,
                                      algorithm=self.neighbors_algorithm,
                                      n_jobs=self.n_jobs)
        random_state = check_random_state(self.random_state)
        X = check_array(X, dtype=float)
        self.nbrs_.fit(X)
        self.embedding_, self.reconstruction_error_ = \
            locally_linear_embedding(
                self.nbrs_, self.n_neighbors, self.n_components,
                eigen_solver=self.eigen_solver, tol=self.tol,
                max_iter=self.max_iter, method=self.method,
                hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
                random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X
        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.
        y : Ignored. Present for scikit-learn API compatibility.
        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Compute the embedding vectors for data X and transform X.
        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.
        y : Ignored. Present for scikit-learn API compatibility.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """
        Transform new points into embedding space.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        X_new : array, shape = [n_samples, n_components]
        Notes
        -----
        Because of scaling performed by this method, it is discouraged to use
        it together with methods that are not scale-invariant (like SVMs)
        """
        check_is_fitted(self, "nbrs_")
        X = check_array(X)
        # Find each new point's neighbors among the training data, compute
        # its barycenter weights, then map it through the fitted embedding.
        ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                    return_distance=False)
        weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
                                     reg=self.reg)
        X_new = np.empty((X.shape[0], self.n_components))
        for i in range(X.shape[0]):
            X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
        return X_new
|
bsd-3-clause
|
doganov/edx-platform
|
common/djangoapps/track/tests/test_tracker.py
|
35
|
3443
|
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
import track.tracker as tracker
from track.backends import BaseBackend
# Single-backend tracker configuration used by the tests below.
SIMPLE_SETTINGS = {
    'default': {
        'ENGINE': 'track.tests.test_tracker.DummyBackend',
        'OPTIONS': {
            'flag': True
        }
    }
}

# Two-backend tracker configuration; both entries point at DummyBackend.
MULTI_SETTINGS = {
    'first': {
        'ENGINE': 'track.tests.test_tracker.DummyBackend',
    },
    'second': {
        'ENGINE': 'track.tests.test_tracker.DummyBackend',
    }
}
class TestTrackerInstantiation(TestCase):
    """Test that a helper function can instantiate backends from their name."""

    def setUp(self):
        # pylint: disable=protected-access
        super(TestTrackerInstantiation, self).setUp()
        self.get_backend = tracker._instantiate_backend_from_name

    def test_instatiate_backend(self):
        """A valid dotted path plus options yields a configured backend."""
        backend = self.get_backend(
            'track.tests.test_tracker.DummyBackend', {'flag': True})
        self.assertIsInstance(backend, DummyBackend)
        self.assertTrue(backend.flag)

    def test_instatiate_backends_with_invalid_values(self):
        """Invalid dotted paths (missing, non-backend, etc.) raise ValueError."""
        invalid_names = [
            'track.backends.logger',          # module, not a class
            'track.backends.logger.Foo',      # class does not exist
            'this.package.does.not.exists',   # package does not exist
            'unittest.TestCase',              # class is not a tracker backend
        ]
        for name in invalid_names:
            self.assertRaises(ValueError, self.get_backend, name, {})
class TestTrackerDjangoInstantiation(TestCase):
    """Test if backends are initialized properly from Django settings."""

    @override_settings(TRACKING_BACKENDS=SIMPLE_SETTINGS)
    def test_django_simple_settings(self):
        """Test configuration of a simple backend"""
        backends = self._reload_backends()
        self.assertEqual(len(backends), 1)
        tracker.send({})
        # dict views are not indexable on Python 3; materialize into a list.
        self.assertEqual(list(backends.values())[0].count, 1)

    @override_settings(TRACKING_BACKENDS=MULTI_SETTINGS)
    def test_django_multi_settings(self):
        """Test if multiple backends can be configured properly."""
        backends = list(self._reload_backends().values())
        self.assertEqual(len(backends), 2)
        event_count = 10
        # ``xrange`` does not exist on Python 3; ``range`` works on both.
        for _ in range(event_count):
            tracker.send({})
        self.assertEqual(backends[0].count, event_count)
        self.assertEqual(backends[1].count, event_count)

    @override_settings(TRACKING_BACKENDS=MULTI_SETTINGS)
    def test_django_remove_settings(self):
        """Test if a backend can be remove by setting it to None."""
        settings.TRACKING_BACKENDS.update({'second': None})
        backends = self._reload_backends()
        self.assertEqual(len(backends), 1)

    def _reload_backends(self):
        """Force the tracker to rebuild its backend map from current settings."""
        # pylint: disable=protected-access
        # Reset backends
        tracker._initialize_backends_from_django_settings()
        return tracker.backends
class DummyBackend(BaseBackend):
    """Tracker backend stub that records how many events it received."""

    def __init__(self, **options):
        super(DummyBackend, self).__init__(**options)
        # ``flag`` mirrors the OPTIONS dict so tests can verify that
        # backend options are forwarded on instantiation.
        self.flag = options.get('flag', False)
        # Number of events delivered via send().
        self.count = 0

    def send(self, event):
        # Ignore the event payload; only count deliveries.
        self.count += 1
|
agpl-3.0
|
keerts/home-assistant
|
tests/helpers/test_customize.py
|
3
|
4270
|
"""Test the customize helper."""
import homeassistant.helpers.customize as customize
from voluptuous import MultipleInvalid
import pytest
class MockHass(object):
    """Mock object for HassAssistant."""

    # Minimal stand-in: the customize helper only needs a ``data`` mapping.
    data = {}
class TestHelpersCustomize(object):
    """Test homeassistant.helpers.customize module."""

    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.entity_id = 'test.test'
        self.hass = MockHass()

    def _get_overrides(self, overrides):
        """Register ``overrides`` for a test domain and return the merged
        customization for ``self.entity_id``."""
        test_domain = 'test.domain'
        customize.set_customize(self.hass, test_domain, overrides)
        return customize.get_overrides(self.hass, test_domain, self.entity_id)

    def test_override_single_value(self):
        """Test entity customization through configuration."""
        result = self._get_overrides([
            {'entity_id': [self.entity_id], 'key': 'value'}])
        assert result == {'key': 'value'}

    def test_override_multiple_values(self):
        """Test entity customization through configuration."""
        result = self._get_overrides([
            {'entity_id': [self.entity_id], 'key1': 'value1'},
            {'entity_id': [self.entity_id], 'key2': 'value2'}])
        assert result == {'key1': 'value1', 'key2': 'value2'}

    def test_override_same_value(self):
        """Test entity customization through configuration."""
        result = self._get_overrides([
            {'entity_id': [self.entity_id], 'key': 'value1'},
            {'entity_id': [self.entity_id], 'key': 'value2'}])
        assert result == {'key': 'value2'}

    def test_override_by_domain(self):
        """Test entity customization through configuration."""
        result = self._get_overrides([
            {'entity_id': ['test'], 'key': 'value'}])
        assert result == {'key': 'value'}

    def test_override_by_glob(self):
        """Test entity customization through configuration."""
        result = self._get_overrides([
            {'entity_id': ['test.?e*'], 'key': 'value'}])
        assert result == {'key': 'value'}

    def test_override_exact_over_glob_over_domain(self):
        """Test entity customization through configuration."""
        result = self._get_overrides([
            {'entity_id': ['test.test'], 'key1': 'valueExact'},
            {'entity_id': ['test.tes?'],
             'key1': 'valueGlob',
             'key2': 'valueGlob'},
            {'entity_id': ['test'],
             'key1': 'valueDomain',
             'key2': 'valueDomain',
             'key3': 'valueDomain'}])
        assert result == {
            'key1': 'valueExact',
            'key2': 'valueGlob',
            'key3': 'valueDomain'}

    def test_override_deep_dict(self):
        """Test we can deep-overwrite a dict."""
        result = self._get_overrides(
            [{'entity_id': [self.entity_id],
              'test': {'key1': 'value1', 'key2': 'value2'}},
             {'entity_id': [self.entity_id],
              'test': {'key3': 'value3', 'key2': 'value22'}}])
        assert result['test'] == {
            'key1': 'value1',
            'key2': 'value22',
            'key3': 'value3'}

    def test_schema_bad_schema(self):
        """Test bad customize schemas."""
        for value in (
                {'test.test': 10},
                {'test.test': ['hello']},
                {'entity_id': {'a': 'b'}},
                {'entity_id': 10},
                [{'test.test': 'value'}],
        ):
            # pytest.raises' ``message`` kwarg was deprecated and removed in
            # pytest 5.0; report the offending value via pytest.fail instead.
            try:
                customize.CUSTOMIZE_SCHEMA(value)
            except MultipleInvalid:
                continue
            pytest.fail(
                "{} should have raised MultipleInvalid".format(value))

    def test_get_customize_schema_allow_extra(self):
        """Test schema with ALLOW_EXTRA."""
        for value in (
                {'test.test': {'hidden': True}},
                {'test.test': {'key': ['value1', 'value2']}},
                [{'entity_id': 'id1', 'key': 'value'}],
        ):
            customize.CUSTOMIZE_SCHEMA(value)

    def test_get_customize_schema_csv(self):
        """Test schema with comma separated entity IDs."""
        assert [{'entity_id': ['id1', 'id2', 'id3']}] == \
            customize.CUSTOMIZE_SCHEMA([{'entity_id': 'id1,ID2 , id3'}])
|
apache-2.0
|
Pablo126/SSBW
|
Tarea4/tarea4/lib/python3.5/site-packages/django/conf/locale/mk/formats.py
|
504
|
1742
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Macedonian (mk) locale: date/time display formats and input parsers.
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
# Monday (1) is the first day of the week.
FIRST_DAY_OF_WEEK = 1

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    '%d. %m. %Y', '%d. %m. %y',  # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',  # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',  # '25.10.06 14:30'
    '%d.%m.%y',  # '25.10.06'
    '%d. %m. %Y %H:%M:%S',  # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M:%S.%f',  # '25. 10. 2006 14:30:59.000200'
    '%d. %m. %Y %H:%M',  # '25. 10. 2006 14:30'
    '%d. %m. %Y',  # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S',  # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M:%S.%f',  # '25. 10. 06 14:30:59.000200'
    '%d. %m. %y %H:%M',  # '25. 10. 06 14:30'
    '%d. %m. %y',  # '25. 10. 06'
]
# European-style numbers: comma decimal mark, dot thousands separator.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
gpl-3.0
|
aviabrams/django-pagination
|
pagination/tests.py
|
16
|
3645
|
"""
>>> from django.core.paginator import Paginator
>>> from pagination.templatetags.pagination_tags import paginate
>>> from django.template import Template, Context
>>> p = Paginator(range(15), 2)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, 5, 6, 7, 8]
>>> p = Paginator(range(17), 2)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> p = Paginator(range(19), 2)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, None, 7, 8, 9, 10]
>>> p = Paginator(range(21), 2)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, None, 8, 9, 10, 11]
# Testing orphans
>>> p = Paginator(range(5), 2, 1)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2]
>>> p = Paginator(range(21), 2, 1)
>>> paginate({'paginator': p, 'page_obj': p.page(1)})['pages']
[1, 2, 3, 4, None, 7, 8, 9, 10]
>>> t = Template("{% load pagination_tags %}{% autopaginate var 2 %}{% paginate %}")
>>> from django.http import HttpRequest as DjangoHttpRequest
>>> class HttpRequest(DjangoHttpRequest):
... page = 1
>>> t.render(Context({'var': range(21), 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>>
>>> t = Template("{% load pagination_tags %}{% autopaginate var %}{% paginate %}")
>>> t.render(Context({'var': range(21), 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>> t = Template("{% load pagination_tags %}{% autopaginate var 20 %}{% paginate %}")
>>> t.render(Context({'var': range(21), 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>> t = Template("{% load pagination_tags %}{% autopaginate var by %}{% paginate %}")
>>> t.render(Context({'var': range(21), 'by': 20, 'request': HttpRequest()}))
u'\\n\\n<div class="pagination">...
>>> t = Template("{% load pagination_tags %}{% autopaginate var by as foo %}{{ foo }}")
>>> t.render(Context({'var': range(21), 'by': 20, 'request': HttpRequest()}))
u'[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]'
>>>
# Testing InfinitePaginator
>>> from paginator import InfinitePaginator
>>> InfinitePaginator
<class 'pagination.paginator.InfinitePaginator'>
>>> p = InfinitePaginator(range(20), 2, link_template='/bacon/page/%d')
>>> p.validate_number(2)
2
>>> p.orphans
0
>>> p3 = p.page(3)
>>> p3
<Page 3>
>>> p3.end_index()
6
>>> p3.has_next()
True
>>> p3.has_previous()
True
>>> p.page(10).has_next()
False
>>> p.page(1).has_previous()
False
>>> p3.next_link()
'/bacon/page/4'
>>> p3.previous_link()
'/bacon/page/2'
# Testing FinitePaginator
>>> from paginator import FinitePaginator
>>> FinitePaginator
<class 'pagination.paginator.FinitePaginator'>
>>> p = FinitePaginator(range(20), 2, offset=10, link_template='/bacon/page/%d')
>>> p.validate_number(2)
2
>>> p.orphans
0
>>> p3 = p.page(3)
>>> p3
<Page 3>
>>> p3.start_index()
10
>>> p3.end_index()
6
>>> p3.has_next()
True
>>> p3.has_previous()
True
>>> p3.next_link()
'/bacon/page/4'
>>> p3.previous_link()
'/bacon/page/2'
>>> p = FinitePaginator(range(20), 20, offset=10, link_template='/bacon/page/%d')
>>> p2 = p.page(2)
>>> p2
<Page 2>
>>> p2.has_next()
False
>>> p3.has_previous()
True
>>> p2.next_link()
>>> p2.previous_link()
'/bacon/page/1'
>>> from pagination.middleware import PaginationMiddleware
>>> from django.core.handlers.wsgi import WSGIRequest
>>> from StringIO import StringIO
>>> middleware = PaginationMiddleware()
>>> request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart', 'wsgi.input': StringIO()})
>>> middleware.process_request(request)
>>> request.upload_handlers.append('asdf')
"""
|
bsd-3-clause
|
chongtianfeiyu/kbengine
|
kbe/res/scripts/common/Lib/distutils/fancy_getopt.py
|
207
|
17784
|
"""distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
import sys, string, re
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = str.maketrans('-', '_')
class FancyGetopt:
    """Wrapper around the standard 'getopt()' module that provides some
    handy extra functionality:
      * short and long options are tied together
      * options have help strings, and help text can be assembled
        from them
      * options set attributes of a passed-in object
      * boolean options can have "negative aliases" -- eg. if
        --quiet is the "negative alias" of --verbose, then "--quiet"
        on the command line sets 'verbose' to false
    """

    def __init__(self, option_table=None):
        # The option table is (currently) a list of tuples. The
        # tuples may have 3 or four values:
        # (long_option, short_option, help_string [, repeatable])
        # if an option takes an argument, its long_option should have '='
        # appended; short_option should just be a single character, no ':'
        # in any case. If a long_option doesn't have a corresponding
        # short_option, short_option should be None. All option tuples
        # must have long options.
        self.option_table = option_table

        # 'option_index' maps long option names to entries in the option
        # table (ie. those 3-tuples).
        self.option_index = {}
        if self.option_table:
            self._build_index()

        # 'alias' records (duh) alias options; {'foo': 'bar'} means
        # --foo is an alias for --bar
        self.alias = {}

        # 'negative_alias' keeps track of options that are the boolean
        # opposite of some other option
        self.negative_alias = {}

        # These keep track of the information in the option table. We
        # don't actually populate these structures until we're ready to
        # parse the command-line, since the 'option_table' passed in here
        # isn't necessarily the final word.
        self.short_opts = []
        self.long_opts = []
        self.short2long = {}
        self.attr_name = {}
        self.takes_arg = {}

        # And 'option_order' is filled up in 'getopt()'; it records the
        # original order of options (and their values) on the command-line,
        # but expands short options, converts aliases, etc.
        self.option_order = []

    def _build_index(self):
        """Rebuild 'option_index' (long option name -> option tuple)."""
        self.option_index.clear()
        for option in self.option_table:
            self.option_index[option[0]] = option

    def set_option_table(self, option_table):
        """Replace the whole option table and rebuild the index."""
        self.option_table = option_table
        self._build_index()

    def add_option(self, long_option, short_option=None, help_string=None):
        """Append one option tuple; raises DistutilsGetoptError on a
        duplicate long option name."""
        if long_option in self.option_index:
            raise DistutilsGetoptError(
                "option conflict: already an option '%s'" % long_option)
        else:
            option = (long_option, short_option, help_string)
            self.option_table.append(option)
            self.option_index[long_option] = option

    def has_option(self, long_option):
        """Return true if the option table for this parser has an
        option with long name 'long_option'."""
        return long_option in self.option_index

    def get_attr_name(self, long_option):
        """Translate long option name 'long_option' to the form it
        has as an attribute of some object: ie., translate hyphens
        to underscores."""
        return long_option.translate(longopt_xlate)

    def _check_alias_dict(self, aliases, what):
        """Validate that every key and value of 'aliases' names a defined
        option; 'what' labels the error messages ("alias"/"negative alias")."""
        assert isinstance(aliases, dict)
        for (alias, opt) in aliases.items():
            if alias not in self.option_index:
                raise DistutilsGetoptError(("invalid %s '%s': "
                    "option '%s' not defined") % (what, alias, alias))
            if opt not in self.option_index:
                raise DistutilsGetoptError(("invalid %s '%s': "
                    "aliased option '%s' not defined") % (what, alias, opt))

    def set_aliases(self, alias):
        """Set the aliases for this option parser."""
        self._check_alias_dict(alias, "alias")
        self.alias = alias

    def set_negative_aliases(self, negative_alias):
        """Set the negative aliases for this option parser.

        'negative_alias' should be a dictionary mapping option names to
        option names, both the key and value must already be defined
        in the option table."""
        self._check_alias_dict(negative_alias, "negative alias")
        self.negative_alias = negative_alias

    def _grok_option_table(self):
        """Populate the various data structures that keep tabs on the
        option table.  Called by 'getopt()' before it can do anything
        worthwhile.
        """
        self.long_opts = []
        self.short_opts = []
        self.short2long.clear()
        self.repeat = {}

        for option in self.option_table:
            if len(option) == 3:
                long, short, help = option
                repeat = 0
            elif len(option) == 4:
                long, short, help, repeat = option
            else:
                # the option table is part of the code, so simply
                # assert that it is correct
                raise ValueError("invalid option tuple: %r" % (option,))

            # Type- and value-check the option names
            if not isinstance(long, str) or len(long) < 2:
                raise DistutilsGetoptError(("invalid long option '%s': "
                    "must be a string of length >= 2") % long)

            if (not ((short is None) or
                     (isinstance(short, str) and len(short) == 1))):
                raise DistutilsGetoptError("invalid short option '%s': "
                    "must a single character or None" % short)

            # NOTE(review): keyed by the raw long name (with any trailing
            # '=' still attached); getopt() later looks this up by the
            # *attribute* name, which only coincides for plain boolean
            # options with no hyphen -- e.g. 'verbose'.
            self.repeat[long] = repeat
            self.long_opts.append(long)

            if long[-1] == '=':  # option takes an argument?
                if short: short = short + ':'
                long = long[0:-1]
                self.takes_arg[long] = 1
            else:
                # Is option is a "negative alias" for some other option (eg.
                # "quiet" == "!verbose")?
                alias_to = self.negative_alias.get(long)
                if alias_to is not None:
                    if self.takes_arg[alias_to]:
                        raise DistutilsGetoptError(
                            "invalid negative alias '%s': "
                            "aliased option '%s' takes a value"
                            % (long, alias_to))

                    self.long_opts[-1] = long  # XXX redundant?!
                self.takes_arg[long] = 0

            # If this is an alias option, make sure its "takes arg" flag is
            # the same as the option it's aliased to.
            alias_to = self.alias.get(long)
            if alias_to is not None:
                if self.takes_arg[long] != self.takes_arg[alias_to]:
                    raise DistutilsGetoptError(
                        "invalid alias '%s': inconsistent with "
                        "aliased option '%s' (one of them takes a value, "
                        "the other doesn't"
                        % (long, alias_to))

            # Now enforce some bondage on the long option name, so we can
            # later translate it to an attribute name on some object. Have
            # to do this a bit late to make sure we've removed any trailing
            # '='.
            if not longopt_re.match(long):
                raise DistutilsGetoptError(
                    "invalid long option name '%s' "
                    "(must be letters, numbers, hyphens only" % long)

            self.attr_name[long] = self.get_attr_name(long)
            if short:
                self.short_opts.append(short)
                self.short2long[short[0]] = long

    def getopt(self, args=None, object=None):
        """Parse command-line options in args. Store as attributes on object.

        If 'args' is None or not supplied, uses 'sys.argv[1:]'.  If
        'object' is None or not supplied, creates a new OptionDummy
        object, stores option values there, and returns a tuple (args,
        object).  If 'object' is supplied, it is modified in place and
        'getopt()' just returns 'args'; in both cases, the returned
        'args' is a modified copy of the passed-in 'args' list, which
        is left untouched.
        """
        if args is None:
            args = sys.argv[1:]
        if object is None:
            object = OptionDummy()
            created_object = True
        else:
            created_object = False

        self._grok_option_table()

        # NOTE(review): joining with ' ' embeds literal space characters in
        # the short-option spec handed to getopt(); a long-standing quirk
        # inherited from distutils that is harmless in practice.
        short_opts = ' '.join(self.short_opts)
        try:
            opts, args = getopt.getopt(args, short_opts, self.long_opts)
        except getopt.error as msg:
            raise DistutilsArgError(msg)

        for opt, val in opts:
            if len(opt) == 2 and opt[0] == '-':  # it's a short option
                opt = self.short2long[opt[1]]
            else:
                assert len(opt) > 2 and opt[:2] == '--'
                opt = opt[2:]

            alias = self.alias.get(opt)
            if alias:
                opt = alias

            if not self.takes_arg[opt]:  # boolean option?
                assert val == '', "boolean option can't have value"
                alias = self.negative_alias.get(opt)
                if alias:
                    opt = alias
                    val = 0
                else:
                    val = 1

            attr = self.attr_name[opt]
            # The only repeating option at the moment is 'verbose'.
            # It has a negative option -q quiet, which should set verbose = 0.
            if val and self.repeat.get(attr) is not None:
                val = getattr(object, attr, 0) + 1
            setattr(object, attr, val)
            self.option_order.append((opt, val))

        # for opts
        if created_object:
            return args, object
        else:
            return args

    def get_option_order(self):
        """Returns the list of (option, value) tuples processed by the
        previous run of 'getopt()'.  Raises RuntimeError if
        'getopt()' hasn't been called yet.
        """
        if self.option_order is None:
            raise RuntimeError("'getopt()' hasn't been called yet")
        else:
            return self.option_order

    def generate_help(self, header=None):
        """Generate help text (a list of strings, one per suggested line of
        output) from the option table for this FancyGetopt object.
        """
        # Blithely assume the option table is good: probably wouldn't call
        # 'generate_help()' unless you've already called 'getopt()'.

        # First pass: determine maximum length of long option names
        max_opt = 0
        for option in self.option_table:
            long = option[0]
            short = option[1]
            l = len(long)
            if long[-1] == '=':
                l = l - 1
            if short is not None:
                l = l + 5  # " (-x)" where short == 'x'
            if l > max_opt:
                max_opt = l

        opt_width = max_opt + 2 + 2 + 2  # room for indent + dashes + gutter

        # Typical help block looks like this:
        # --foo controls foonabulation
        # Help block for longest option looks like this:
        # --flimflam set the flim-flam level
        # and with wrapped text:
        # --flimflam set the flim-flam level (must be between
        # 0 and 100, except on Tuesdays)
        # Options with short names will have the short name shown (but
        # it doesn't contribute to max_opt):
        # --foo (-f) controls foonabulation
        # If adding the short option would make the left column too wide,
        # we push the explanation off to the next line
        # --flimflam (-l)
        # set the flim-flam level
        # Important parameters:
        # - 2 spaces before option block start lines
        # - 2 dashes for each long option name
        # - min. 2 spaces between option and explanation (gutter)
        # - 5 characters (incl. space) for short option name

        # Now generate lines of help text. (If 80 columns were good enough
        # for Jesus, then 78 columns are good enough for me!)
        line_width = 78
        text_width = line_width - opt_width
        big_indent = ' ' * opt_width
        if header:
            lines = [header]
        else:
            lines = ['Option summary:']

        for option in self.option_table:
            long, short, help = option[:3]
            text = wrap_text(help, text_width)
            if long[-1] == '=':
                long = long[0:-1]
            # Case 1: no short option at all (makes life easy)
            if short is None:
                if text:
                    lines.append(" --%-*s %s" % (max_opt, long, text[0]))
                else:
                    lines.append(" --%-*s " % (max_opt, long))
            # Case 2: we have a short option, so we have to include it
            # just after the long option
            else:
                opt_names = "%s (-%s)" % (long, short)
                if text:
                    lines.append(" --%-*s %s" %
                                 (max_opt, opt_names, text[0]))
                else:
                    # NOTE(review): '%-*s' consumes two arguments (width,
                    # value) but only 'opt_names' is supplied -- this line
                    # raises TypeError if ever reached (short option present,
                    # empty help text).  Latent upstream bug.
                    lines.append(" --%-*s" % opt_names)
            for l in text[1:]:
                lines.append(big_indent + l)
        return lines

    def print_help(self, header=None, file=None):
        """Write the output of 'generate_help()' to 'file' (default:
        sys.stdout), one line at a time."""
        if file is None:
            file = sys.stdout
        for line in self.generate_help(header):
            file.write(line + "\n")
def fancy_getopt(options, negative_opt, object, args):
    """Convenience wrapper: build a FancyGetopt parser for 'options',
    install the 'negative_opt' aliases, and parse 'args' into 'object'."""
    getopt_parser = FancyGetopt(options)
    getopt_parser.set_negative_aliases(negative_opt)
    return getopt_parser.getopt(args, object)
# Translation table mapping every whitespace character to a plain space.
WS_TRANS = {ord(c): ' ' for c in string.whitespace}

def wrap_text(text, width):
    """wrap_text(text : string, width : int) -> [string]

    Split 'text' into multiple lines of no more than 'width' characters
    each, and return the list of strings that results.
    """
    if text is None:
        return []
    if len(text) <= width:
        return [text]

    # Normalize all whitespace to single spaces, then split into atomic
    # chunks: runs of spaces, runs of hyphens, and everything in between.
    normalized = text.expandtabs().translate(WS_TRANS)
    pieces = [p for p in re.split(r'( +|-+)', normalized) if p]

    wrapped = []
    while pieces:
        line = []   # chunks making up the current output line
        used = 0    # total character count of 'line'
        while pieces:
            size = len(pieces[0])
            if used + size > width:
                # Line is full; a trailing all-space chunk is dropped.
                if line and line[-1][0] == ' ':
                    del line[-1]
                break
            line.append(pieces.pop(0))
            used += size
        if pieces:
            # We stopped because the next chunk did not fit.
            if used == 0:
                # A single chunk wider than the line: hard-split it.
                line.append(pieces[0][:width])
                pieces[0] = pieces[0][width:]
            # Whitespace chunks at a break point are discarded (re.split
            # guarantees a chunk is either all-space or has no spaces).
            if pieces[0][0] == ' ':
                del pieces[0]
        wrapped.append(''.join(line))
    return wrapped
def translate_longopt(opt):
    """Convert a long option name to a valid Python identifier by
    changing "-" to "_".
    """
    # Equivalent to opt.translate(longopt_xlate): the table maps '-' to
    # '_' and nothing else.
    return opt.replace('-', '_')
class OptionDummy:
    """Dummy class just used as a place to hold command-line option
    values as instance attributes."""

    def __init__(self, options=()):
        """Create a new OptionDummy instance.

        Each name listed in 'options' becomes an instance attribute
        initialized to None.
        """
        # An immutable tuple default replaces the original mutable
        # 'options=[]' default (the classic shared-default pitfall);
        # callers passing any iterable of names behave identically.
        for opt in options:
            setattr(self, opt, None)
# Ad-hoc smoke test: wrap a sample paragraph at several widths and print
# the results for visual inspection.
if __name__ == "__main__":
    text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""
    for w in (10, 20, 30, 40):
        print("width: %d" % w)
        print("\n".join(wrap_text(text, w)))
        print()
|
lgpl-3.0
|
ionelmc/virtualenv
|
virtualenv/_utils.py
|
1
|
1837
|
from __future__ import absolute_import, division, print_function
import os
import re
import shutil
import stat
def ensure_directory(directory, *args, **kwargs):
    """Create 'directory' (including parents) unless it already exists
    as a directory.

    Extra positional/keyword arguments are forwarded to os.makedirs.
    If the path exists but is not a directory, os.makedirs raises.
    """
    if os.path.isdir(directory):
        return
    os.makedirs(directory, *args, **kwargs)
def copyfile(srcfile, destfile, skip=re.compile(r".*\.pyc\Z|__pycache__\Z", re.IGNORECASE)):
    """Recursively copy 'srcfile' to 'destfile', skipping entries whose
    names match 'skip' (compiled .pyc files and __pycache__ directories).

    For regular files, only the content and the executable bit are
    propagated -- no timestamps or other metadata.  The default 'skip'
    regex is compiled once at definition time (it is never mutated, so
    the shared default is safe).
    """
    ensure_directory(os.path.dirname(destfile))
    if os.path.isdir(srcfile):
        # TODO: just use shutil.copytree to avoid bikeshedding
        for name in os.listdir(srcfile):
            if not skip.match(name):
                copyfile(
                    os.path.join(srcfile, name),
                    os.path.join(destfile, name)
                )
    else:
        # We use copyfile (not move, copy, or copy2) to be extra sure that we are
        # not moving directories over (copyfile fails for directories) as well as
        # to ensure that we are not copying over any metadata because we want more
        # control over what metadata we actually copy over.
        shutil.copyfile(srcfile, destfile)

        # Grab the stat data for the source file so we can use it to copy over
        # certain metadata to the destination file.
        st = os.stat(srcfile)

        # If our file is executable, then make our destination file
        # executable.
        if os.access(srcfile, os.X_OK):
            permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
            os.chmod(destfile, permissions)
class cached_property(object):  # flake8: noqa
    """Non-data descriptor that caches the decorated method's result.

    The first attribute access calls 'func' and stores the result in the
    instance's __dict__ under the same name; because this is a non-data
    descriptor (no __set__), that cached entry shadows the descriptor on
    all later accesses and 'func' is never called again.
    """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor object itself.
        if obj is None:
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
|
mit
|
MicroTrustRepos/microkernel
|
src/l4/pkg/python/contrib/Demo/tkinter/guido/brownian2.py
|
42
|
1379
|
# Brownian motion -- an example of a NON multi-threaded Tkinter program ;)
# By Michele Simoniato, inspired by brownian.py
from Tkinter import *
import random
import sys
WIDTH = 400
HEIGHT = 300
SIGMA = 10
BUZZ = 2
RADIUS = 2
LAMBDA = 10
FILL = 'red'
stop = 0 # Set when main loop exits
root = None # main window
def particle(canvas): # particle = iterator over the moves
    """Generator driving one dot on 'canvas'.

    Creates the oval at a gaussian-distributed start position; each
    resumption nudges it by a gaussian step (stddev BUZZ).  Terminates
    when the global 'stop' flag is set or the canvas is destroyed.
    """
    r = RADIUS
    x = random.gauss(WIDTH/2.0, SIGMA)
    y = random.gauss(HEIGHT/2.0, SIGMA)
    p = canvas.create_oval(x-r, y-r, x+r, y+r, fill=FILL)
    while not stop:
        dx = random.gauss(0, BUZZ)
        dy = random.gauss(0, BUZZ)
        try:
            canvas.move(p, dx, dy)
        except TclError:
            # Window was closed underneath us; end the generator.
            break
        else:
            yield None
def move(particle): # move the particle at random time
    """Advance 'particle' one step, then reschedule itself after an
    exponentially-distributed delay (rate LAMBDA events/second)."""
    # Python 2 generator protocol; under Python 3 this would be
    # next(particle).
    particle.next()
    dt = random.expovariate(LAMBDA)
    root.after(int(dt*1000), move, particle)
def main():
    """Build the Tk window, start the particle generators, and run the
    main loop.  Optional sys.argv[1] overrides the particle count."""
    global root, stop
    root = Tk()
    canvas = Canvas(root, width=WIDTH, height=HEIGHT)
    canvas.pack(fill='both', expand=1)
    np = 30
    if sys.argv[1:]:
        np = int(sys.argv[1])
    for i in range(np): # start the dance
        move(particle(canvas))
    try:
        root.mainloop()
    finally:
        # Signal every particle generator to finish on exit.
        stop = 1
if __name__ == '__main__':
main()
|
gpl-2.0
|
tobegit3hub/deep_cnn
|
java_predict_client/src/main/proto/tensorflow/python/ops/resource_variable_ops.py
|
2
|
7588
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to use variables as resources."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resources
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_resource_variable_ops import *
# pylint: enable=wildcard-import
# Shape inference for every resource-variable op is delegated to the
# C++ shape functions registered for these op names.
ops.RegisterShape("VarHandleOp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("CreateVariableOp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReadVariableOp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AssignVariableOp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AssignAddVariableOp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("VarIsInitializedOp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ResourceGather")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ResourceScatterAdd")(common_shapes.call_cpp_shape_fn)
def _register_variable_read(read, collections, trainable):
  """Helper function to put a read from a variable in the collections."""
  trainable_key = ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES
  if collections is None:
    collections = []
  # Trainable reads additionally land in the trainable-variables
  # collection (without mutating the caller's list).
  if trainable and trainable_key not in collections:
    collections = list(collections) + [trainable_key]
  ops.add_to_collections(collections, read)
class ResourceVariable(object):
  """Variable based on resource handles.

  TODO(apassos): fill this out explaining the semantics and Variable
  compatibility when the API has settled more.
  """

  def __init__(self,
               initial_value=None,
               name=None,
               trainable=True,
               collections=None,
               dtype=None,
               shape=None):
    """Creates a variable.

    Args:
      initial_value: A `Tensor` or Python object convertible to a `Tensor`
        representing the initial value of this variable.
      name: The name of this variable. Automatically uniquified.
      trainable: Whether the global read of this variable will be used for
        training.
      collections: Additional collections to which the `read` operation for
        this variable is to be added. Defaults to [].
      dtype: The type of this variable. Can be omitted if it can be deduced
        from the initial_value. If different from the type of the initial
        value it will be cast to this type.
      shape: The shape of this variable. Only specify if there is no initial
        value but shape inference is desired.
    """
    if initial_value is not None:
      initial_value = ops.convert_to_tensor(initial_value)
    if dtype is None:
      # Without an explicit dtype the initial value is the only source of
      # type information.
      assert initial_value is not None, ("Trying to create a resource variable "
                                         "with no dtype or initial value. At"
                                         " least one of these must be set.")
      dtype = initial_value.dtype
    elif initial_value is not None:
      # Cast the initial value to the explicitly requested dtype.
      initial_value = math_ops.cast(initial_value, dtype)
    if shape is None:
      if initial_value is not None:
        shape = initial_value.get_shape().as_proto()
      else:
        shape = tensor_shape.unknown_shape()
    else:
      shape = tensor_shape.as_shape(shape)

    self._dtype = dtype
    with ops.name_scope(name, "Variable", [initial_value]) as name:
      # The handle is the mutable resource; all reads/writes below go
      # through it.
      self._handle = gen_resource_variable_ops.var_handle_op(shared_name=name,
                                                             name=name,
                                                             dtype=dtype,
                                                             shape=shape)

      with ops.name_scope("IsInitialized"):
        self._is_initialized_op = (
            gen_resource_variable_ops.var_is_initialized_op(self._handle))
      if initial_value is not None:
        # NOTE(review): _initialize_op is only set on this branch; the
        # `create` property raises AttributeError when no initial_value
        # was supplied.
        with ops.name_scope("Create"):
          self._initialize_op = gen_resource_variable_ops.create_variable_op(
              self._handle, initial_value)
        resources.register_resource(self._handle,
                                    self._initialize_op,
                                    self._is_initialized_op)

      with ops.name_scope("Read"):
        # Cached read reused by `value`/`op`/`eval`; fresh reads go
        # through read_value().
        self._value = gen_resource_variable_ops.read_variable_op(
            self._handle, dtype=self._dtype)
      _register_variable_read(
          self._value, trainable=trainable, collections=collections)

  @property
  def dtype(self):
    """The dtype of this variable."""
    return self._dtype

  @property
  def create(self):
    """The op responsible for initializing this variable."""
    return self._initialize_op

  @property
  def handle(self):
    """The handle by which this variable can be accessed."""
    return self._handle

  @property
  def value(self):
    """A cached operation which reads the value of this variable."""
    return self._value

  @property
  def op(self):
    """The op which reads the value of this variable."""
    return self._value.op

  def eval(self, session=None):
    """Evaluates and returns the value of this variable."""
    return self._value.eval(session=session)

  def read_value(self, collections=None, trainable=True):
    """Constructs an op which reads the value of this variable.

    Should be used when there are multiple reads, or when it is desirable to
    read the value only after some condition is true.

    Args:
     collections: any collections in which this operation should be inserted.
     trainable: whether this read is to be used for training.

    Returns:
     the read operation.
    """
    with ops.name_scope("Read"):
      value = gen_resource_variable_ops.read_variable_op(
          self._handle, dtype=self._dtype)
    _register_variable_read(value, collections=collections, trainable=trainable)
    return value

  def sparse_read(self, indices, collections=None, trainable=True, name=None):
    # Gather rows of the variable at `indices`; registered as a read so
    # gradients can flow when `trainable` is set.
    with ops.name_scope("Gather" if name is None else name):
      value = gen_resource_variable_ops.resource_gather(
          self._handle, indices, Tparams=self._dtype)
    _register_variable_read(value, collections=collections, trainable=trainable)
    return value
# pylint: disable=unused-argument
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
  """Tensor-conversion function for ResourceVariable.

  Registered below via register_tensor_conversion_function so instances
  can be used wherever a Tensor is expected.

  Args:
    var: The ResourceVariable being converted.
    dtype: If set and different from the variable's dtype, conversion is
      refused -- there is no implicit casting.
    name: Unused; part of the conversion-function signature.
    as_ref: Unused; part of the conversion-function signature.

  Returns:
    The variable's cached read op, or NotImplemented on dtype mismatch
    (letting any other registered conversion function take over).
  """
  if dtype is not None and dtype != var.value.dtype:
    # The original stray debug print() on this path was removed; the
    # NotImplemented return value already signals the refusal.
    return NotImplemented
  return var.value
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)
|
apache-2.0
|
noironetworks/nova
|
nova/context.py
|
12
|
10174
|
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import copy
from keystoneclient import auth
from keystoneclient import service_catalog
from oslo_context import context
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova import exception
from nova.i18n import _, _LW
from nova import policy
LOG = logging.getLogger(__name__)
class _ContextAuthPlugin(auth.BaseAuthPlugin):
    """A keystoneclient auth plugin that uses the values from the Context.

    Ideally we would use the plugin provided by auth_token middleware however
    this plugin isn't serialized yet so we construct one from the serialized
    auth data.
    """

    def __init__(self, auth_token, sc):
        super(_ContextAuthPlugin, self).__init__()
        # The already-issued token is reused verbatim; this plugin never
        # re-authenticates or refreshes it.
        self.auth_token = auth_token
        # Wrap the raw catalog list in the dict shape ServiceCatalogV2
        # expects.
        sc = {'serviceCatalog': sc}
        self.service_catalog = service_catalog.ServiceCatalogV2(sc)

    def get_token(self, *args, **kwargs):
        # Returns the stored token unconditionally (no expiry handling).
        return self.auth_token

    def get_endpoint(self, session, service_type=None, interface=None,
                     region_name=None, service_name=None, **kwargs):
        # Resolve an endpoint URL from the serialized service catalog;
        # keystoneclient's 'endpoint_type' corresponds to 'interface' here.
        return self.service_catalog.url_for(service_type=service_type,
                                            service_name=service_name,
                                            endpoint_type=interface,
                                            region_name=region_name)
class RequestContext(context.RequestContext):
    """Security context and request information.

    Represents the user taking a given action within the system.
    """

    def __init__(self, user_id=None, project_id=None,
                 is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, user_name=None, project_name=None,
                 service_catalog=None, instance_lock_checked=False,
                 user_auth_plugin=None, **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
               'yes' indicates deleted records are visible,
               'only' indicates that *only* deleted records are visible.

           :param overwrite: Set to False to ensure that the greenthread local
               copy of the index is not overwritten.

           :param user_auth_plugin: The auth plugin for the current request's
               authentication data.

           :param kwargs: Extra arguments that might be present, but we ignore
               because they possibly came in from older rpc messages.
        """
        # Older serializations may carry oslo-style 'user'/'tenant' keys;
        # use them as fallbacks for the nova-specific ids below.
        user = kwargs.pop('user', None)
        tenant = kwargs.pop('tenant', None)
        super(RequestContext, self).__init__(
            auth_token=auth_token,
            user=user_id or user,
            tenant=project_id or tenant,
            domain=kwargs.pop('domain', None),
            user_domain=kwargs.pop('user_domain', None),
            project_domain=kwargs.pop('project_domain', None),
            is_admin=is_admin,
            read_only=kwargs.pop('read_only', False),
            show_deleted=kwargs.pop('show_deleted', False),
            request_id=request_id,
            resource_uuid=kwargs.pop('resource_uuid', None),
            overwrite=overwrite)
        # oslo_context's RequestContext.to_dict() generates this field, we can
        # safely ignore this as we don't use it.
        kwargs.pop('user_identity', None)
        if kwargs:
            LOG.warning(_LW('Arguments dropped when creating context: %s') %
                        str(kwargs))

        # FIXME(dims): user_id and project_id duplicate information that is
        # already present in the oslo_context's RequestContext. We need to
        # get rid of them.
        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        # Assignment goes through the validating read_deleted property below.
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            # Timestamps arriving via serialized dicts come in as strings.
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in ('volume', 'volumev2',
                                                         'key-manager')]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        self.user_auth_plugin = user_auth_plugin
        if self.is_admin is None:
            # Derive admin-ness from policy when not supplied explicitly.
            self.is_admin = policy.check_is_admin(self)

    def get_auth_plugin(self):
        """Return a keystone auth plugin for this context.

        Prefers the plugin captured with the request; otherwise builds one
        from the serialized token and service catalog.
        """
        if self.user_auth_plugin:
            return self.user_auth_plugin
        else:
            return _ContextAuthPlugin(self.auth_token, self.service_catalog)

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        # Reject anything outside the three supported visibility modes.
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    # Property wrapper so every assignment to read_deleted is validated.
    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def to_dict(self):
        """Serialize the context to a dict; inverse of from_dict()."""
        values = super(RequestContext, self).to_dict()
        # FIXME(dims): defensive hasattr() checks need to be
        # removed once we figure out why we are seeing stack
        # traces
        values.update({
            'user_id': getattr(self, 'user_id', None),
            'project_id': getattr(self, 'project_id', None),
            'is_admin': getattr(self, 'is_admin', None),
            'read_deleted': getattr(self, 'read_deleted', 'no'),
            'roles': getattr(self, 'roles', None),
            'remote_address': getattr(self, 'remote_address', None),
            'timestamp': timeutils.strtime(self.timestamp) if hasattr(
                self, 'timestamp') else None,
            'request_id': getattr(self, 'request_id', None),
            'quota_class': getattr(self, 'quota_class', None),
            'user_name': getattr(self, 'user_name', None),
            'service_catalog': getattr(self, 'service_catalog', None),
            'project_name': getattr(self, 'project_name', None),
            'instance_lock_checked': getattr(self, 'instance_lock_checked',
                                             False)
        })
        return values

    @classmethod
    def from_dict(cls, values):
        """Rebuild a RequestContext from the output of to_dict()."""
        return cls(**values)

    def elevated(self, read_deleted=None):
        """Return a version of this context with admin flag set."""
        context = copy.deepcopy(self)
        context.is_admin = True

        if 'admin' not in context.roles:
            context.roles.append('admin')

        if read_deleted is not None:
            context.read_deleted = read_deleted

        return context

    def __str__(self):
        return "<Context %s>" % self.to_dict()
def get_admin_context(read_deleted="no"):
    """Create an administrative context carrying no user/project identity."""
    return RequestContext(user_id=None, project_id=None, is_admin=True,
                          read_deleted=read_deleted, overwrite=False)
def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    # A normal user is non-admin and has both a user and a project id.
    return bool(not context.is_admin and
                context.user_id and context.project_id)
def require_admin_context(ctxt):
    """Raise exception.AdminRequired() if context is not an admin context."""
    if ctxt.is_admin:
        return
    raise exception.AdminRequired()
def require_context(ctxt):
    """Raise exception.Forbidden() if context is neither admin nor a
    normal user context.
    """
    if ctxt.is_admin or is_user_context(ctxt):
        return
    raise exception.Forbidden()
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if not is_user_context(context):
        # Admin (or empty) contexts are not restricted here.
        return
    if not context.project_id or context.project_id != project_id:
        raise exception.Forbidden()
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if not is_user_context(context):
        # Admin (or empty) contexts are not restricted here.
        return
    if not context.user_id or context.user_id != user_id:
        raise exception.Forbidden()
def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    if not is_user_context(context):
        # Admin (or empty) contexts are not restricted here.
        return
    if not context.quota_class or context.quota_class != class_name:
        raise exception.Forbidden()
|
apache-2.0
|
wan-qy/shadowsocks
|
shadowsocks/manager.py
|
925
|
9692
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
# Buffer size for each recvfrom() on the UDP control socket.
BUF_SIZE = 1506
# Maximum number of per-port stat entries packed into one 'stat:' datagram
# before the batch is flushed (see Manager.handle_periodic).
STAT_SEND_LIMIT = 100
class Manager(object):
    """Multi-port server controller driven over a UDP control socket.

    Listens on ``manager_address`` (``host:port`` or a UNIX socket path)
    for ``add:``/``remove:``/``ping`` commands, keeps one TCP+UDP relay
    pair per configured port, and periodically pushes per-port transfer
    statistics back to the last control client as ``stat:`` datagrams.
    """

    def __init__(self, config):
        self._config = config
        self._relays = {}  # server_port -> (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)

        # Bytes transferred per port, accumulated since the last report.
        self._statistics = collections.defaultdict(int)
        self._control_client_addr = None
        try:
            manager_address = config['manager_address']
            if ':' in manager_address:
                # "host:port" -> UDP socket; resolve to pick the family.
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # A bare path means a UNIX domain datagram socket.
                addr = manager_address
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        self._loop.add_periodic(self.handle_periodic)

        # Bring up a relay pair for every pre-configured port.
        port_password = config['port_password']
        del config['port_password']
        for port, password in port_password.items():
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            self.add_port(a_config)

    def add_port(self, config):
        """Start a TCP+UDP relay pair for config['server_port']."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.error("server already exists at %s:%d" % (config['server'],
                                                              port))
            return
        logging.info("adding server at %s:%d" % (config['server'], port))
        t = tcprelay.TCPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        u = udprelay.UDPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        t.add_to_loop(self._loop)
        u.add_to_loop(self._loop)
        self._relays[port] = (t, u)

    def remove_port(self, config):
        """Tear down the relay pair listening on config['server_port']."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.info("removing server at %s:%d" % (config['server'], port))
            t, u = servers
            t.close(next_tick=False)
            u.close(next_tick=False)
            del self._relays[port]
        else:
            logging.error("server not exist at %s:%d" % (config['server'],
                                                         port))

    def handle_event(self, sock, fd, event):
        """Event-loop callback: process one control datagram, if any."""
        if sock == self._control_socket and event == eventloop.POLL_IN:
            data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
            parsed = self._parse_command(data)
            if parsed:
                command, config = parsed
                a_config = self._config.copy()
                if config:
                    # let the command override the configuration file
                    a_config.update(config)
                if 'server_port' not in a_config:
                    logging.error('can not find server_port in config')
                else:
                    if command == 'add':
                        self.add_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'remove':
                        self.remove_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'ping':
                        self._send_control_data(b'pong')
                    else:
                        logging.error('unknown command %s', command)

    def _parse_command(self, data):
        """Split a control datagram into (command, config-dict-or-None).

        Examples:
            add: {"server_port": 8000, "password": "foobar"}
            remove: {"server_port": 8000"}
        Returns None when the JSON payload cannot be parsed.
        """
        data = common.to_str(data)
        parts = data.split(':', 1)
        if len(parts) < 2:
            # Bare command without a JSON payload (e.g. "ping").
            return data, None
        command, config_json = parts
        try:
            config = shell.parse_json_in_str(config_json)
            return command, config
        except Exception as e:
            logging.error(e)
            return None

    def stat_callback(self, port, data_len):
        # Invoked by the relays for every chunk transferred on `port`.
        self._statistics[port] += data_len

    def handle_periodic(self):
        """Flush accumulated per-port statistics to the control client."""
        r = {}
        i = 0

        def send_data(data_dict):
            if data_dict:
                # use compact JSON format (without space)
                data = common.to_bytes(json.dumps(data_dict,
                                                  separators=(',', ':')))
                self._send_control_data(b'stat: ' + data)

        for k, v in self._statistics.items():
            r[k] = v
            i += 1
            # split the data into segments that fit in UDP packets
            if i >= STAT_SEND_LIMIT:
                send_data(r)
                r.clear()
                # BUGFIX: reset the batch counter. Previously i was never
                # reset, so once the first full batch was flushed every
                # following entry went out in its own one-entry datagram.
                i = 0
        send_data(r)
        self._statistics.clear()

    def _send_control_data(self, data):
        # Best-effort reply to the most recent control client, if any.
        if self._control_client_addr:
            try:
                self._control_socket.sendto(data, self._control_client_addr)
            except (socket.error, OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    # Transient; silently drop this report.
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()

    def run(self):
        """Run the event loop; blocks until the loop is stopped."""
        self._loop.run()
def run(config):
    """Build a Manager from *config* and run its event loop forever."""
    manager = Manager(config)
    manager.run()
def test():
    """Integration self-test.

    Boots a Manager on 127.0.0.1:6001 in a background thread and drives its
    UDP control protocol: add/remove ports, then verify TCP and UDP traffic
    statistics reports. Requires a working local network stack.
    """
    import time
    import threading
    import struct
    from shadowsocks import encrypt

    logging.basicConfig(level=5,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    enc = []
    eventloop.TIMEOUT_PRECISION = 1

    def run_server():
        config = {
            'server': '127.0.0.1',
            'local_port': 1081,
            'port_password': {
                '8381': 'foobar1',
                '8382': 'foobar2'
            },
            'method': 'aes-256-cfb',
            'manager_address': '127.0.0.1:6001',
            'timeout': 60,
            'fast_open': False,
            'verbose': 2
        }
        manager = Manager(config)
        # Hand the instance back to the main thread for inspection.
        enc.append(manager)
        manager.run()

    t = threading.Thread(target=run_server)
    t.start()
    time.sleep(1)
    manager = enc[0]

    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli.connect(('127.0.0.1', 6001))

    # test add and remove
    time.sleep(1)
    cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
    time.sleep(1)
    assert 7001 in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data

    cli.send(b'remove: {"server_port":8381}')
    time.sleep(1)
    assert 8381 not in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    logging.info('add and remove test passed')

    # test statistics for TCP
    header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
                               header + b'GET /\r\n\r\n')
    tcp_cli = socket.socket()
    tcp_cli.connect(('127.0.0.1', 7001))
    tcp_cli.send(data)
    tcp_cli.recv(4096)
    tcp_cli.close()

    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = shell.parse_json_in_str(data)
    assert '7001' in stats
    logging.info('TCP statistics test passed')

    # test statistics for UDP
    header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
                               header + b'test')
    udp_cli = socket.socket(type=socket.SOCK_DGRAM)
    udp_cli.sendto(data, ('127.0.0.1', 8382))
    # BUGFIX: close the UDP test socket. This line previously re-closed the
    # already-closed tcp_cli, leaking udp_cli.
    udp_cli.close()

    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = json.loads(data)
    assert '8382' in stats
    logging.info('UDP statistics test passed')

    manager._loop.stop()
    t.join()
# When executed directly, run the integration self-test above (binds real
# sockets on 127.0.0.1).
if __name__ == '__main__':
    test()
|
apache-2.0
|
edlabh/SickRage
|
lib/dateutil/rrule.py
|
17
|
58639
|
# -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <http://www.ietf.org/rfc/rfc2445.txt>`_,
including support for caching of results.
"""
import itertools
import datetime
import calendar
import sys
try:
from math import gcd
except ImportError:
from fractions import gcd
from six import advance_iterator, integer_types
from six.moves import _thread
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
FREQNAMES = ['YEARLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY','SECONDLY']
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(object):
    """A weekday constant, optionally qualified by an occurrence index *n*.

    ``weekday(0)`` is Monday (repr ``MO``); ``weekday(4, 2)`` means "the
    second Friday" (repr ``FR(+2)``). *n* may be negative but never zero.
    """

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        if n == 0:
            raise ValueError("Can't create weekday with n == 0")
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Calling an instance re-qualifies it, e.g. MO(+2). Reuse self
        # when the occurrence index is unchanged.
        return self if n == self.n else self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            return self.weekday == other.weekday and self.n == other.n
        except AttributeError:
            return False

    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        return "%s(%+d)" % (name, self.n) if self.n else name
# Unqualified singleton instances for each day of the week, plus the full
# tuple for iteration.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase(object):
    """Shared machinery for recurrence iterables.

    Subclasses implement ``_iter()``, a generator producing occurrences in
    ascending order; it is expected to set ``self._len`` once exhausted.
    This base class layers optional (thread-safe) caching, indexing,
    slicing, membership tests and the before/after/between helpers on top.
    """

    def __init__(self, cache=False):
        if cache:
            self._cache = []
            self._cache_lock = _thread.allocate_lock()
            self._cache_gen = self._iter()
            self._cache_complete = False
        else:
            self._cache = None
            self._cache_complete = False
        self._len = None

    def __iter__(self):
        if self._cache_complete:
            return iter(self._cache)
        elif self._cache is None:
            return self._iter()
        else:
            return self._iter_cached()

    def _iter_cached(self):
        """Iterate via the shared cache, growing it in chunks of 10.

        Several iterators may run concurrently over the same instance, so
        the cache is only grown while holding ``_cache_lock``.
        """
        i = 0
        gen = self._cache_gen
        cache = self._cache
        acquire = self._cache_lock.acquire
        release = self._cache_lock.release
        while gen:
            if i == len(cache):
                acquire()
                # BUGFIX: release the lock on *every* exit path. The two
                # ``break`` paths below previously returned with the lock
                # still held, deadlocking any other cached iterator on its
                # next acquire().
                try:
                    if self._cache_complete:
                        break
                    try:
                        for j in range(10):
                            cache.append(advance_iterator(gen))
                    except StopIteration:
                        self._cache_gen = gen = None
                        self._cache_complete = True
                        break
                finally:
                    release()
            yield cache[i]
            i += 1
        # Once complete, _len has been set by _iter(); drain the remainder.
        while i < self._len:
            yield cache[i]
            i += 1

    def __getitem__(self, item):
        if self._cache_complete:
            return self._cache[item]
        elif isinstance(item, slice):
            if item.step and item.step < 0:
                # Negative strides need the fully materialized sequence.
                return list(iter(self))[item]
            else:
                return list(itertools.islice(self,
                                             item.start or 0,
                                             item.stop or sys.maxsize,
                                             item.step or 1))
        elif item >= 0:
            gen = iter(self)
            try:
                for i in range(item + 1):
                    res = advance_iterator(gen)
            except StopIteration:
                raise IndexError
            return res
        else:
            return list(iter(self))[item]

    def __contains__(self, item):
        if self._cache_complete:
            return item in self._cache
        else:
            for i in self:
                if i == item:
                    return True
                elif i > item:
                    # Occurrences are ascending, so we can stop early.
                    return False
        return False

    # __len__() introduces a large performance penalty.
    def count(self):
        """Return the number of recurrences in this set.

        This will have to go through the whole recurrence once, if that
        hasn't been done before.
        """
        if self._len is None:
            for x in self:
                pass
        return self._len

    def before(self, dt, inc=False):
        """Return the last recurrence before the given datetime instance.

        The ``inc`` keyword defines what happens if ``dt`` is an
        occurrence. With ``inc=True``, ``dt`` itself is returned if it is
        an occurrence.
        """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        last = None
        if inc:
            for i in gen:
                if i > dt:
                    break
                last = i
        else:
            for i in gen:
                if i >= dt:
                    break
                last = i
        return last

    def after(self, dt, inc=False):
        """Return the first recurrence after the given datetime instance.

        The ``inc`` keyword defines what happens if ``dt`` is an
        occurrence. With ``inc=True``, ``dt`` itself is returned if it is
        an occurrence.
        """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        if inc:
            for i in gen:
                if i >= dt:
                    return i
        else:
            for i in gen:
                if i > dt:
                    return i
        return None

    def xafter(self, dt, count=None, inc=False):
        """
        Generator which yields up to `count` recurrences after the given
        datetime instance, equivalent to `after`.

        :param dt:
            The datetime at which to start generating recurrences.

        :param count:
            The maximum number of recurrences to generate. If `None` (default),
            dates are generated until the recurrence rule is exhausted.

        :param inc:
            If `dt` is an instance of the rule and `inc` is `True`, it is
            included in the output.

        :yields: Yields a sequence of `datetime` objects.
        """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self

        # Select the comparison function
        if inc:
            comp = lambda dc, dtc: dc >= dtc
        else:
            comp = lambda dc, dtc: dc > dtc

        # Generate dates
        n = 0
        for d in gen:
            if comp(d, dt):
                yield d

                if count is not None:
                    n += 1
                    if n >= count:
                        break

    def between(self, after, before, inc=False, count=1):
        """Return all the occurrences of the rrule between after and before.

        The inc keyword defines what happens if after and/or before are
        themselves occurrences. With inc=True, they will be included in the
        list, if they are found in the recurrence set.
        """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        started = False
        l = []
        if inc:
            for i in gen:
                if i > before:
                    break
                elif not started:
                    if i >= after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        else:
            for i in gen:
                if i >= before:
                    break
                elif not started:
                    if i > after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        return l
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is got
from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. If a recurrence instance happens to be the
same as the datetime instance given in the until keyword, this will
be the last occurrence.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
# Cache the original byxxx rules, if they are provided, as the _byxxx
# attributes do not necessarily map to the inputs, and this can be
# a problem in generating the strings. Only store things if they've
# been supplied (the string retrieval will just use .get())
self._original_rule = {}
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if self._bysetpos:
self._original_rule['bysetpos'] = self._bysetpos
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
self._original_rule['bymonth'] = None
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == MONTHLY:
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == WEEKLY:
byweekday = dtstart.weekday()
self._original_rule['byweekday'] = None
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
if 'bymonth' not in self._original_rule:
self._original_rule['bymonth'] = self._bymonth
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
self._original_rule['byyearday'] = self._byyearday
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
self._original_rule['byeaster'] = self._byeaster
else:
self._byeaster = None
# bymonthday
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
bymonthday = set(bymonthday) # Ensure it's unique
self._bymonthday = tuple(sorted([x for x in bymonthday if x > 0]))
self._bynmonthday = tuple(sorted([x for x in bymonthday if x < 0]))
# Storing positive numbers first, then negative numbers
if 'bymonthday' not in self._original_rule:
self._original_rule['bymonthday'] = tuple(
itertools.chain(self._bymonthday, self._bynmonthday))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
self._original_rule['byweekno'] = self._byweekno
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
orig_byweekday = [weekday(x) for x in self._byweekday]
else:
orig_byweekday = tuple()
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
orig_bynweekday = [weekday(*x) for x in self._bynweekday]
else:
orig_bynweekday = tuple()
if 'byweekday' not in self._original_rule:
self._original_rule['byweekday'] = tuple(itertools.chain(
orig_byweekday, orig_bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = set((dtstart.hour,))
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
self._original_rule['byhour'] = self._byhour
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = set((dtstart.minute,))
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
self._original_rule['byminute'] = self._byminute
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = ((dtstart.second,))
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
self._bysecond = set(bysecond)
if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
self._original_rule['bysecond'] = self._bysecond
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def __str__(self):
"""
Output a string that would generate this RRULE if passed to rrulestr.
This is mostly compatible with RFC2445, except for the
dateutil-specific extension BYEASTER.
"""
output = []
h, m, s = [None] * 3
if self._dtstart:
output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
h, m, s = self._dtstart.timetuple()[3:6]
parts = ['FREQ=' + FREQNAMES[self._freq]]
if self._interval != 1:
parts.append('INTERVAL=' + str(self._interval))
if self._wkst:
parts.append('WKST=' + str(self._wkst))
if self._count:
parts.append('COUNT=' + str(self._count))
if self._original_rule.get('byweekday') is not None:
# The str() method on weekday objects doesn't generate
# RFC2445-compliant strings, so we should modify that.
original_rule = dict(self._original_rule)
wday_strings = []
for wday in original_rule['byweekday']:
if wday.n:
wday_strings.append('{n:+d}{wday}'.format(
n=wday.n,
wday=repr(wday)[0:2]))
else:
wday_strings.append(repr(wday))
original_rule['byweekday'] = wday_strings
else:
original_rule = self._original_rule
partfmt = '{name}={vals}'
for name, key in [('BYSETPOS', 'bysetpos'),
('BYMONTH', 'bymonth'),
('BYMONTHDAY', 'bymonthday'),
('BYYEARDAY', 'byyearday'),
('BYWEEKNO', 'byweekno'),
('BYDAY', 'byweekday'),
('BYHOUR', 'byhour'),
('BYMINUTE', 'byminute'),
('BYSECOND', 'bysecond'),
('BYEASTER', 'byeaster')]:
value = original_rule.get(key)
if value:
parts.append(partfmt.format(name=name, vals=(','.join(str(v)
for v in value))))
output.append(';'.join(parts))
return '\n'.join(output)
    def _iter(self):
        """Generate the datetimes matched by this rule, in ascending order.

        This is the core generator behind rrulebase's caching/iteration
        machinery.  It walks candidate days one "period" (year, month, week
        or day, depending on freq) at a time, filters them against the
        BYxxx masks precomputed by _iterinfo, combines surviving days with
        the current timeset, then advances the cursor by the interval.
        """
        # Cursor fields driving the iteration, seeded from dtstart.
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()
        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond
        ii = _iterinfo(self)
        ii.rebuild(year, month)
        # Pick the day-set builder matching the frequency; sub-daily
        # frequencies still walk one day at a time.
        getdayset = {YEARLY: ii.ydayset,
                     MONTHLY: ii.mdayset,
                     WEEKLY: ii.wdayset,
                     DAILY: ii.ddayset,
                     HOURLY: ii.ddayset,
                     MINUTELY: ii.ddayset,
                     SECONDLY: ii.ddayset}[freq]
        if freq < HOURLY:
            # Daily or coarser: the set of times per day is fixed up front.
            timeset = self._timeset
        else:
            # Sub-daily: the timeset is regenerated as the cursor moves.
            gettimeset = {HOURLY: ii.htimeset,
                          MINUTELY: ii.mtimeset,
                          SECONDLY: ii.stimeset}[freq]
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                (freq >= MINUTELY and
                 self._byminute and minute not in self._byminute) or
                (freq >= SECONDLY and
                 self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)
        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)
            # Do the "hard" work ;-)
            # Knock out (set to None) every day rejected by a BYxxx mask.
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                    (byweekno and not ii.wnomask[i]) or
                    (byweekday and ii.wdaymask[i] not in byweekday) or
                    (ii.nwdaymask and not ii.nwdaymask[i]) or
                    (byeaster and not ii.eastermask[i]) or
                    ((bymonthday or bynmonthday) and
                     ii.mdaymask[i] not in bymonthday and
                     ii.nmdaymask[i] not in bynmonthday) or
                    (byyearday and
                     ((i < ii.yearlen and i+1 not in byyearday and
                       -ii.yearlen+i not in byyearday) or
                      (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
                       -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
                    dayset[i] = None
                    filtered = True
            # Output results
            if bysetpos and timeset:
                # BYSETPOS selects the Nth occurrence within this period.
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos-1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                             if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        total += 1
                        yield res
                        if count:
                            count -= 1
                            if not count:
                                self._len = total
                                return
            else:
                # No BYSETPOS: emit every surviving (day, time) combination.
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal + i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                total += 1
                                yield res
                                if count:
                                    count -= 1
                                    if not count:
                                        self._len = total
                                        return
            # Handle frequency and interval
            # Advance the cursor to the next period; fixday marks that the
            # day counter may have overflowed the month and needs fixing.
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                if wkst > weekday:
                    day += -(weekday+1+(6-wkst))+self._interval*7
                else:
                    day += -(weekday-wkst)+self._interval*7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23-hour)//interval)*interval
                if byhour:
                    ndays, hour = self.__mod_distance(value=hour,
                                                      byxxx=self._byhour,
                                                      base=24)
                else:
                    ndays, hour = divmod(hour+interval, 24)
                if ndays:
                    day += ndays
                    fixday = True
                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439-(hour*60+minute))//interval)*interval
                valid = False
                # After rep_rate//gcd steps the (hour, minute) sequence
                # repeats, so stop searching there.
                rep_rate = (24*60)
                for j in range(rep_rate // gcd(interval, rep_rate)):
                    if byminute:
                        nhours, minute = \
                            self.__mod_distance(value=minute,
                                                byxxx=self._byminute,
                                                base=60)
                    else:
                        nhours, minute = divmod(minute+interval, 60)
                    div, hour = divmod(hour+nhours, 24)
                    if div:
                        day += div
                        fixday = True
                        filtered = False
                    if not byhour or hour in byhour:
                        valid = True
                        break
                if not valid:
                    raise ValueError('Invalid combination of interval and ' +
                                     'byhour resulting in empty rule.')
                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399 - (hour * 3600 + minute * 60 + second))
                                // interval) * interval)
                rep_rate = (24 * 3600)
                valid = False
                for j in range(0, rep_rate // gcd(interval, rep_rate)):
                    if bysecond:
                        nminutes, second = \
                            self.__mod_distance(value=second,
                                                byxxx=self._bysecond,
                                                base=60)
                    else:
                        nminutes, second = divmod(second+interval, 60)
                    div, minute = divmod(minute+nminutes, 60)
                    if div:
                        hour += div
                        div, hour = divmod(hour, 24)
                        if div:
                            day += div
                            fixday = True
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute) and
                        (not bysecond or second in bysecond)):
                        valid = True
                        break
                if not valid:
                    raise ValueError('Invalid combination of interval, ' +
                                     'byhour and byminute resulting in empty' +
                                     ' rule.')
                timeset = gettimeset(hour, minute, second)
            if fixday and day > 28:
                # Normalize a day counter that ran past the end of the month.
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
ending position is not coprime with the greatest common denominator
between the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
This does not preserve the type of the iterable, returning a set, since
the values should be unique and the order is irrelevant, this will
speed up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exception:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
class _iterinfo(object):
    """Per-year/per-month cache of the day masks used by rrule._iter.

    rebuild() recomputes the masks lazily when the iteration cursor moves
    into a new year or month; the *dayset/*timeset methods then turn the
    masks into candidate day indices and times for one iteration period.
    Day indices are offsets from January 1st of the current year.
    """
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]

    def __init__(self, rrule):
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule

    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365 + calendar.isleap(year)
            self.nextyearlen = 365 + calendar.isleap(year + 1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()
            wday = datetime.date(year, 1, 1).weekday()
            # Pick the precomputed 365- or 366-day mask tables.
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE
            if not rr._byweekno:
                self.wnomask = None
            else:
                # Build the ISO-style week-number mask, honoring wkst.
                self.wnomask = [0]*(self.yearlen+7)
                # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1, 1, 1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst) % 7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen +
                                            (lyearweekday-rr._wkst) % 7) % 7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1
        # Mask for "Nth weekday of period" (e.g. +1MO, -1FR) rules; must be
        # rebuilt whenever the month changes, not just the year.
        if (rr._bynweekday and (month != self.lastmonth or
                                year != self.lastyear)):
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday) % 7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday) % 7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1
        if rr._byeaster:
            # Mark the days at the requested offsets from Easter Sunday.
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1
        self.lastyear = year
        self.lastmonth = month

    def ydayset(self, year, month, day):
        # YEARLY period: every day of the year is a candidate.
        return list(range(self.yearlen)), 0, self.yearlen

    def mdayset(self, year, month, day):
        # MONTHLY period: candidates are the days of the given month.
        dset = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            dset[i] = i
        return dset, start, end

    def wdayset(self, year, month, day):
        # We need to handle cross-year weeks here.
        dset = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            dset[i] = i
            i += 1
            # if (not (0 <= i < self.yearlen) or
            #     self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return dset, start, i

    def ddayset(self, year, month, day):
        # DAILY (and sub-daily) period: a single candidate day.
        dset = [None] * self.yearlen
        i = datetime.date(year, month, day).toordinal() - self.yearordinal
        dset[i] = i
        return dset, i, i + 1

    def htimeset(self, hour, minute, second):
        # Times for an HOURLY step: the fixed hour crossed with all
        # byminute/bysecond values.
        tset = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                tset.append(datetime.time(hour, minute, second,
                                          tzinfo=rr._tzinfo))
        tset.sort()
        return tset

    def mtimeset(self, hour, minute, second):
        # Times for a MINUTELY step: fixed hour and minute, all bysecond.
        tset = []
        rr = self.rrule
        for second in rr._bysecond:
            tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        tset.sort()
        return tset

    def stimeset(self, hour, minute, second):
        # Times for a SECONDLY step: exactly one time.
        return (datetime.time(hour, minute, second,
                              tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    """ The rruleset type allows more complex recurrence setups, mixing
    multiple rules, dates, exclusion rules, and exclusion dates. The type
    constructor takes the following keyword arguments:

    :param cache: If True, caching of results will be enabled, improving
                  performance of multiple queries considerably. """

    class _genitem(object):
        # Pairs a generator with its most recent value (.dt) so several
        # generators can sit in one list that is kept sorted by next date.
        def __init__(self, genlist, gen):
            try:
                # Only generators that yield at least one value join the
                # list; empty ones are dropped on the spot.
                self.dt = advance_iterator(gen)
                genlist.append(self)
            except StopIteration:
                pass
            self.genlist = genlist
            self.gen = gen

        def __next__(self):
            try:
                self.dt = advance_iterator(self.gen)
            except StopIteration:
                # Exhausted: remove ourselves from the merge list.
                self.genlist.remove(self)
        next = __next__

        # Ordering/equality delegate to the current datetime, so sorting a
        # list of _genitems orders them by next occurrence.
        def __lt__(self, other):
            return self.dt < other.dt

        def __gt__(self, other):
            return self.dt > other.dt

        def __eq__(self, other):
            return self.dt == other.dt

        def __ne__(self, other):
            return self.dt != other.dt

    def __init__(self, cache=False):
        super(rruleset, self).__init__(cache)
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []

    def rrule(self, rrule):
        """ Include the given :py:class:`rrule` instance in the recurrence set
            generation. """
        self._rrule.append(rrule)

    def rdate(self, rdate):
        """ Include the given :py:class:`datetime` instance in the recurrence
            set generation. """
        self._rdate.append(rdate)

    def exrule(self, exrule):
        """ Include the given rrule instance in the recurrence set exclusion
            list. Dates which are part of the given recurrence rules will not
            be generated, even if some inclusive rrule or rdate matches them.
        """
        self._exrule.append(exrule)

    def exdate(self, exdate):
        """ Include the given datetime instance in the recurrence set
            exclusion list. Dates included that way will not be generated,
            even if some inclusive rrule or rdate matches them. """
        self._exdate.append(exdate)

    def _iter(self):
        # Sorted merge of all inclusion generators, filtered against a
        # sorted merge of all exclusion generators; duplicates collapse
        # via the lastdt check.
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate))
        for gen in [iter(x) for x in self._rrule]:
            self._genitem(rlist, gen)
        rlist.sort()
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate))
        for gen in [iter(x) for x in self._exrule]:
            self._genitem(exlist, gen)
        exlist.sort()
        lastdt = None
        total = 0
        while rlist:
            ritem = rlist[0]
            if not lastdt or lastdt != ritem.dt:
                # Advance the exclusion merge until it catches up, then
                # emit ritem only if it is not excluded.
                while exlist and exlist[0] < ritem:
                    advance_iterator(exlist[0])
                    exlist.sort()
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            advance_iterator(ritem)
            rlist.sort()
        self._len = total
class _rrulestr(object):
    # Parser for RFC2445 (iCalendar) recurrence strings.  A single RRULE
    # line yields an rrule; multiple lines (or RDATE/EXRULE/EXDATE/DTSTART
    # properties, or forceset=True) yield an rruleset.  Exposed at module
    # level through the `rrulestr` singleton.

    _freq_map = {"YEARLY": YEARLY,
                 "MONTHLY": MONTHLY,
                 "WEEKLY": WEEKLY,
                 "DAILY": DAILY,
                 "HOURLY": HOURLY,
                 "MINUTELY": MINUTELY,
                 "SECONDLY": SECONDLY}

    _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
                    "FR": 4, "SA": 5, "SU": 6}

    # Generic handlers; each _handle_<PARAM> converts one RRULE parameter
    # into the matching rrule() keyword argument.
    def _handle_int(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = int(value)

    def _handle_int_list(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = [int(x) for x in value.split(',')]

    _handle_INTERVAL = _handle_int
    _handle_COUNT = _handle_int
    _handle_BYSETPOS = _handle_int_list
    _handle_BYMONTH = _handle_int_list
    _handle_BYMONTHDAY = _handle_int_list
    _handle_BYYEARDAY = _handle_int_list
    _handle_BYEASTER = _handle_int_list
    _handle_BYWEEKNO = _handle_int_list
    _handle_BYHOUR = _handle_int_list
    _handle_BYMINUTE = _handle_int_list
    _handle_BYSECOND = _handle_int_list

    def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
        rrkwargs["freq"] = self._freq_map[value]

    def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
        # The date parser is imported lazily to avoid a circular import.
        global parser
        if not parser:
            from dateutil import parser
        try:
            rrkwargs["until"] = parser.parse(value,
                                             ignoretz=kwargs.get("ignoretz"),
                                             tzinfos=kwargs.get("tzinfos"))
        except ValueError:
            raise ValueError("invalid until date")

    def _handle_WKST(self, rrkwargs, name, value, **kwargs):
        rrkwargs["wkst"] = self._weekday_map[value]

    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
        """
        Two ways to specify this: +1MO or MO(+1)
        """
        l = []
        for wday in value.split(','):
            if '(' in wday:
                # If it's of the form TH(+1), etc.
                splt = wday.split('(')
                w = splt[0]
                n = int(splt[1][:-1])
            else:
                # If it's of the form +1MO
                for i in range(len(wday)):
                    if wday[i] not in '+-0123456789':
                        break
                n = wday[:i] or None
                w = wday[i:]
            if n:
                n = int(n)
            l.append(weekdays[self._weekday_map[w]](n))
        rrkwargs["byweekday"] = l

    _handle_BYDAY = _handle_BYWEEKDAY

    def _parse_rfc_rrule(self, line,
                         dtstart=None,
                         cache=False,
                         ignoretz=False,
                         tzinfos=None):
        """Parse a single RRULE line (with or without the "RRULE:" prefix)
        into an rrule instance."""
        if line.find(':') != -1:
            name, value = line.split(':')
            if name != "RRULE":
                raise ValueError("unknown parameter name")
        else:
            value = line
        rrkwargs = {}
        for pair in value.split(';'):
            name, value = pair.split('=')
            name = name.upper()
            value = value.upper()
            try:
                # Dispatch to the matching _handle_<PARAM> method.
                getattr(self, "_handle_"+name)(rrkwargs, name, value,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos)
            except AttributeError:
                raise ValueError("unknown parameter '%s'" % name)
            except (KeyError, ValueError):
                raise ValueError("invalid '%s': %s" % (name, value))
        return rrule(dtstart=dtstart, cache=cache, **rrkwargs)

    def _parse_rfc(self, s,
                   dtstart=None,
                   cache=False,
                   unfold=False,
                   forceset=False,
                   compatible=False,
                   ignoretz=False,
                   tzinfos=None):
        """Parse a full RFC2445 snippet (possibly several properties) into
        either an rrule or an rruleset."""
        global parser
        if compatible:
            forceset = True
            unfold = True
        s = s.upper()
        if not s.strip():
            raise ValueError("empty string")
        if unfold:
            # RFC2445 line unfolding: a line starting with a space continues
            # the previous line.
            lines = s.splitlines()
            i = 0
            while i < len(lines):
                line = lines[i].rstrip()
                if not line:
                    del lines[i]
                elif i > 0 and line[0] == " ":
                    lines[i-1] += line[1:]
                    del lines[i]
                else:
                    i += 1
        else:
            lines = s.split()
        if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
                                                  s.startswith('RRULE:'))):
            # Fast path: a single bare RRULE yields a plain rrule.
            return self._parse_rfc_rrule(lines[0], cache=cache,
                                         dtstart=dtstart, ignoretz=ignoretz,
                                         tzinfos=tzinfos)
        else:
            rrulevals = []
            rdatevals = []
            exrulevals = []
            exdatevals = []
            for line in lines:
                if not line:
                    continue
                if line.find(':') == -1:
                    name = "RRULE"
                    value = line
                else:
                    name, value = line.split(':', 1)
                parms = name.split(';')
                if not parms:
                    raise ValueError("empty property name")
                name = parms[0]
                parms = parms[1:]
                if name == "RRULE":
                    for parm in parms:
                        raise ValueError("unsupported RRULE parm: "+parm)
                    rrulevals.append(value)
                elif name == "RDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError("unsupported RDATE parm: "+parm)
                    rdatevals.append(value)
                elif name == "EXRULE":
                    for parm in parms:
                        raise ValueError("unsupported EXRULE parm: "+parm)
                    exrulevals.append(value)
                elif name == "EXDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            # BUGFIX: this message previously said "RDATE",
                            # misreporting which property was at fault.
                            raise ValueError("unsupported EXDATE parm: "+parm)
                    exdatevals.append(value)
                elif name == "DTSTART":
                    for parm in parms:
                        raise ValueError("unsupported DTSTART parm: "+parm)
                    if not parser:
                        from dateutil import parser
                    dtstart = parser.parse(value, ignoretz=ignoretz,
                                           tzinfos=tzinfos)
                else:
                    raise ValueError("unsupported property: "+name)
            if (forceset or len(rrulevals) > 1 or rdatevals
                    or exrulevals or exdatevals):
                if not parser and (rdatevals or exdatevals):
                    from dateutil import parser
                rset = rruleset(cache=cache)
                for value in rrulevals:
                    rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                     ignoretz=ignoretz,
                                                     tzinfos=tzinfos))
                for value in rdatevals:
                    for datestr in value.split(','):
                        rset.rdate(parser.parse(datestr,
                                                ignoretz=ignoretz,
                                                tzinfos=tzinfos))
                for value in exrulevals:
                    rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                      ignoretz=ignoretz,
                                                      tzinfos=tzinfos))
                for value in exdatevals:
                    for datestr in value.split(','):
                        rset.exdate(parser.parse(datestr,
                                                 ignoretz=ignoretz,
                                                 tzinfos=tzinfos))
                if compatible and dtstart:
                    rset.rdate(dtstart)
                return rset
            else:
                return self._parse_rfc_rrule(rrulevals[0],
                                             dtstart=dtstart,
                                             cache=cache,
                                             ignoretz=ignoretz,
                                             tzinfos=tzinfos)

    def __call__(self, s, **kwargs):
        return self._parse_rfc(s, **kwargs)
# Module-level singleton: call rrulestr(s, ...) to parse an RFC2445 string
# into an rrule or rruleset.
rrulestr = _rrulestr()

# vim:ts=4:sw=4:et
|
gpl-3.0
|
thinkle/gourmet
|
gourmet/recipeIdentifier.py
|
1
|
9681
|
"""recipeIdentifier.py
This module contains code for creating hashes to identify recipes
based on title & instructions (recipe hash) or based on ingredients (ingredient hash).
The hash_recipe function is a convenience function that provides both a
recipe hash and an ingredient hash.
For individual hashes, use the get_recipe_hash and get_ingredient_hash
functions.
"""
from gi.repository import Gtk
import xml.sax.saxutils
from gourmet import convert
import hashlib, difflib, types, re
from gettext import gettext as _
from .gglobals import REC_ATTRS,TEXT_ATTR_DIC,INT_REC_ATTRS
# Image-related recipe attributes, diffed alongside the textual ones.
IMAGE_ATTRS = ['image','thumb']
# Every recipe attribute considered when diffing/merging recipes.
ALL_ATTRS = [r[0] for r in REC_ATTRS] + list(TEXT_ATTR_DIC.keys()) + IMAGE_ATTRS
# Fields that define a recipe's identity for the "recipe hash".
REC_FIELDS = ['title',
              'instructions',
              ]
# NOTE(review): ING_FIELDS is not referenced in this module -- confirm it
# is used elsewhere before removing.
ING_FIELDS = ['amount','unit']
# Units that ingredient amounts are normalized to before hashing.
STANDARD_UNITS = ['g.','ml.']
# Hash stuff.
def standardize_ingredient (ing_object, converter):
    """Return a normalized, lower-case string describing an ingredient.

    Amounts are converted to a standard unit (grams or milliliters) when
    the converter knows how, and rounded to the tens place, so trivially
    different expressions of the same ingredient produce the same string
    (and therefore the same ingredient hash).

    :param ing_object: ingredient row with item/ingkey/unit/amount attributes.
    :param converter: a gourmet.convert converter instance.
    """
    # Prefer the display name; fall back to the ingredient key.
    if ing_object.item:
        ing = ing_object.item
    else:
        ing = ing_object.ingkey
    unit,amount = ing_object.unit,ing_object.amount
    gconv = converter.converter(unit,'g.')
    vconv = converter.converter(unit,'ml.')
    if not (gconv or vconv):
        # Retry with item-specific conversion data (densities, unit sizes).
        gconv = converter.converter(unit,'g.',ing)
        vconv = converter.converter(unit,'ml.',ing)
    if gconv:
        unit = 'g.'
        if amount: amount = amount*gconv
    elif vconv:
        unit = 'ml.'
        if amount: amount = amount*vconv
    # Use the module constant instead of a duplicated literal list, so the
    # set of standard units is defined in exactly one place.
    if unit in STANDARD_UNITS:
        # Round to the 10s place...
        if amount:
            amount = round(amount,-1)
    istring = "%s %s %s"%(amount,unit,ing)
    return istring.lower()
def get_ingredient_hash (ings, conv):
    """Return an md5 hex digest identifying a list of ingredients.

    Ingredient order is irrelevant: standardized strings are sorted
    before hashing.
    """
    standardized = sorted(standardize_ingredient(i, conv) for i in ings)
    joined = '\n'.join(standardized)
    return hashlib.md5(joined.encode('utf8')).hexdigest()
def get_recipe_hash (recipe_object):
    """Return an md5 hex digest of a recipe's identity fields.

    Identity is based on the attributes listed in REC_FIELDS (title and
    instructions), joined, stripped and lower-cased.
    """
    pieces = [getattr(recipe_object, field) for field in REC_FIELDS]
    text = '\n'.join(p for p in pieces if p).strip().lower()
    return hashlib.md5(text.encode('utf8')).hexdigest()
def hash_recipe (rec, rd, conv=None):
    """Return the (recipe_hash, ingredient_hash) pair for recipe *rec*."""
    conv = conv or convert.get_converter()
    return get_recipe_hash(rec), get_ingredient_hash(rd.get_ings(rec), conv)
# Diff stuff
# Convenience methods
def format_ing_text (ing_alist,rd,conv=None):
    """Render an ordered ingredient association-list as escaped markup text.

    *ing_alist* is a list of (group, ingredients) pairs as produced by
    rd.order_ings; the result has one line per ingredient, with group
    headers underlined.
    """
    lines = []
    for group, group_ings in ing_alist:
        if group:
            lines.append('\n<u>' + group + '</u>')
        for ing in group_ings:
            words = []
            amount, unit = rd.get_amount_and_unit(ing, conv=conv)
            if amount:
                words.append(amount)
            if unit:
                words.append(unit)
            if ing.item:
                words.append(ing.item)
            # optional may be stored as a boolean or as the string 'yes'
            if (not isinstance(ing.optional, str) and ing.optional) or ing.optional == 'yes':
                words.append(_('(Optional)'))
            if ing.refid:
                words.append('=>%s' % ing.refid)
            if ing.ingkey:
                words.append('key=%s' % ing.ingkey)
            lines.append(xml.sax.saxutils.escape(' '.join(words)))
    return '\n'.join(lines).strip()
def format_ings (rec, rd):
    """Return the formatted ingredient text for recipe *rec*."""
    return format_ing_text(rd.order_ings(rd.get_ings(rec)), rd)
def apply_line_markup (line, markup):
    """Wrap the characters of *line* in <add>/<del> tags per *markup*.

    *markup* is a difflib intraline guide string: '+' marks an added
    character, '-' a removed one; any other code leaves the character
    untagged.  A markup string shorter than the line is padded with
    spaces.
    """
    if len(markup) < len(line):
        markup = markup + ' ' * (len(line) - len(markup))
    pieces = []
    open_tag = ''
    for ch, code in zip(line, markup):
        if code == '+':
            tag = 'add'
        elif code == '-':
            tag = 'del'
        else:
            # spaces, newlines and unrecognized diff codes get no markup
            tag = None
        if tag != open_tag:
            if open_tag:
                pieces.append('</%s>' % open_tag)
            if tag:
                pieces.append('<%s>' % tag)
            open_tag = tag
        pieces.append(ch)
    if open_tag:
        pieces.append('</%s>' % open_tag)
    return ''.join(pieces)
def get_diff_markup (s1,s2):
    """Diff two line sequences with difflib.ndiff, folding '? ' guide
    lines into intraline markup on the preceding entry.

    Returns a list of [code, line] pairs, code being '  ', '- ' or '+ '.
    """
    result = []
    for raw in difflib.ndiff(s1, s2):
        code, text = raw[:2], raw[2:]
        if code == '? ':
            # Guide line: annotate the previous line instead of keeping it.
            result[-1][1] = apply_line_markup(result[-1][1], text)
        else:
            result.append([code, text])
    return result
def get_two_columns (s1,s2):
    """Get two columns with diff markup on them."""
    left, right = [], []
    for code, line in get_diff_markup(s1, s2):
        if code == '- ':
            left.append('<diff>' + line + '</diff>')
        elif code == '+ ':
            right.append('<diff>' + line + '</diff>')
        elif code == '  ':
            # Re-align the two columns with empty markers before emitting
            # a common line on both sides.
            while len(left) < len(right):
                left.append('<diff/>')
            while len(right) < len(left):
                right.append('<diff/>')
            left.append(line)
            right.append(line)
    return left, right
def diff_ings (rd,rec1,rec2):
    """Return a two-column ingredient diff, or None when identical."""
    text1 = format_ings(rec1, rd)
    text2 = format_ings(rec2, rd)
    if text1 == text2:
        return None
    return get_two_columns(text1.splitlines(), text2.splitlines())
def diff_recipes (rd,recs):
    """Return {attr: [value per recipe]} for attributes that differ.

    Attributes where every value is identical are not differences; nor
    are attributes where every value is falsy (we don't distinguish
    None from "" or 0).
    """
    diffs = {}
    for attr in ALL_ATTRS:
        if attr == 'category':
            vals = [', '.join(rd.get_cats(r)) for r in recs]
        else:
            vals = [getattr(r, attr) for r in recs]
        if vals != [vals[0]] * len(vals) and any(bool(v) for v in vals):
            diffs[attr] = vals
    return diffs
def merge_recipes (rd, recs):
    """Return two dictionaries representing the differences between recs.
    The first dictionary contains items that are blank in one recipe
    but not the other. The second dictionary contains conflicts."""
    diffs = diff_recipes(rd,recs)
    my_recipe = {}
    # Now we loop through the recipe and remove any attributes that
    # are blank in one recipe from diffs and put them instead into
    # my_recipe.
    # (list() is required because we delete keys from diffs inside the loop.)
    for attr,vals in list(diffs.items()):
        value = None
        conflict = False
        for v in vals:
            if not v:
                # Blank values never conflict with anything.
                continue
            elif not value:
                value = v
            elif v != value:
                # Strings differing only in case are treated as equal.
                if ((isinstance(v, str) and isinstance(value, str))
                    and v.lower()==value.lower()):
                    continue
                else:
                    conflict = True
                    break
        if conflict: continue
        else:
            # Unambiguous value: move the attribute from diffs into the
            # merged recipe (or drop it entirely if all values were blank).
            if value: my_recipe[attr]=value
            del diffs[attr]
    return my_recipe,diffs
def format_ingdiff_line (s):
    """Convert internal diff/ingredient markup into pango display markup."""
    # Show the ingredient key as an italicized parenthetical, keeping any
    # trailing </diff> intact via the lookahead variant when present.
    if re.search('key=(.*)(?=</diff>)', s):
        s = re.sub('key=(.*)(?=</diff>)', '<i>(\\1)</i>', s)
    else:
        s = re.sub('key=(.*)', '<i>(\\1)</i>', s)
    # Translate our placeholder tags into pango spans (order matters:
    # '</diff>' must be handled before '<diff/>').
    replacements = [
        ('<diff>', '<span background="#ffff80" foreground="#000">'),
        ('</diff>', '</span>'),
        ('<diff/>', ''),
        #('<del>', '<span color="red" strikethrough="true">'),
        ('<del>', '<span weight="bold" color="red">'),
        ('</del>', '</span>'),
        ('<add>', '<span weight="bold" color="red">'),
        ('</add>', '</span>'),
    ]
    for old, new in replacements:
        s = s.replace(old, new)
    return s
def show_ing_diff (idiff):
    """Build a two-column Gtk.TreeView displaying an ingredient diff."""
    left_col, right_col = idiff
    store = Gtk.ListStore(str, str)
    for n in range(len(left_col)):
        store.append([format_ingdiff_line(left_col[n]),
                      format_ingdiff_line(right_col[n])])
    view = Gtk.TreeView()
    renderer = Gtk.CellRendererText()
    view.append_column(Gtk.TreeViewColumn('Left', renderer, markup=0))
    view.append_column(Gtk.TreeViewColumn('Right', renderer, markup=1))
    view.set_model(store)
    return view
if __name__ == '__main__':
    # Ad-hoc duplicate-detection experiment; the interesting logic is
    # commented out below and kept only for manual debugging sessions.
    import recipeManager
    rd = recipeManager.default_rec_manager()
    # NOTE(review): r1/r2 look like leftover recipe IDs for the experiment
    # below -- confirm before removing.
    r1 = 33
    r2 = 241
    #empty_hash = get_ingredient_hash([],None)
    #rr = {}; ii = {}; ir = {}; count = 0
    # for rec in rd.fetch_all(rd.recipe_table,deleted=False):
    #     count += 1
    #     rh,ih = hash_recipe(rec,rd)
    #     ch = rh+ih
    #     if count % 10 == 0: print count,rec.id,ch
    #     #print ch,rec.id
    #     if ir.has_key(ch):
    #         print rec.id,rec.title,'is a complete duplicate of',ir[ch].id,ir[ch].title
    #         print 'Merge would be: ',merge_recipes(rd,[rec,ir[ch]])
    #     else:
    #         ir[ch]=rec
    #     if rr.has_key(rh):
    #         print rec.id,rec.title,'duplicates',rr[rh].id,rr[rh].title
    #         rdiff = diff_recipes(rd,[rec,rr[rh]])
    #         idiff = diff_ings(rd,rec,rr[rh])
    #         if (not rdiff) and (not idiff):
    #             print 'PERFECT DUPS!'
    #         if rdiff:
    #             print 'Rec Diff'
    #             for k,v in rdiff.items(): print '%s: %s\t%s'%(k,v[0],v[1])
    #         if idiff:
    #             tv = show_ing_diff(idiff)
    #             w = Gtk.Window()
    #             w.add(tv)
    #             w.show_all()
    #             w.connect('delete-event',Gtk.main_quit)
    #             Gtk.main()
    #             left,right = idiff
    #             print 'ING DIFF\n----------\n'
    #             for n in range(len(left)):
    #                 print left[n],right[n]
    #     else:
    #         rr[rh]=rec
    #     if ii.has_key(ih) and ih != empty_hash:
    #         print rec.id,rec.title,'duplicates ings',ii[ih].id,ii[ih].title
    #     else:
    #         ii[ih]=rec
|
gpl-2.0
|
axinging/chromium-crosswalk
|
native_client_sdk/src/build_tools/tests/sdktools_config_test.py
|
160
|
1609
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import sdk_tools.config as config
class TestSdkToolsConfig(unittest.TestCase):
  """Tests for sdk_tools.config.Config JSON (de)serialization."""

  def testInvalidSyntax(self):
    """LoadJson should raise config.Error on malformed JSON input."""
    invalid_json = "# oops\n"
    cfg = config.Config()
    # Pass the callable and its argument directly instead of wrapping in a
    # lambda; assertRaises reports failures more clearly this way.
    self.assertRaises(config.Error, cfg.LoadJson, invalid_json)

  def testEmptyConfig(self):
    """Test that empty config contains just empty sources list."""
    expected = '{\n  "sources": []\n}'
    cfg = config.Config()
    json_output = cfg.ToJson()
    self.assertEqual(json_output, expected)

  def testIntegerSetting(self):
    """Arbitrary settings loaded from JSON become config attributes."""
    json_input = '{ "setting": 3 }'
    cfg = config.Config()
    cfg.LoadJson(json_input)
    self.assertEqual(cfg.setting, 3)

  def testReadWrite(self):
    """Round-tripping always normalizes to include the sources list."""
    json_input1 = '{\n  "sources": [], \n  "setting": 3\n}'
    json_input2 = '{\n  "setting": 3\n}'
    for json_input in (json_input1, json_input2):
      cfg = config.Config()
      cfg.LoadJson(json_input)
      json_output = cfg.ToJson()
      self.assertEqual(json_output, json_input1)

  def testAddSource(self):
    """AddSource adds a URL to the serialized sources list."""
    cfg = config.Config()
    cfg.AddSource('http://localhost/foo')
    json_output = cfg.ToJson()
    expected = '{\n  "sources": [\n    "http://localhost/foo"\n  ]\n}'
    self.assertEqual(json_output, expected)
if __name__ == '__main__':
  # Allow running this test file directly.
  unittest.main()
|
bsd-3-clause
|
wwj718/edx-platform
|
lms/djangoapps/rss_proxy/tests/test_views.py
|
42
|
2777
|
"""
Tests for the rss_proxy views
"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from mock import patch, Mock
from rss_proxy.models import WhitelistedRssUrl
class RssProxyViewTests(TestCase):
    """ Tests for the rss_proxy views """

    def setUp(self):
        super(RssProxyViewTests, self).setUp()
        self.whitelisted_url1 = 'http://www.example.com'
        self.whitelisted_url2 = 'http://www.example.org'
        self.non_whitelisted_url = 'http://www.example.net'
        self.rss = '''
            <?xml version="1.0" encoding="utf-8" ?>
            <rss version="2.0">
                <channel>
                    <title></title>
                    <link>http://www.example.com/rss</link>
                    <description></description>
                    <language>en</language>
                    <item>
                        <title>Example</title>
                        <link>http://www.example.com/rss/item</link>
                        <description>Example item description</description>
                        <pubDate>Fri, 13 May 1977 00:00:00 +0000</pubDate>
                    </item>
                </channel>
            </rss>
        '''
        WhitelistedRssUrl.objects.create(url=self.whitelisted_url1)
        WhitelistedRssUrl.objects.create(url=self.whitelisted_url2)

    @patch('rss_proxy.views.requests.get')
    def test_proxy_with_whitelisted_url(self, mock_requests_get):
        """
        Test the proxy view with a whitelisted URL
        """
        mock_requests_get.return_value = Mock(status_code=200, content=self.rss)
        resp = self.client.get('%s?url=%s' % (reverse('rss_proxy:proxy'), self.whitelisted_url1))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['Content-Type'], 'application/xml')
        self.assertEqual(resp.content, self.rss)

    @patch('rss_proxy.views.requests.get')
    def test_proxy_with_whitelisted_url_404(self, mock_requests_get):
        """
        Test the proxy view with a whitelisted URL that is not found
        """
        mock_requests_get.return_value = Mock(status_code=404)
        resp = self.client.get('%s?url=%s' % (reverse('rss_proxy:proxy'), self.whitelisted_url2))
        # BUGFIX: removed leftover debug `print` statements here; they
        # polluted the test output and used Python-2-only syntax.
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp['Content-Type'], 'application/xml')
        self.assertEqual(resp.content, '')

    def test_proxy_with_non_whitelisted_url(self):
        """
        Test the proxy view with a non-whitelisted URL
        """
        resp = self.client.get('%s?url=%s' % (reverse('rss_proxy:proxy'), self.non_whitelisted_url))
        self.assertEqual(resp.status_code, 404)
|
agpl-3.0
|
timabell/gpodder
|
src/gpodder/gtkui/frmntl/preferences.py
|
1
|
9136
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import gpodder
_ = gpodder.gettext
N_ = gpodder.ngettext
from gpodder import util
from gpodder.gtkui.interface.common import BuilderWidget
from gpodder.gtkui.interface.common import Orientation
from gpodder.gtkui.frmntl.portrait import FremantleRotation
import hildon
class gPodderPreferences(BuilderWidget):
    """Fremantle (Maemo 5) preferences dialog built on Hildon widgets."""

    # (value in minutes, caption) pairs shown in the update-interval picker;
    # 0 means no automatic feed updates.
    UPDATE_INTERVALS = (
        (0, _('manually')),
        (20, N_('every %d minute', 'every %d minutes', 20) % 20),
        (60, _('hourly')),
        (60*6, N_('every %d hour', 'every %d hours', 6) % 6),
        (60*24, _('daily')),
    )

    # (config value, caption) pairs for the auto-download behaviour picker.
    DOWNLOAD_METHODS = (
        ('never', _('Show episode list')),
        ('queue', _('Add to download list')),
        # ('wifi', _('Download when on Wi-Fi')),
        ('always', _('Download immediately')),
    )

    # (config value, caption) pairs for the audio player picker.
    AUDIO_PLAYERS = (
        ('default', _('Media Player')),
        ('panucci', _('Panucci')),
    )

    # (config value, caption) pairs for the video player picker.
    VIDEO_PLAYERS = (
        ('default', _('Media Player')),
        ('mplayer', _('MPlayer')),
    )

    def new(self):
        """Build all pickers/buttons and sync them with the current config."""
        # NOTE: the lambda parameter deliberately shadows ``self`` -- the
        # dialog instance is passed back in as the extra connect() argument.
        self.main_window.connect('destroy', lambda w, self: self.callback_finished(), self)

        self.wiki_button = self.main_window.add_button(_('User manual'), 1)
        self.wiki_button.connect('clicked', self.on_wiki_activate)

        self.about_button = self.main_window.add_button(_('About'), 2)
        self.about_button.connect('clicked', self.on_itemAbout_activate)

        # Screen-orientation picker, preselected from the saved rotation mode.
        self.touch_selector_orientation = hildon.TouchSelector(text=True)
        for caption in FremantleRotation.MODE_CAPTIONS:
            self.touch_selector_orientation.append_text(caption)
        self.touch_selector_orientation.set_active(0, self._config.rotation_mode)
        self.picker_orientation.set_selector(self.touch_selector_orientation)

        # A disabled auto-update is represented as frequency 0 in the picker.
        if not self._config.auto_update_feeds:
            self._config.auto_update_frequency = 0

        # Create a mapping from minute values to touch selector indices
        minute_index_mapping = dict((b, a) for a, b in enumerate(x[0] for x in self.UPDATE_INTERVALS))

        self.touch_selector_interval = hildon.TouchSelector(text=True)
        for value, caption in self.UPDATE_INTERVALS:
            self.touch_selector_interval.append_text(caption)
        interval = self._config.auto_update_frequency
        if interval in minute_index_mapping:
            self._custom_interval = 0
            self.touch_selector_interval.set_active(0, minute_index_mapping[interval])
        else:
            # The configured interval is not one of the presets: append an
            # extra "custom" row and remember the value so it can be restored.
            self._custom_interval = self._config.auto_update_frequency
            self.touch_selector_interval.append_text(_('every %d minutes') % interval)
            self.touch_selector_interval.set_active(0, len(self.UPDATE_INTERVALS))
        self.picker_interval.set_selector(self.touch_selector_interval)

        # Create a mapping from download methods to touch selector indices
        download_method_mapping = dict((b, a) for a, b in enumerate(x[0] for x in self.DOWNLOAD_METHODS))

        self.touch_selector_download = hildon.TouchSelector(text=True)
        for value, caption in self.DOWNLOAD_METHODS:
            self.touch_selector_download.append_text(caption)
        # Fall back to the first method if the stored value is unknown.
        if self._config.auto_download not in (x[0] for x in self.DOWNLOAD_METHODS):
            self._config.auto_download = self.DOWNLOAD_METHODS[0][0]
        self.touch_selector_download.set_active(0, download_method_mapping[self._config.auto_download])
        self.picker_download.set_selector(self.touch_selector_download)

        # Create a mapping from audio players to touch selector indices
        audio_player_mapping = dict((b, a) for a, b in enumerate(x[0] for x in self.AUDIO_PLAYERS))

        self.touch_selector_audio_player = hildon.TouchSelector(text=True)
        for value, caption in self.AUDIO_PLAYERS:
            self.touch_selector_audio_player.append_text(caption)
        if self._config.player not in (x[0] for x in self.AUDIO_PLAYERS):
            self._config.player = self.AUDIO_PLAYERS[0][0]
        self.touch_selector_audio_player.set_active(0, audio_player_mapping[self._config.player])
        self.picker_audio_player.set_selector(self.touch_selector_audio_player)

        # Create a mapping from video players to touch selector indices
        video_player_mapping = dict((b, a) for a, b in enumerate(x[0] for x in self.VIDEO_PLAYERS))

        self.touch_selector_video_player = hildon.TouchSelector(text=True)
        for value, caption in self.VIDEO_PLAYERS:
            self.touch_selector_video_player.append_text(caption)
        if self._config.videoplayer not in (x[0] for x in self.VIDEO_PLAYERS):
            self._config.videoplayer = self.VIDEO_PLAYERS[0][0]
        self.touch_selector_video_player.set_active(0, video_player_mapping[self._config.videoplayer])
        self.picker_video_player.set_selector(self.touch_selector_video_player)

        self.update_button_mygpo()

        # Fix the styling and layout of the picker buttons
        for button in (self.picker_orientation, \
                       self.picker_interval, \
                       self.picker_download, \
                       self.picker_audio_player, \
                       self.picker_video_player, \
                       self.button_mygpo):
            # Work around Maemo bug #4718
            button.set_name('HildonButton-finger')
            # Fix alignment problems (Maemo bug #6205)
            button.set_alignment(.0, .5, 1., 0.)
            child = button.get_child()
            child.set_padding(0, 0, 12, 0)

        self.check_feed_update_skipping = hildon.CheckButton(gtk.HILDON_SIZE_FINGER_HEIGHT)
        self.check_feed_update_skipping.set_label(_('Enable feed update heuristics'))
        self._config.connect_gtk_togglebutton('feed_update_skipping', self.check_feed_update_skipping)
        self.pannable_vbox.add(self.check_feed_update_skipping)
        self.pannable_vbox.reorder_child(self.check_feed_update_skipping, 6)

        self.check_view_all_episodes = hildon.CheckButton(gtk.HILDON_SIZE_FINGER_HEIGHT)
        self.check_view_all_episodes.set_label(_('Show "All episodes" view'))
        self._config.connect_gtk_togglebutton('podcast_list_view_all', self.check_view_all_episodes)
        self.pannable_vbox.add(self.check_view_all_episodes)
        self.pannable_vbox.reorder_child(self.check_view_all_episodes, 2)

        self.gPodderPreferences.show_all()

    def on_window_orientation_changed(self, orientation):
        """Hide the extra dialog buttons in portrait mode to save space."""
        if orientation == Orientation.PORTRAIT:
            self.wiki_button.hide()
            self.about_button.hide()
        else:
            self.wiki_button.show()
            self.about_button.show()

    def on_picker_orientation_value_changed(self, *args):
        """Persist the newly selected rotation mode."""
        self._config.rotation_mode = self.touch_selector_orientation.get_active(0)

    def on_picker_interval_value_changed(self, *args):
        """Persist the update interval; index past the presets means custom."""
        active_index = self.touch_selector_interval.get_active(0)
        if active_index < len(self.UPDATE_INTERVALS):
            new_frequency = self.UPDATE_INTERVALS[active_index][0]
        else:
            new_frequency = self._custom_interval

        if new_frequency == 0:
            self._config.auto_update_feeds = False
        self._config.auto_update_frequency = new_frequency
        if new_frequency > 0:
            self._config.auto_update_feeds = True

    def on_picker_download_value_changed(self, *args):
        """Persist the selected auto-download method."""
        active_index = self.touch_selector_download.get_active(0)
        new_value = self.DOWNLOAD_METHODS[active_index][0]
        self._config.auto_download = new_value

    def on_picker_audio_player_value_changed(self, *args):
        """Persist the selected audio player."""
        active_index = self.touch_selector_audio_player.get_active(0)
        new_value = self.AUDIO_PLAYERS[active_index][0]
        self._config.player = new_value

    def on_picker_video_player_value_changed(self, *args):
        """Persist the selected video player."""
        active_index = self.touch_selector_video_player.get_active(0)
        new_value = self.VIDEO_PLAYERS[active_index][0]
        self._config.videoplayer = new_value

    def update_button_mygpo(self):
        """Show the gpodder.net username on the button, or a placeholder."""
        if self._config.mygpo_username:
            self.button_mygpo.set_value(self._config.mygpo_username)
        else:
            self.button_mygpo.set_value(_('Not logged in'))

    def on_button_mygpo_clicked(self, button):
        """Run the gpodder.net login flow, then refresh the button label."""
        self.mygpo_login()
        self.update_button_mygpo()
|
gpl-3.0
|
so0k/zulip
|
zerver/lib/bugdown/codehilite.py
|
116
|
8441
|
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/code_hilite.html>
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details)
Dependencies:
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments](http://pygments.org/)
"""
import markdown
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer
from pygments.formatters import HtmlFormatter
pygments = True
except ImportError:
pygments = False
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
    """
    Determine language of source code, and pass it into the pygments highlighter.

    Basic Usage:
        >>> code = CodeHilite(src = 'some text')
        >>> html = code.hilite()

    * src: Source string or any object with a .readline attribute.

    * force_linenos: (Boolean) Force line numbering 'on' (True) or 'off' (False).
      If not specified, number lines iff a shebang line is present.

    * guess_lang: (Boolean) Turn language auto-detection 'on' or 'off' (on by default).

    * css_class: Set class name of wrapper div ('codehilite' by default).

    Low Level Usage:
        >>> code = CodeHilite()
        >>> code.src = 'some text' # String or anything with a .readline attr.
        >>> code.linenos = True # True or False; turns line numbering on or off.
        >>> html = code.hilite()
    """

    def __init__(self, src=None, force_linenos=None, guess_lang=True,
                 css_class="codehilite", lang=None, style='default',
                 noclasses=False, tab_length=4):
        self.src = src
        self.lang = lang
        # None means "decide from shebang"; _getLang() may flip this to True.
        self.linenos = force_linenos
        self.guess_lang = guess_lang
        self.css_class = css_class
        self.style = style
        self.noclasses = noclasses
        self.tab_length = tab_length

    def hilite(self):
        """
        Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
        optional line numbers. The output should then be styled with css to
        your liking. No styles are applied by default - only styling hooks
        (i.e.: <span class="k">).

        returns : A string of html.
        """
        self.src = self.src.strip('\n')

        # Detect the language from a shebang/colon marker if not given.
        if self.lang is None:
            self._getLang()

        if pygments:
            try:
                lexer = get_lexer_by_name(self.lang)
            except ValueError:
                # Unknown language name: fall back to auto-detection, and to
                # plain text if even that fails.
                try:
                    if self.guess_lang:
                        lexer = guess_lexer(self.src)
                    else:
                        lexer = TextLexer()
                except ValueError:
                    lexer = TextLexer()
            formatter = HtmlFormatter(linenos=bool(self.linenos),
                                      cssclass=self.css_class,
                                      style=self.style,
                                      noclasses=self.noclasses)
            return highlight(self.src, lexer, formatter)
        else:
            # just escape and build markup usable by JS highlighting libs
            txt = self.src.replace('&', '&amp;')
            txt = txt.replace('<', '&lt;')
            txt = txt.replace('>', '&gt;')
            txt = txt.replace('"', '&quot;')
            classes = []
            if self.lang:
                classes.append('language-%s' % self.lang)
            if self.linenos:
                classes.append('linenums')
            class_str = ''
            if classes:
                class_str = ' class="%s"' % ' '.join(classes)
            return '<pre class="%s"><code%s>%s</code></pre>\n'% \
                   (self.css_class, class_str, txt)

    def _getLang(self):
        """
        Determine the language of a code block from its shebang line, and
        decide whether that line should be removed or left in place. If the
        shebang line contains a path (even a single /) then it is assumed to
        be a real shebang line and left alone. However, if no path is given
        (i.e.: #!python or :::python) then it is assumed to be a mock shebang
        for language identification of a code fragment and removed from the
        code block prior to processing for code highlighting. When a mock
        shebang (i.e.: #!python) is found, line numbering is turned on. When
        colons are found in place of a shebang (i.e.: :::python), line
        numbering is left in the current state - off by default.
        """

        import re

        #split text into lines
        lines = self.src.split("\n")
        #pull first line to examine
        fl = lines.pop(0)

        c = re.compile(r'''
            (?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons.
            (?P<path>(?:/\w+)*[/ ])?        # Zero or 1 path
            (?P<lang>[\w+-]*)               # The language
            ''', re.VERBOSE)

        # search first line for shebang
        m = c.search(fl)
        if m:
            # we have a match
            try:
                self.lang = m.group('lang').lower()
            except IndexError:
                self.lang = None
            if m.group('path'):
                # path exists - restore first line
                lines.insert(0, fl)
            if m.group('shebang') and self.linenos is None:
                # shebang exists - use line numbers
                self.linenos = True
        else:
            # No match
            lines.insert(0, fl)

        self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """ Hilight source code in code blocks. """

    def run(self, root):
        """ Find code blocks and store in htmlStash.

        Replaces each ``<pre><code>...</code></pre>`` element with a
        placeholder ``<p>`` whose text is the stashed highlighted HTML.
        """
        # Element.getiterator()/getchildren() were deprecated and removed in
        # Python 3.9; iter() and list(elem) work on all supported versions.
        # Materialize the list so mutating elements doesn't affect traversal.
        blocks = list(root.iter('pre'))
        for block in blocks:
            children = list(block)
            if len(children) == 1 and children[0].tag == 'code':
                code = CodeHilite(children[0].text,
                                  force_linenos=self.config['force_linenos'],
                                  guess_lang=self.config['guess_lang'],
                                  css_class=self.config['css_class'],
                                  style=self.config['pygments_style'],
                                  noclasses=self.config['noclasses'],
                                  tab_length=self.markdown.tab_length)
                placeholder = self.markdown.htmlStash.store(code.hilite(),
                                                            safe=True)
                # Clear codeblock in etree instance
                block.clear()
                # Change to p element which will later
                # be removed when inserting raw html
                block.tag = 'p'
                block.text = placeholder
class CodeHiliteExtension(markdown.Extension):
    """ Add source code hilighting to markdown codeblocks. """

    def __init__(self, configs):
        """Set up default configuration and apply user-supplied overrides."""
        # define default configs
        self.config = {
            'force_linenos' : [None, "Force line numbers - Default: detect based on shebang"],
            'guess_lang' : [True, "Automatic language detection - Default: True"],
            'css_class' : ["codehilite",
                           "Set class name for wrapper <div> - Default: codehilite"],
            'pygments_style' : ['default', 'Pygments HTML Formatter Style (Colorscheme) - Default: default'],
            'noclasses': [False, 'Use inline styles instead of CSS classes - Default false']
            }

        # Override defaults with user settings, converting the string forms
        # of the booleans to real booleans.
        for key, value in configs:
            if value in ('True', 'False'):
                value = (value == 'True')
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        """ Add HilitePostprocessor to Markdown instance. """
        processor = HiliteTreeprocessor(md)
        processor.config = self.getConfigs()
        md.treeprocessors.add("hilite", processor, "<inline")

        md.registerExtension(self)
def makeExtension(configs=None):
    """Entry point used by Markdown to instantiate the extension.

    *configs* is an iterable of (key, value) override pairs; ``None`` (the
    default) means no overrides.  Using ``None`` instead of a literal ``{}``
    avoids the mutable-default-argument pitfall while remaining backward
    compatible with callers that pass a dict or omit the argument.
    """
    return CodeHiliteExtension(configs=configs or {})
|
apache-2.0
|
Innovahn/odoo.old
|
addons/product_email_template/models/invoice.py
|
321
|
1969
|
# -*- coding: utf-8 -*-
from openerp.osv import osv
class account_invoice(osv.Model):
    """Extend invoices to send product-linked email templates on validation."""
    _inherit = 'account.invoice'

    def invoice_validate_send_email(self, cr, uid, ids, context=None):
        """For each customer invoice, email the template attached to every
        invoice-line product (if any) and subscribe the partner as follower.

        Always returns True (OpenERP ORM convention).
        """
        Composer = self.pool['mail.compose.message']
        for invoice in self.browse(cr, uid, ids, context=context):
            # send template only on customer invoice
            if invoice.type != 'out_invoice':
                continue
            # subscribe the partner to the invoice
            if invoice.partner_id not in invoice.message_follower_ids:
                self.message_subscribe(cr, uid, [invoice.id], [invoice.partner_id.id], context=context)
            for line in invoice.invoice_line:
                if line.product_id.email_template_id:
                    # CLEANME: should define and use a clean API: message_post with a template
                    composer_id = Composer.create(cr, uid, {
                        'model': 'account.invoice',
                        'res_id': invoice.id,
                        'template_id': line.product_id.email_template_id.id,
                        'composition_mode': 'comment',
                    }, context=context)
                    # Pull the template-rendered defaults for the composer.
                    template_values = Composer.onchange_template_id(
                        cr, uid, composer_id, line.product_id.email_template_id.id, 'comment', 'account.invoice', invoice.id
                    )['value']
                    # (4, id) ORM command: link the existing attachments.
                    template_values['attachment_ids'] = [(4, id) for id in template_values.get('attachment_ids', [])]
                    Composer.write(cr, uid, [composer_id], template_values, context=context)
                    Composer.send_mail(cr, uid, [composer_id], context=context)
        return True

    def invoice_validate(self, cr, uid, ids, context=None):
        """Validate the invoice, then fire the product template emails."""
        res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
        self.invoice_validate_send_email(cr, uid, ids, context=context)
        return res
|
agpl-3.0
|
chirilo/remo
|
vendor-local/lib/python/unidecode/x09f.py
|
252
|
4509
|
# Unidecode transliteration table for one 256-codepoint block: explicit
# entries for the assigned characters (0x00-0xa5), then '[?]' for the
# 0x59 (89) trailing unassigned positions (0xa6-0xfe).
data = (
    'Cu ',  # 0x00
    'Qu ',  # 0x01
    'Chao ',  # 0x02
    'Wa ',  # 0x03
    'Zhu ',  # 0x04
    'Zhi ',  # 0x05
    'Mang ',  # 0x06
    'Ao ',  # 0x07
    'Bie ',  # 0x08
    'Tuo ',  # 0x09
    'Bi ',  # 0x0a
    'Yuan ',  # 0x0b
    'Chao ',  # 0x0c
    'Tuo ',  # 0x0d
    'Ding ',  # 0x0e
    'Mi ',  # 0x0f
    'Nai ',  # 0x10
    'Ding ',  # 0x11
    'Zi ',  # 0x12
    'Gu ',  # 0x13
    'Gu ',  # 0x14
    'Dong ',  # 0x15
    'Fen ',  # 0x16
    'Tao ',  # 0x17
    'Yuan ',  # 0x18
    'Pi ',  # 0x19
    'Chang ',  # 0x1a
    'Gao ',  # 0x1b
    'Qi ',  # 0x1c
    'Yuan ',  # 0x1d
    'Tang ',  # 0x1e
    'Teng ',  # 0x1f
    'Shu ',  # 0x20
    'Shu ',  # 0x21
    'Fen ',  # 0x22
    'Fei ',  # 0x23
    'Wen ',  # 0x24
    'Ba ',  # 0x25
    'Diao ',  # 0x26
    'Tuo ',  # 0x27
    'Tong ',  # 0x28
    'Qu ',  # 0x29
    'Sheng ',  # 0x2a
    'Shi ',  # 0x2b
    'You ',  # 0x2c
    'Shi ',  # 0x2d
    'Ting ',  # 0x2e
    'Wu ',  # 0x2f
    'Nian ',  # 0x30
    'Jing ',  # 0x31
    'Hun ',  # 0x32
    'Ju ',  # 0x33
    'Yan ',  # 0x34
    'Tu ',  # 0x35
    'Ti ',  # 0x36
    'Xi ',  # 0x37
    'Xian ',  # 0x38
    'Yan ',  # 0x39
    'Lei ',  # 0x3a
    'Bi ',  # 0x3b
    'Yao ',  # 0x3c
    'Qiu ',  # 0x3d
    'Han ',  # 0x3e
    'Wu ',  # 0x3f
    'Wu ',  # 0x40
    'Hou ',  # 0x41
    'Xi ',  # 0x42
    'Ge ',  # 0x43
    'Zha ',  # 0x44
    'Xiu ',  # 0x45
    'Weng ',  # 0x46
    'Zha ',  # 0x47
    'Nong ',  # 0x48
    'Nang ',  # 0x49
    'Qi ',  # 0x4a
    'Zhai ',  # 0x4b
    'Ji ',  # 0x4c
    'Zi ',  # 0x4d
    'Ji ',  # 0x4e
    'Ji ',  # 0x4f
    'Qi ',  # 0x50
    'Ji ',  # 0x51
    'Chi ',  # 0x52
    'Chen ',  # 0x53
    'Chen ',  # 0x54
    'He ',  # 0x55
    'Ya ',  # 0x56
    'Ken ',  # 0x57
    'Xie ',  # 0x58
    'Pao ',  # 0x59
    'Cuo ',  # 0x5a
    'Shi ',  # 0x5b
    'Zi ',  # 0x5c
    'Chi ',  # 0x5d
    'Nian ',  # 0x5e
    'Ju ',  # 0x5f
    'Tiao ',  # 0x60
    'Ling ',  # 0x61
    'Ling ',  # 0x62
    'Chu ',  # 0x63
    'Quan ',  # 0x64
    'Xie ',  # 0x65
    'Ken ',  # 0x66
    'Nie ',  # 0x67
    'Jiu ',  # 0x68
    'Yao ',  # 0x69
    'Chuo ',  # 0x6a
    'Kun ',  # 0x6b
    'Yu ',  # 0x6c
    'Chu ',  # 0x6d
    'Yi ',  # 0x6e
    'Ni ',  # 0x6f
    'Cuo ',  # 0x70
    'Zou ',  # 0x71
    'Qu ',  # 0x72
    'Nen ',  # 0x73
    'Xian ',  # 0x74
    'Ou ',  # 0x75
    'E ',  # 0x76
    'Wo ',  # 0x77
    'Yi ',  # 0x78
    'Chuo ',  # 0x79
    'Zou ',  # 0x7a
    'Dian ',  # 0x7b
    'Chu ',  # 0x7c
    'Jin ',  # 0x7d
    'Ya ',  # 0x7e
    'Chi ',  # 0x7f
    'Chen ',  # 0x80
    'He ',  # 0x81
    'Ken ',  # 0x82
    'Ju ',  # 0x83
    'Ling ',  # 0x84
    'Pao ',  # 0x85
    'Tiao ',  # 0x86
    'Zi ',  # 0x87
    'Ken ',  # 0x88
    'Yu ',  # 0x89
    'Chuo ',  # 0x8a
    'Qu ',  # 0x8b
    'Wo ',  # 0x8c
    'Long ',  # 0x8d
    'Pang ',  # 0x8e
    'Gong ',  # 0x8f
    'Pang ',  # 0x90
    'Yan ',  # 0x91
    'Long ',  # 0x92
    'Long ',  # 0x93
    'Gong ',  # 0x94
    'Kan ',  # 0x95
    'Ta ',  # 0x96
    'Ling ',  # 0x97
    'Ta ',  # 0x98
    'Long ',  # 0x99
    'Gong ',  # 0x9a
    'Kan ',  # 0x9b
    'Gui ',  # 0x9c
    'Qiu ',  # 0x9d
    'Bie ',  # 0x9e
    'Gui ',  # 0x9f
    'Yue ',  # 0xa0
    'Chui ',  # 0xa1
    'He ',  # 0xa2
    'Jue ',  # 0xa3
    'Xie ',  # 0xa4
    'Yu ',  # 0xa5
) + ('[?]',) * 0x59  # 0xa6-0xfe: unmapped
|
bsd-3-clause
|
mahak/ansible
|
lib/ansible/cli/galaxy.py
|
14
|
80269
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os.path
import re
import shutil
import sys
import textwrap
import time
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections
)
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
def with_collection_artifacts_manager(wrapped_method):
    """Inject an artifacts manager if not passed explicitly.

    This decorator constructs a ConcreteArtifactsManager and maintains
    the related temporary directory auto-cleanup around the target
    method invocation.
    """
    import functools

    # functools.wraps preserves the wrapped method's __name__/__doc__ so
    # introspection and error messages stay meaningful.
    @functools.wraps(wrapped_method)
    def method_wrapper(*args, **kwargs):
        # Caller supplied its own manager: pass straight through.
        if 'artifacts_manager' in kwargs:
            return wrapped_method(*args, **kwargs)

        with ConcreteArtifactsManager.under_tmpdir(
                C.DEFAULT_LOCAL_TMP,
                validate_certs=not context.CLIARGS['ignore_certs'],
        ) as concrete_artifact_cm:
            kwargs['artifacts_manager'] = concrete_artifact_cm
            return wrapped_method(*args, **kwargs)

    return method_wrapper
def _display_header(path, h1, h2, w1=10, w2=7):
    """Print a two-column table header for *path* with dashed underlines."""
    # The dash rules must be at least as wide as the headers themselves.
    rule1 = '-' * max(len(h1), w1)
    rule2 = '-' * max(len(h2), w2)
    display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
        path, h1, h2, rule1, rule2, cwidth=w1, vwidth=w2,
    ))
def _display_role(gr):
    """Print one installed role as '- name, version'."""
    info = gr.install_info
    version = info.get("version", None) if info else None
    # Fall back to a placeholder when no version was recorded.
    display.display("- %s, %s" % (gr.name, version or "(unknown version)"))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
    """Print one collection row, padding columns to at least the minimum widths."""
    name_width = max(cwidth, min_cwidth)
    version_width = max(vwidth, min_vwidth)
    display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
        fqcn=to_text(collection.fqcn),
        version=collection.ver,
        cwidth=name_width,
        vwidth=version_width,
    ))
def _get_collection_widths(collections):
    """Return (max FQCN length, max version length) over *collections*.

    Accepts either a single collection or an iterable of them.
    """
    if not is_iterable(collections):
        collections = (collections, )

    fqcns = {to_text(c.fqcn) for c in collections}
    versions = {to_text(c.ver) for c in collections}
    return max(map(len, fqcns)), max(map(len, versions))
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
self._raw_args = args
self._implicit_role = False
if len(args) > 1:
# Inject role into sys.argv[1] as a backwards compatibility step
if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
# Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
idx = 2 if args[1].startswith('-v') else 1
args.insert(idx, 'role')
self._implicit_role = True
# since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
if args[1:3] == ['role', 'login']:
display.error(
"The login command was removed in late 2020. An API key is now required to publish roles or collections "
"to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
"ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
"command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
sys.exit(1)
self.api_servers = []
self.galaxy = None
self._api = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--token', '--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
opt_help.add_verbosity_options(common)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.argparse.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
collections_path = opt_help.argparse.ArgumentParser(add_help=False)
collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
default=AnsibleCollectionConfig.collection_paths,
action=opt_help.PrependListAction,
help="One or more directories to search for collections in addition "
"to the default COLLECTIONS_PATHS. Separate multiple paths "
"with '{0}'.".format(os.path.pathsep))
cache_options = opt_help.argparse.ArgumentParser(add_help=False)
cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
default=False, help='Clear the existing server response cache.')
cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
help='Do not use the server response cache.')
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_download_options(collection_parser, parents=[common, cache_options])
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force, cache_options])
self.add_list_options(collection_parser, parents=[common, collections_path])
self.add_verify_options(collection_parser, parents=[common, collections_path])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_download_options(self, parser, parents=None):
download_parser = parser.add_parser('download', parents=parents,
help='Download collections and their dependencies as a tarball for an '
'offline install.')
download_parser.set_defaults(func=self.execute_download)
download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download collection(s) listed as dependencies.")
download_parser.add_argument('-p', '--download-path', dest='download_path',
default='./collections',
help='The directory to download the collections to.')
download_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be downloaded.')
download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
    """Wire up the ``remove`` sub-command, which deletes installed roles."""
    sub = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
    sub.set_defaults(func=self.execute_remove)
    sub.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
    """Wire up the ``delete`` sub-command for removing a role from the Galaxy server."""
    sub = parser.add_parser('delete', parents=parents,
                            help='Removes the role from Galaxy. It does not remove or alter the actual '
                                 'GitHub repository.')
    sub.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
    """Wire up the ``list`` sub-command for roles or collections."""
    obj_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'

    sub = parser.add_parser('list', parents=parents,
                            help='Show the name and version of each {0} installed in the {0}s_path.'.format(obj_type))
    sub.set_defaults(func=self.execute_list)
    sub.add_argument(obj_type, help=obj_type.capitalize(), nargs='?', metavar=obj_type)

    # Only the collection listing supports alternate output formats.
    if obj_type == 'collection':
        sub.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
                         help="Format to display the list of collections in.")
def add_search_options(self, parser, parents=None):
    """Wire up the ``search`` sub-command for querying the Galaxy database."""
    sub = parser.add_parser('search', parents=parents,
                            help='Search the Galaxy database by tags, platforms, author and multiple '
                                 'keywords.')
    sub.set_defaults(func=self.execute_search)

    sub.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
    sub.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
    sub.add_argument('--author', dest='author', help='GitHub username')
    sub.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
    """Wire up the ``import`` sub-command for importing a role into a Galaxy server."""
    sub = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
    sub.set_defaults(func=self.execute_import)

    sub.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                     help="Don't wait for import results.")
    sub.add_argument('--branch', dest='reference',
                     help='The name of a branch to import. Defaults to the repository\'s default branch '
                          '(usually master)')
    sub.add_argument('--role-name', dest='role_name',
                     help='The name the role should have, if different than the repo name')
    sub.add_argument('--status', dest='check_status', action='store_true', default=False,
                     help='Check the status of the most recent import request for given github_'
                          'user/github_repo.')
def add_setup_options(self, parser, parents=None):
    """Wire up the ``setup`` sub-command for managing Galaxy source integrations."""
    sub = parser.add_parser('setup', parents=parents,
                            help='Manage the integration between Galaxy and the given source.')
    sub.set_defaults(func=self.execute_setup)

    # Optional maintenance flags first, then the positional integration spec.
    sub.add_argument('--remove', dest='remove_id', default=None,
                     help='Remove the integration matching the provided ID value. Use --list to see '
                          'ID values.')
    sub.add_argument('--list', dest="setup_list", action='store_true', default=False,
                     help='List all of your integrations.')
    sub.add_argument('source', help='Source')
    sub.add_argument('github_user', help='GitHub username')
    sub.add_argument('github_repo', help='GitHub repository')
    sub.add_argument('secret', help='Secret')
def add_info_options(self, parser, parents=None):
    """Wire up the ``info`` sub-command for showing details about a role."""
    sub = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
    sub.set_defaults(func=self.execute_info)
    sub.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_verify_options(self, parser, parents=None):
    """Wire up the ``verify`` sub-command for checking installed collections."""
    sub = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
                            'found on the server and the installed copy. This does not verify dependencies.')
    sub.set_defaults(func=self.execute_verify)

    sub.add_argument('args', metavar='collection_name', nargs='*', help='The collection(s) name or '
                     'path/url to a tar.gz collection artifact. This is mutually exclusive with --requirements-file.')
    sub.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                     help='Ignore errors during verification and continue with the next specified collection.')
    sub.add_argument('--offline', dest='offline', action='store_true', default=False,
                     help='Validate collection integrity locally without contacting server for '
                          'canonical manifest hash.')
    sub.add_argument('-r', '--requirements-file', dest='requirements',
                     help='A file containing a list of collections to be verified.')
def add_install_options(self, parser, parents=None):
    """Wire up the ``install`` sub-command for roles or collections.

    The option set differs depending on whether this is attached to the
    ``collection`` or the ``role`` action parser (detected via the parser's
    metavar).
    """
    galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'

    args_kwargs = {}
    if galaxy_type == 'collection':
        args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
                              'mutually exclusive with --requirements-file.'
        ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
                             'collection. This will not ignore dependency conflict errors.'
    else:
        args_kwargs['help'] = 'Role name, URL or tar file'
        ignore_errors_help = 'Ignore errors and continue with the next specified role.'

    install_parser = parser.add_parser('install', parents=parents,
                                       help='Install {0}(s) from file(s), URL(s) or Ansible '
                                            'Galaxy'.format(galaxy_type))
    install_parser.set_defaults(func=self.execute_install)
    install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
    install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                help=ignore_errors_help)

    # --no-deps and --force-with-deps contradict each other, so they share a
    # mutually-exclusive group.
    install_exclusive = install_parser.add_mutually_exclusive_group()
    install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                   help="Don't download {0}s listed as dependencies.".format(galaxy_type))
    install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
                                   help="Force overwriting an existing {0} and its "
                                        "dependencies.".format(galaxy_type))

    # Collection-only vs role-only options.
    if galaxy_type == 'collection':
        install_parser.add_argument('-p', '--collections-path', dest='collections_path',
                                    default=self._get_default_collection_path(),
                                    help='The path to the directory containing your collections.')
        install_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                    help='A file containing a list of collections to be installed.')
        install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                    help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
        install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
                                    help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
    else:
        install_parser.add_argument('-r', '--role-file', dest='requirements',
                                    help='A file containing a list of roles to be installed.')
        install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                                    default=False,
                                    help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
    """Wire up the ``build`` sub-command for building a collection artifact."""
    sub = parser.add_parser('build', parents=parents,
                            help='Build an Ansible collection artifact that can be published to Ansible '
                                 'Galaxy.')
    sub.set_defaults(func=self.execute_build)

    sub.add_argument('args', metavar='collection', nargs='*', default=('.',),
                     help='Path to the collection(s) directory to build. This should be the directory '
                          'that contains the galaxy.yml file. The default is the current working '
                          'directory.')
    sub.add_argument('--output-path', dest='output_path', default='./',
                     help='The path in which the collection is built to. The default is the current '
                          'working directory.')
def add_publish_options(self, parser, parents=None):
    """Wire up the ``publish`` sub-command for uploading a collection tarball."""
    sub = parser.add_parser('publish', parents=parents,
                            help='Publish a collection artifact to Ansible Galaxy.')
    sub.set_defaults(func=self.execute_publish)

    sub.add_argument('args', metavar='collection_path',
                     help='The path to the collection tarball to publish.')
    sub.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                     help="Don't wait for import validation results.")
    sub.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
                     help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
    """CLI hook: propagate the parsed verbosity to the global display before run."""
    options = super(GalaxyCLI, self).post_process_args(options)
    display.verbosity = options.verbosity
    return options
def run(self):
    """Build the Galaxy context and API server list, then dispatch to the
    ``execute_*`` handler that argparse stored in ``func``."""
    super(GalaxyCLI, self).run()

    self.galaxy = Galaxy()

    def server_config_def(section, key, required):
        # Build one config definition (ini section + env var lookup) for a
        # single option of a named server from C.GALAXY_SERVER_LIST.
        return {
            'description': 'The %s of the %s Galaxy server' % (key, section),
            'ini': [
                {
                    'section': 'galaxy_server.%s' % section,
                    'key': key,
                }
            ],
            'env': [
                {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
            ],
            'required': required,
        }

    # Per-server options we recognize; only 'url' is mandatory.
    server_def = [('url', True), ('username', False), ('password', False), ('token', False),
                  ('auth_url', False), ('v3', False)]

    validate_certs = not context.CLIARGS['ignore_certs']
    galaxy_options = {'validate_certs': validate_certs}
    for optional_key in ['clear_response_cache', 'no_cache']:
        if optional_key in context.CLIARGS:
            galaxy_options[optional_key] = context.CLIARGS[optional_key]

    config_servers = []

    # Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
    server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
    for server_priority, server_key in enumerate(server_list, start=1):
        # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
        # section [galaxy_server.<server>] for the values url, username, password, and token.
        config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
        defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
        C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)

        server_options = C.config.get_plugin_options('galaxy_server', server_key)
        # auth_url is used to create the token, but not directly by GalaxyAPI, so
        # it doesn't need to be passed as kwarg to GalaxyApi
        auth_url = server_options.pop('auth_url', None)
        token_val = server_options['token'] or NoTokenSentinel
        username = server_options['username']
        available_api_versions = None  # NOTE(review): unused local, kept as-is
        v3 = server_options.pop('v3', None)
        if v3:
            # This allows a user to explicitly indicate the server uses the /v3 API
            # This was added for testing against pulp_ansible and I'm not sure it has
            # a practical purpose outside of this use case. As such, this option is not
            # documented as of now
            server_options['available_api_versions'] = {'v3': '/v3'}

        # default case if no auth info is provided.
        server_options['token'] = None

        if username:
            server_options['token'] = BasicAuthToken(username,
                                                     server_options['password'])
        else:
            if token_val:
                if auth_url:
                    # Token is exchanged via a Keycloak/SSO auth endpoint.
                    server_options['token'] = KeycloakToken(access_token=token_val,
                                                            auth_url=auth_url,
                                                            validate_certs=validate_certs)
                else:
                    # The galaxy v1 / github / django / 'Token'
                    server_options['token'] = GalaxyToken(token=token_val)

        server_options.update(galaxy_options)
        config_servers.append(GalaxyAPI(
            self.galaxy, server_key,
            priority=server_priority,
            **server_options
        ))

    cmd_server = context.CLIARGS['api_server']
    cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
    if cmd_server:
        # Cmd args take precedence over the config entry but first check if the arg was a name and use that config
        # entry, otherwise create a new API entry for the server specified.
        config_server = next((s for s in config_servers if s.name == cmd_server), None)
        if config_server:
            self.api_servers.append(config_server)
        else:
            self.api_servers.append(GalaxyAPI(
                self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
                priority=len(config_servers) + 1,
                **galaxy_options
            ))
    else:
        self.api_servers = config_servers

    # Default to C.GALAXY_SERVER if no servers were defined
    if len(self.api_servers) == 0:
        self.api_servers.append(GalaxyAPI(
            self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
            priority=0,
            **galaxy_options
        ))

    return context.CLIARGS['func']()
@property
def api(self):
    """Lazily select the Galaxy API server, preferring one that speaks v1.

    Falls back to the first configured server when none advertises the v1
    API (or probing raises).
    """
    if not self._api:
        chosen = None
        for candidate in self.api_servers:
            try:
                has_v1 = u'v1' in candidate.available_api_versions
            except Exception:
                continue
            if has_v1:
                chosen = candidate
                break
        self._api = chosen or self.api_servers[0]
    return self._api
def _get_default_collection_path(self):
    # The first entry of the configured collections search path is the
    # default install destination.
    return C.COLLECTIONS_PATHS[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None):
    """
    Parses an Ansible requirement.yml file and returns all the roles and/or collections defined in it. There are 2
    requirements file formats:

        # v1 (roles only)
        - src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
          name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
          version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
          include: Path to additional requirements.yml files.

        # v2 (roles and collections)
        ---
        roles:
        # Same as v1 format just under the roles key

        collections:
        - namespace.collection
        - name: namespace.collection
          version: version identifier, multiple identifiers are separated by ','
          source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
          type: git|file|url|galaxy

    :param requirements_file: The path to the requirements file.
    :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
    :param artifacts_manager: Artifacts manager.
    :return: a dict containing roles and collections found in the requirements file.
    """
    requirements = {
        'roles': [],
        'collections': [],
    }

    b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
    if not os.path.exists(b_requirements_file):
        raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))

    display.vvv("Reading requirement file at '%s'" % requirements_file)
    with open(b_requirements_file, 'rb') as req_obj:
        try:
            file_requirements = yaml_load(req_obj)
        except YAMLError as err:
            raise AnsibleError(
                "Failed to parse the requirements yml at '%s' with the following error:\n%s"
                % (to_native(requirements_file), to_native(err)))

    if file_requirements is None:
        raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))

    def parse_role_req(requirement):
        # A role entry is either an inline role spec or an 'include' that
        # points at another (roles-only) requirements file.
        if "include" not in requirement:
            role = RoleRequirement.role_yaml_parse(requirement)
            display.vvv("found role %s in yaml file" % to_text(role))
            if "name" not in role and "src" not in role:
                raise AnsibleError("Must specify name or src for role")
            return [GalaxyRole(self.galaxy, self.api, **role)]
        else:
            b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
            if not os.path.isfile(b_include_path):
                raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
                                   % (to_native(b_include_path), to_native(requirements_file)))

            with open(b_include_path, 'rb') as f_include:
                try:
                    return [GalaxyRole(self.galaxy, self.api, **r) for r in
                            (RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
                except Exception as e:
                    raise AnsibleError("Unable to load data from include requirements file: %s %s"
                                       % (to_native(requirements_file), to_native(e)))

    if isinstance(file_requirements, list):
        # Older format that contains only roles
        if not allow_old_format:
            raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
                               "a list of collections to install")

        for role_req in file_requirements:
            requirements['roles'] += parse_role_req(role_req)

    else:
        # Newer format with a collections and/or roles key
        extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
        if extra_keys:
            raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
                               "file. Found: %s" % (to_native(", ".join(extra_keys))))

        for role_req in file_requirements.get('roles') or []:
            requirements['roles'] += parse_role_req(role_req)

        requirements['collections'] = [
            Requirement.from_requirement_dict(
                self._init_coll_req_dict(collection_req),
                artifacts_manager,
            )
            for collection_req in file_requirements.get('collections') or []
        ]

    return requirements
def _init_coll_req_dict(self, coll_req):
    """Normalize one requirements-file collection entry to dict form and, for
    Galaxy-sourced entries, resolve 'source' to a GalaxyAPI instance."""
    if not isinstance(coll_req, dict):
        # Assume it's a string:
        return {'name': coll_req}

    # Only Galaxy-type entries with both a name and a source need resolution.
    if (
            'name' not in coll_req or
            not coll_req.get('source') or
            coll_req.get('type', 'galaxy') != 'galaxy'
    ):
        return coll_req

    # Try and match up the requirement source with our list of Galaxy API
    # servers defined in the config, otherwise create a server with that
    # URL without any auth.
    coll_req['source'] = next(
        iter(
            srvr for srvr in self.api_servers
            if coll_req['source'] in {srvr.name, srvr.api_server}
        ),
        GalaxyAPI(
            self.galaxy,
            'explicit_requirement_{name!s}'.format(
                name=coll_req['name'],
            ),
            coll_req['source'],
            validate_certs=not context.CLIARGS['ignore_certs'],
        ),
    )

    return coll_req
@staticmethod
def exit_without_ignore(rc=1):
    """
    Exits with the specified return code unless the
    option --ignore-errors was specified

    NOTE(review): ``rc`` is currently unused -- the method raises
    AnsibleError instead of exiting with that code. The parameter is kept
    for backward compatibility with existing callers.
    """
    if not context.CLIARGS['ignore_errors']:
        raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
    """Render a role's merged local/remote metadata as a printable text blob."""
    lines = [u"", u"Role: %s" % to_text(role_info['name'])]

    # Prefer the top-level 'description', then the one nested in galaxy_info.
    galaxy_info = role_info.get('galaxy_info', {})
    lines.append(u"\tdescription: %s" % role_info.get('description', galaxy_info.get('description', '')))

    for key in sorted(role_info.keys()):
        if key in GalaxyCLI.SKIP_INFO_KEYS:
            continue
        value = role_info[key]
        if isinstance(value, dict):
            lines.append(u"\t%s:" % key)
            for sub_key in sorted(value.keys()):
                if sub_key not in GalaxyCLI.SKIP_INFO_KEYS:
                    lines.append(u"\t\t%s: %s" % (sub_key, value[sub_key]))
        else:
            lines.append(u"\t%s: %s" % (key, value))

    # make sure we have a trailing newline returned
    lines.append(u"")
    return u'\n'.join(lines)
@staticmethod
def _resolve_path(path):
    """Expand env vars and ``~`` in *path*, then return its absolute form."""
    expanded = os.path.expandvars(path)
    expanded = os.path.expanduser(expanded)
    return os.path.abspath(expanded)
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
    """Render the default skeleton's galaxy.yml.j2 template.

    Builds required/optional option lists dynamically from the collection
    galaxy metadata definition so the generated galaxy.yml contains a fully
    commented entry for every known key.
    """
    with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
        meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')

    galaxy_meta = get_collections_galaxy_meta_info()

    required_config = []
    optional_config = []
    for meta_entry in galaxy_meta:
        config_list = required_config if meta_entry.get('required', False) else optional_config

        value = inject_data.get(meta_entry['key'], None)
        if not value:
            # Supply an empty placeholder of the right type so the template
            # still renders an entry for this key.
            meta_type = meta_entry.get('type', 'str')

            if meta_type == 'str':
                value = ''
            elif meta_type == 'list':
                value = []
            elif meta_type == 'dict':
                value = {}

        meta_entry['value'] = value
        config_list.append(meta_entry)

    # Doc-markup patterns: L(text, url) links and C(code) constants.
    link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
    const_pattern = re.compile(r"C\(([^)]+)\)")

    def comment_ify(v):
        # Jinja2 filter: flatten doc markup and wrap the text as a
        # '#'-prefixed comment block.
        if isinstance(v, list):
            v = ". ".join([l.rstrip('.') for l in v])

        v = link_pattern.sub(r"\1 <\2>", v)
        v = const_pattern.sub(r"'\1'", v)

        return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)

    loader = DataLoader()
    templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
    templar.environment.filters['comment_ify'] = comment_ify

    meta_value = templar.template(meta_template)

    return meta_value
def _require_one_of_collections_requirements(
        self, collections, requirements_file,
        artifacts_manager=None,
):
    """Validate that exactly one collection source was given and parse it.

    Either positional collection names or a requirements file must be
    supplied -- never both, never neither.
    """
    if collections and requirements_file:
        raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
    if not collections and not requirements_file:
        raise AnsibleError("You must specify a collection name or a requirements file.")

    if requirements_file:
        # The requirements file must be in the v2 (collections-aware) format.
        return self._parse_requirements_file(
            GalaxyCLI._resolve_path(requirements_file),
            allow_old_format=False,
            artifacts_manager=artifacts_manager,
        )

    parsed = [
        Requirement.from_string(coll_input, artifacts_manager)
        for coll_input in collections
    ]
    return {'collections': parsed, 'roles': []}
############################
# execute actions
############################
def execute_role(self):
    """
    Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
    as listed below.
    """
    # Intentionally empty: argparse dispatches straight to the sub-action's
    # handler; this stub exists so the docs build picks up the docstring.
    pass
def execute_collection(self):
    """
    Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
    listed below.
    """
    # Intentionally empty: argparse dispatches straight to the sub-action's
    # handler; this stub exists so the docs build picks up the docstring.
    pass
def execute_build(self):
    """
    Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
    By default, this command builds from the current working directory. You can optionally pass in the
    collection input path (where the ``galaxy.yml`` file is).
    """
    overwrite = context.CLIARGS['force']
    dest = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
    b_dest = to_bytes(dest, errors='surrogate_or_strict')

    # The output location must be a directory; create it when missing.
    if os.path.isfile(b_dest):
        raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(dest))
    if not os.path.exists(b_dest):
        os.makedirs(b_dest)

    for src in context.CLIARGS['args']:
        build_collection(
            to_text(GalaxyCLI._resolve_path(src), errors='surrogate_or_strict'),
            to_text(dest, errors='surrogate_or_strict'),
            overwrite,
        )
@with_collection_artifacts_manager
def execute_download(self, artifacts_manager=None):
    """Download collections (and, unless --no-deps, their dependencies) as
    tarballs for later offline installation.

    :param artifacts_manager: Artifacts manager (injected by the decorator).
    """
    collections = context.CLIARGS['args']
    no_deps = context.CLIARGS['no_deps']
    download_path = context.CLIARGS['download_path']

    requirements_file = context.CLIARGS['requirements']
    if requirements_file:
        requirements_file = GalaxyCLI._resolve_path(requirements_file)

    requirements = self._require_one_of_collections_requirements(
        collections, requirements_file,
        artifacts_manager=artifacts_manager,
    )['collections']

    download_path = GalaxyCLI._resolve_path(download_path)
    b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
    if not os.path.exists(b_download_path):
        os.makedirs(b_download_path)

    download_collections(
        requirements, download_path, self.api_servers, no_deps,
        context.CLIARGS['allow_pre_release'],
        artifacts_manager=artifacts_manager,
    )

    return 0
def execute_init(self):
    """
    Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
    Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
    """
    galaxy_type = context.CLIARGS['type']
    init_path = context.CLIARGS['init_path']
    force = context.CLIARGS['force']
    obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]

    obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]

    # Variables made available to the skeleton's Jinja2 templates.
    inject_data = dict(
        description='your {0} description'.format(galaxy_type),
        ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
    )
    if galaxy_type == 'role':
        inject_data.update(dict(
            author='your name',
            company='your company (optional)',
            license='license (GPL-2.0-or-later, MIT, etc)',
            role_name=obj_name,
            role_type=context.CLIARGS['role_type'],
            issue_tracker_url='http://example.com/issue/tracker',
            repository_url='http://example.com/repository',
            documentation_url='http://docs.example.com',
            homepage_url='http://example.com',
            # BUGFIX: slicing the version string ('2.10.3'[:3] -> '2.1') gave a
            # wrong 'x.y' for double-digit minor releases; join the first two
            # dotted components instead.
            min_ansible_version='.'.join(ansible_version.split('.')[:2]),  # x.y
            dependencies=[],
        ))

        obj_path = os.path.join(init_path, obj_name)
    elif galaxy_type == 'collection':
        namespace, collection_name = obj_name.split('.', 1)

        inject_data.update(dict(
            namespace=namespace,
            collection_name=collection_name,
            version='1.0.0',
            readme='README.md',
            authors=['your name <example@domain.com>'],
            license=['GPL-2.0-or-later'],
            repository='http://example.com/repository',
            documentation='http://docs.example.com',
            homepage='http://example.com',
            issues='http://example.com/issue/tracker',
            build_ignore=[],
        ))

        obj_path = os.path.join(init_path, namespace, collection_name)

    b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')

    if os.path.exists(b_obj_path):
        if os.path.isfile(obj_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
        elif not force:
            raise AnsibleError("- the directory %s already exists. "
                               "You can use --force to re-initialize this directory,\n"
                               "however it will reset any main.yml files that may have\n"
                               "been modified there already." % to_native(obj_path))

    # Pick the user-supplied skeleton, or fall back to the bundled default.
    if obj_skeleton is not None:
        own_skeleton = False
        skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
    else:
        own_skeleton = True
        obj_skeleton = self.galaxy.default_role_skeleton_path
        skeleton_ignore_expressions = ['^.*/.git_keep$']

    obj_skeleton = os.path.expanduser(obj_skeleton)
    skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]

    if not os.path.exists(obj_skeleton):
        raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
            to_native(obj_skeleton), galaxy_type)
        )

    loader = DataLoader()
    templar = Templar(loader, variables=inject_data)

    # create role directory
    if not os.path.exists(b_obj_path):
        os.makedirs(b_obj_path)

    for root, dirs, files in os.walk(obj_skeleton, topdown=True):
        rel_root = os.path.relpath(root, obj_skeleton)
        rel_dirs = rel_root.split(os.sep)
        rel_root_dir = rel_dirs[0]
        if galaxy_type == 'collection':
            # A collection can contain templates in playbooks/*/templates and roles/*/templates
            in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
        else:
            in_templates_dir = rel_root_dir == 'templates'

        # Filter out ignored directory names
        # Use [:] to mutate the list os.walk uses
        dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]

        for f in files:
            filename, ext = os.path.splitext(f)

            if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                continue

            if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
                # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
                # dynamically which requires special options to be set.

                # The templated data's keys must match the key name but the inject data contains collection_name
                # instead of name. We just make a copy and change the key back to name for this file.
                template_data = inject_data.copy()
                template_data['name'] = template_data.pop('collection_name')

                meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
                b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
                with open(b_dest_file, 'wb') as galaxy_obj:
                    galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
            elif ext == ".j2" and not in_templates_dir:
                # Render any other .j2 file (that isn't itself shipped as a
                # template for the user) through the Templar.
                src_template = os.path.join(root, f)
                dest_file = os.path.join(obj_path, rel_root, filename)
                template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
                b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
                with open(dest_file, 'wb') as df:
                    df.write(b_rendered)
            else:
                # Plain file: copy through untouched.
                f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
                shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))

        for d in dirs:
            b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
            if not os.path.exists(b_dir_path):
                os.makedirs(b_dir_path)

    display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
    """
    prints out detailed information about an installed role as well as info available from the galaxy API.
    """
    roles_path = context.CLIARGS['roles_path']

    data = ''
    for role in context.CLIARGS['args']:

        role_info = {'path': roles_path}
        gr = GalaxyRole(self.galaxy, self.api, role)

        # Merge local install metadata, renaming 'version' so it doesn't
        # collide with the remote role's version field.
        install_info = gr.install_info
        if install_info:
            if 'version' in install_info:
                install_info['installed_version'] = install_info['version']
                del install_info['version']
            role_info.update(install_info)

        if not context.CLIARGS['offline']:
            remote_data = None
            try:
                remote_data = self.api.lookup_role_by_name(role, False)
            except AnsibleError as e:
                if e.http_code == 400 and 'Bad Request' in e.message:
                    # Role does not exist in Ansible Galaxy
                    data = u"- the role %s was not found" % role
                    break

                raise AnsibleError("Unable to find info about '%s': %s" % (role, e))

            if remote_data:
                role_info.update(remote_data)
        elif context.CLIARGS['offline'] and not gr._exists:
            data = u"- the role %s was not found" % role
            break

        if gr.metadata:
            role_info.update(gr.metadata)

        # Fold in any version/spec info parsed from the role name itself.
        req = RoleRequirement()
        role_spec = req.role_yaml_parse({'role': role})
        if role_spec:
            role_info.update(role_spec)

        data += self._display_role_info(role_info)

    self.pager(data)
@with_collection_artifacts_manager
def execute_verify(self, artifacts_manager=None):
    """Verify installed collections against server (or local, with --offline)
    checksums; returns 1 when any collection failed verification, else 0.

    :param artifacts_manager: Artifacts manager (injected by the decorator).
    """
    collections = context.CLIARGS['args']
    search_paths = context.CLIARGS['collections_path']
    ignore_errors = context.CLIARGS['ignore_errors']
    local_verify_only = context.CLIARGS['offline']
    requirements_file = context.CLIARGS['requirements']

    requirements = self._require_one_of_collections_requirements(
        collections, requirements_file,
        artifacts_manager=artifacts_manager,
    )['collections']

    resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]

    results = verify_collections(
        requirements, resolved_paths,
        self.api_servers, ignore_errors,
        local_verify_only=local_verify_only,
        artifacts_manager=artifacts_manager,
    )

    if any(result for result in results if not result.success):
        return 1

    return 0
@with_collection_artifacts_manager
def execute_install(self, artifacts_manager=None):
    """
    Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``).
    You can pass in a list (roles or collections) or use the file
    option listed below (these are mutually exclusive). If you pass in a list, it
    can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.

    :param artifacts_manager: Artifacts manager.
    """
    install_items = context.CLIARGS['args']
    requirements_file = context.CLIARGS['requirements']
    collection_path = None

    if requirements_file:
        requirements_file = GalaxyCLI._resolve_path(requirements_file)

    # Warning emitted when a requirements file mixes roles and collections
    # but only one of the two types can be installed in this invocation.
    two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
                       "run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
                       "'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)

    # TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
    collection_requirements = []
    role_requirements = []
    if context.CLIARGS['type'] == 'collection':
        collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
        requirements = self._require_one_of_collections_requirements(
            install_items, requirements_file,
            artifacts_manager=artifacts_manager,
        )

        collection_requirements = requirements['collections']
        if requirements['roles']:
            display.vvv(two_type_warning.format('role'))
    else:
        if not install_items and requirements_file is None:
            raise AnsibleOptionsError("- you must specify a user/role name or a roles file")

        if requirements_file:
            if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
                raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")

            requirements = self._parse_requirements_file(
                requirements_file,
                artifacts_manager=artifacts_manager,
            )
            role_requirements = requirements['roles']

            # We can only install collections and roles at the same time if the type wasn't specified and the -p
            # argument was not used. If collections are present in the requirements then at least display a msg.
            galaxy_args = self._raw_args
            if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
                                                '--roles-path' in galaxy_args):

                # We only want to display a warning if 'ansible-galaxy install -r ... -p ...'. Other cases the user
                # was explicit about the type and shouldn't care that collections were skipped.
                display_func = display.warning if self._implicit_role else display.vvv
                display_func(two_type_warning.format('collection'))
            else:
                collection_path = self._get_default_collection_path()
                collection_requirements = requirements['collections']
        else:
            # roles were specified directly, so we'll just go out grab them
            # (and their dependencies, unless the user doesn't want us to).
            for rname in context.CLIARGS['args']:
                role = RoleRequirement.role_yaml_parse(rname.strip())
                role_requirements.append(GalaxyRole(self.galaxy, self.api, **role))

    if not role_requirements and not collection_requirements:
        display.display("Skipping install, no requirements found")
        return

    if role_requirements:
        display.display("Starting galaxy role install process")
        self._execute_install_role(role_requirements)

    if collection_requirements:
        display.display("Starting galaxy collection install process")

        # Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
        # the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
        self._execute_install_collection(
            collection_requirements, collection_path,
            artifacts_manager=artifacts_manager,
        )
def _execute_install_collection(
        self, requirements, path, artifacts_manager,
):
    """Install resolved collection requirements under ``path``.

    :param requirements: Collection requirements to install.
    :param path: Target collections path chosen by the caller.
    :param artifacts_manager: Manager used to fetch and verify collection artifacts.
    :return: 0 on success.
    """
    force = context.CLIARGS['force']
    ignore_errors = context.CLIARGS['ignore_errors']
    no_deps = context.CLIARGS['no_deps']
    force_with_deps = context.CLIARGS['force_with_deps']
    # If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
    allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
    upgrade = context.CLIARGS.get('upgrade', False)

    collections_path = C.COLLECTIONS_PATHS
    # Warn when the target lies outside every configured collections path:
    # the install will succeed but a normal Ansible run won't find it.
    if len([p for p in collections_path if p.startswith(path)]) == 0:
        display.warning("The specified collections path '%s' is not part of the configured Ansible "
                        "collections paths '%s'. The installed collection won't be picked up in an Ansible "
                        "run." % (to_text(path), to_text(":".join(collections_path))))

    output_path = validate_collection_path(path)
    b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
    if not os.path.exists(b_output_path):
        os.makedirs(b_output_path)

    install_collections(
        requirements, output_path, self.api_servers, ignore_errors,
        no_deps, force, force_with_deps, upgrade,
        allow_pre_release=allow_pre_release,
        artifacts_manager=artifacts_manager,
    )

    return 0
def _execute_install_role(self, requirements):
    """Install the given roles and, unless disabled, their dependencies.

    :param requirements: List of GalaxyRole objects to install.  NOTE: newly
        discovered dependencies are appended to this same list while it is
        being iterated, which is how they get picked up by this loop.
    :return: 0 on success.
    """
    role_file = context.CLIARGS['requirements']
    no_deps = context.CLIARGS['no_deps']
    force_deps = context.CLIARGS['force_with_deps']
    force = context.CLIARGS['force'] or force_deps

    for role in requirements:
        # only process roles in roles files when names matches if given
        if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
            display.vvv('Skipping role %s' % role.name)
            continue

        display.vvv('Processing role %s ' % role.name)

        # query the galaxy API for the role data
        if role.install_info is not None:
            if role.install_info['version'] != role.version or force:
                if force:
                    display.display('- changing role %s from %s to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    role.remove()
                else:
                    display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    continue
            else:
                if not force:
                    display.display('- %s is already installed, skipping.' % str(role))
                    continue

        try:
            installed = role.install()
        except AnsibleError as e:
            # A failed install is a warning (and possibly fatal via
            # exit_without_ignore), not an immediate abort.
            display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
            self.exit_without_ignore()
            continue

        # install dependencies, if we want them
        if not no_deps and installed:
            if not role.metadata:
                display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
            else:
                role_dependencies = (role.metadata.get('dependencies') or []) + role.requirements
                for dep in role_dependencies:
                    display.debug('Installing dep %s' % dep)
                    dep_req = RoleRequirement()
                    dep_info = dep_req.role_yaml_parse(dep)
                    dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
                    if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                        # we know we can skip this, as it's not going to
                        # be found on galaxy.ansible.com
                        continue
                    if dep_role.install_info is None:
                        # dependency not installed yet: queue it on the list
                        # this loop is iterating over (unless already queued)
                        if dep_role not in requirements:
                            display.display('- adding dependency: %s' % to_text(dep_role))
                            requirements.append(dep_role)
                        else:
                            display.display('- dependency %s already pending installation.' % dep_role.name)
                    else:
                        if dep_role.install_info['version'] != dep_role.version:
                            if force_deps:
                                display.display('- changing dependent role %s from %s to %s' %
                                                (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
                                dep_role.remove()
                                requirements.append(dep_role)
                            else:
                                display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
                                                (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
                        else:
                            if force_deps:
                                requirements.append(dep_role)
                            else:
                                display.display('- dependency %s is already installed, skipping.' % dep_role.name)

        if not installed:
            display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()

    return 0
def execute_remove(self):
    """Remove each role named on the command line from the local system."""
    requested_roles = context.CLIARGS['args']
    if not requested_roles:
        raise AnsibleOptionsError('- you must specify at least one role to remove.')

    for role_name in requested_roles:
        galaxy_role = GalaxyRole(self.galaxy, self.api, role_name)
        try:
            if galaxy_role.remove():
                display.display('- successfully removed %s' % role_name)
            else:
                display.display('- %s is not installed, skipping.' % role_name)
        except Exception as e:
            raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))

    return 0
def execute_list(self):
    """Dispatch to the role or collection listing based on the requested type."""
    list_type = context.CLIARGS['type']
    if list_type == 'role':
        self.execute_list_role()
    elif list_type == 'collection':
        self.execute_list_collection()
def execute_list_role(self):
    """
    List all roles installed on the local system or a specific role.

    Walks every configured roles path; when a role name was given, stops at
    the first path containing it.  Raises AnsibleOptionsError when none of
    the configured paths is usable.
    """
    path_found = False
    role_found = False
    warnings = []
    roles_search_paths = context.CLIARGS['roles_path']
    role_name = context.CLIARGS['role']

    for path in roles_search_paths:
        role_path = GalaxyCLI._resolve_path(path)
        # Bug fix: validate the *resolved* path; the raw configured value may
        # contain '~' or relative components and give the wrong answer.
        if os.path.isdir(role_path):
            path_found = True
        else:
            warnings.append("- the configured path {0} does not exist.".format(path))
            continue

        if role_name:
            # show the requested role, if it exists
            gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
            if os.path.isdir(gr.path):
                role_found = True
                display.display('# %s' % os.path.dirname(gr.path))
                _display_role(gr)
                break
            warnings.append("- the role %s was not found" % role_name)
        else:
            display.display('# %s' % role_path)
            path_files = os.listdir(role_path)
            for path_file in path_files:
                # Bug fix: build the role against the resolved search path,
                # not the raw configured one, so metadata lookups work for
                # '~'-style or relative configured paths.
                gr = GalaxyRole(self.galaxy, self.api, path_file, path=role_path)
                if gr.metadata:
                    _display_role(gr)

    # Do not warn if the role was found in any of the search paths
    if role_found and role_name:
        warnings = []

    for w in warnings:
        display.warning(w)

    if not path_found:
        raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))

    return 0
@with_collection_artifacts_manager
def execute_list_collection(self, artifacts_manager=None):
    """
    List all collections installed on the local system, or one specific
    collection when ``--collection`` was given.

    :param artifacts_manager: Artifacts manager.
    :return: 0 on success.
    :raises AnsibleOptionsError: when none of the configured paths is usable.
    """
    output_format = context.CLIARGS['output_format']
    collections_search_paths = set(context.CLIARGS['collections_path'])
    collection_name = context.CLIARGS['collection']
    default_collections_path = AnsibleCollectionConfig.collection_paths
    # accumulates {path: {fqcn: {'version': ...}}} for yaml/json output
    collections_in_paths = {}

    warnings = []
    path_found = False
    collection_found = False
    for path in collections_search_paths:
        collection_path = GalaxyCLI._resolve_path(path)
        if not os.path.exists(path):
            if path in default_collections_path:
                # don't warn for missing default paths
                continue
            warnings.append("- the configured path {0} does not exist.".format(collection_path))
            continue

        if not os.path.isdir(collection_path):
            warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
            continue

        path_found = True

        if collection_name:
            # list a specific collection

            validate_collection_name(collection_name)
            namespace, collection = collection_name.split('.')

            collection_path = validate_collection_path(collection_path)
            b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')

            if not os.path.exists(b_collection_path):
                warnings.append("- unable to find {0} in collection paths".format(collection_name))
                continue

            if not os.path.isdir(collection_path):
                warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
                continue

            collection_found = True

            try:
                collection = Requirement.from_dir_path_as_unknown(
                    b_collection_path,
                    artifacts_manager,
                )
            except ValueError as val_err:
                six.raise_from(AnsibleError(val_err), val_err)

            if output_format in {'yaml', 'json'}:
                # machine-readable output: record and move to the next path
                collections_in_paths[collection_path] = {
                    collection.fqcn: {'version': collection.ver}
                }

                continue

            fqcn_width, version_width = _get_collection_widths([collection])

            _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
            _display_collection(collection, fqcn_width, version_width)
        else:
            # list all collections
            collection_path = validate_collection_path(path)
            if os.path.isdir(collection_path):
                display.vvv("Searching {0} for collections".format(collection_path))
                collections = list(find_existing_collections(
                    collection_path, artifacts_manager,
                ))
            else:
                # There was no 'ansible_collections/' directory in the path, so there
                # or no collections here.
                display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
                continue

            if not collections:
                display.vvv("No collections found at {0}".format(collection_path))
                continue

            if output_format in {'yaml', 'json'}:
                collections_in_paths[collection_path] = {
                    collection.fqcn: {'version': collection.ver} for collection in collections
                }

                continue

            # Display header
            fqcn_width, version_width = _get_collection_widths(collections)
            _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)

            # Sort collections by the namespace and name
            for collection in sorted(collections, key=to_text):
                _display_collection(collection, fqcn_width, version_width)

    # Do not warn if the specific collection was found in any of the search paths
    if collection_found and collection_name:
        warnings = []

    for w in warnings:
        display.warning(w)

    if not path_found:
        raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))

    if output_format == 'json':
        display.display(json.dumps(collections_in_paths))
    elif output_format == 'yaml':
        display.display(yaml_dump(collections_in_paths))

    return 0
def execute_publish(self):
    """Publish a collection tarball (path given on the command line) to Galaxy."""
    tarball_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
    publish_collection(
        tarball_path,
        self.api,
        context.CLIARGS['wait'],
        context.CLIARGS['import_timeout'],
    )
def execute_search(self):
    ''' searches for roles on the Ansible Galaxy server'''
    # Galaxy API page size; also the cap on how many results are shown.
    page_size = 1000
    search = None

    if context.CLIARGS['args']:
        # multiple search terms are joined with '+' for the query string
        search = '+'.join(context.CLIARGS['args'])

    if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
        raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")

    response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
                                     tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)

    if response['count'] == 0:
        display.display("No roles match your search.", color=C.COLOR_ERROR)
        return True

    data = [u'']

    if response['count'] > page_size:
        data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
    else:
        data.append(u"Found %d roles matching your search:" % response['count'])

    # pad the Name column to the longest 'username.name' in the result set
    max_len = []
    for role in response['results']:
        max_len.append(len(role['username'] + '.' + role['name']))
    name_len = max(max_len)
    format_str = u" %%-%ds %%s" % name_len

    data.append(u'')
    data.append(format_str % (u"Name", u"Description"))
    data.append(format_str % (u"----", u"-----------"))

    for role in response['results']:
        data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))

    # present the table through the interactive pager
    data = u'\n'.join(data)
    self.pager(data)

    return True
def execute_import(self):
    """ used to import a role into Ansible Galaxy """

    # map Galaxy task message levels to display colors
    colors = {
        'INFO': 'normal',
        'WARNING': C.COLOR_WARN,
        'ERROR': C.COLOR_ERROR,
        'SUCCESS': C.COLOR_OK,
        'FAILED': C.COLOR_ERROR,
    }

    github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
    github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')

    if context.CLIARGS['check_status']:
        # only query the status of an existing import task
        task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
    else:
        # Submit an import request
        task = self.api.create_import_task(github_user, github_repo,
                                           reference=context.CLIARGS['reference'],
                                           role_name=context.CLIARGS['role_name'])

        if len(task) > 1:
            # found multiple roles associated with github_user/github_repo
            display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
                            color='yellow')
            display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
            for t in task:
                display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
            display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                            color=C.COLOR_CHANGED)
            return 0
        # found a single role as expected
        display.display("Successfully submitted import request %d" % task[0]['id'])
        if not context.CLIARGS['wait']:
            display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
            display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))

    if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
        # Get the status of the import
        msg_list = []
        finished = False
        while not finished:
            task = self.api.get_import_task(task_id=task[0]['id'])
            # print only messages we have not already shown (tracked by id)
            for msg in task[0]['summary_fields']['task_messages']:
                if msg['id'] not in msg_list:
                    display.display(msg['message_text'], color=colors[msg['message_type']])
                    msg_list.append(msg['id'])
            if task[0]['state'] in ['SUCCESS', 'FAILED']:
                finished = True
            else:
                # poll the import task every 10s until it reaches a terminal state
                time.sleep(10)

    return 0
def execute_setup(self):
    """Setup an integration from Github or Travis for Ansible Galaxy roles.

    Three mutually exclusive modes, checked in order: list existing secrets
    (--list), remove a secret (--remove), or add a new integration secret.
    Returns 0 in every case.
    """
    if context.CLIARGS['setup_list']:
        # List existing integration secrets
        secrets = self.api.list_secrets()
        if len(secrets) == 0:
            # None found
            display.display("No integrations found.")
            return 0
        display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
        display.display("---------- ---------- ----------", color=C.COLOR_OK)
        for secret in secrets:
            display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
                                                   secret['github_repo']), color=C.COLOR_OK)
        return 0

    if context.CLIARGS['remove_id']:
        # Remove a secret
        self.api.remove_secret(context.CLIARGS['remove_id'])
        # Bug fix: corrected message grammar ("will not longer" -> "will no longer").
        display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK)
        return 0

    # Default mode: register a new integration secret.
    source = context.CLIARGS['source']
    github_user = context.CLIARGS['github_user']
    github_repo = context.CLIARGS['github_repo']
    secret = context.CLIARGS['secret']

    resp = self.api.add_secret(source, github_user, github_repo, secret)
    display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))

    return 0
def execute_delete(self):
    """Delete a role (identified by its GitHub user/repo) from Ansible Galaxy."""
    owner = context.CLIARGS['github_user']
    repo = context.CLIARGS['github_repo']
    resp = self.api.delete_role(owner, repo)

    deleted = resp['deleted_roles']
    if len(deleted) > 1:
        display.display("Deleted the following roles:")
        display.display("ID User Name")
        display.display("------ --------------- ----------")
        for role in deleted:
            display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))

    display.display(resp['status'])

    return True
|
gpl-3.0
|
Godiyos/python-for-android
|
python-modules/twisted/twisted/protocols/finger.py
|
81
|
1246
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""The Finger User Information Protocol (RFC 1288)"""
from twisted.protocols import basic
import string
class Finger(basic.LineReceiver):
    """Minimal RFC 1288 (finger) server: parses the query line, then
    politely refuses every request type (subclasses may override the
    forwardQuery/getDomain/getUser hooks to actually answer)."""

    def lineReceived(self, line):
        """Parse one finger query line and dispatch it.

        A leading '/W' token sets slash_w (verbose flag); the last token is
        the target, optionally 'user@host' for a forwarded query.
        """
        parts = string.split(line)
        if not parts:
            parts = ['']
        if len(parts) == 1:
            slash_w = 0
        else:
            slash_w = 1
        user = parts[-1]
        if '@' in user:
            host_place = string.rfind(user, '@')
            # Bug fix: extract the host *before* truncating the user --
            # slicing the already-shortened string always produced an
            # empty host.
            host = user[host_place+1:]
            user = user[:host_place]
            return self.forwardQuery(slash_w, user, host)
        if user:
            return self.getUser(slash_w, user)
        else:
            return self.getDomain(slash_w)

    def _refuseMessage(self, message):
        # write the refusal and drop the connection, per protocol
        self.transport.write(message+"\n")
        self.transport.loseConnection()

    def forwardQuery(self, slash_w, user, host):
        self._refuseMessage('Finger forwarding service denied')

    def getDomain(self, slash_w):
        self._refuseMessage('Finger online list denied')

    def getUser(self, slash_w, user):
        self.transport.write('Login: '+user+'\n')
        self._refuseMessage('No such user')
|
apache-2.0
|
jackwluo/py-quantmod
|
quantmod/datetools.py
|
1
|
1489
|
"""Date and time functions
Refactored from Cufflinks' 'date_tools.py' module.
Credits to @jorgesantos.
"""
import datetime as dt
def get_date_from_today(delta, strfmt='%Y%m%d'):
    """Return the date *delta* days from today as a string.

    Parameters
    ----------
    delta : int
        number of days (may be negative)
    strfmt : string
        strftime format for the returned date
    """
    target_date = dt.date.today() + dt.timedelta(delta)
    return target_date.strftime(strfmt)
def string_to_date(string_date, strfmt='%Y%m%d'):
    """Parse *string_date* (in *strfmt* format) into a ``datetime.date``.

    Parameters
    ----------
    string_date : string
        date in string format
    strfmt : string
        strftime format the input uses
    """
    parsed = dt.datetime.strptime(string_date, strfmt)
    return parsed.date()
def int_to_date(int_date):
    """Convert an ``YYYYMMDD`` integer into a ``datetime.date``.

    Example
    -------
    int_to_date(20151023)
    """
    return string_to_date(str(int_date), '%Y%m%d')
def date_to_int(date, strfmt='%Y%m%d'):
    """Format *date* via *strfmt* and return the result as an int.

    Example
    -------
    date_to_int(dt.date(2015, 10, 23)) -> 20151023
    """
    formatted = date.strftime(strfmt)
    return int(formatted)
|
mit
|
nvmd/bitcoin
|
qa/rpc-tests/getchaintips.py
|
140
|
2130
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips, verify that it works.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
    """Exercise getchaintips across a deliberate network split and rejoin."""

    def run_test (self):
        BitcoinTestFramework.run_test (self)

        # Initially every node agrees on one 200-block active chain.
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 1)
        assert_equal (tips[0]['branchlen'], 0)
        assert_equal (tips[0]['height'], 200)
        assert_equal (tips[0]['status'], 'active')

        # Split the network and build two chains of different lengths.
        self.split_network ()
        self.nodes[0].generate(10);
        self.nodes[2].generate(20);
        self.sync_all ()

        # First half (node 1) sees only its own 210-block chain.
        tips = self.nodes[1].getchaintips ()
        assert_equal (len (tips), 1)
        shortTip = tips[0]
        assert_equal (shortTip['branchlen'], 0)
        assert_equal (shortTip['height'], 210)
        assert_equal (tips[0]['status'], 'active')

        # Second half (node 3) sees only its 220-block chain.
        tips = self.nodes[3].getchaintips ()
        assert_equal (len (tips), 1)
        longTip = tips[0]
        assert_equal (longTip['branchlen'], 0)
        assert_equal (longTip['height'], 220)
        assert_equal (tips[0]['status'], 'active')

        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network ()

        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 2)
        assert_equal (tips[0], longTip)

        # The short chain is now a 10-block valid fork; after normalizing its
        # branchlen/status it must equal the tip recorded before the join.
        assert_equal (tips[1]['branchlen'], 10)
        assert_equal (tips[1]['status'], 'valid-fork')
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal (tips[1], shortTip)
# Standard functional-test entry point.
if __name__ == '__main__':
    GetChainTipsTest ().main ()
|
mit
|
theflofly/tensorflow
|
tensorflow/contrib/gan/python/losses/__init__.py
|
56
|
1385
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFGAN losses and penalties.
Losses can be used with individual arguments or with GANModel tuples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Collapse losses into a single namespace.
from tensorflow.contrib.gan.python.losses.python import losses_wargs as wargs
from tensorflow.contrib.gan.python.losses.python import tuple_losses
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.losses.python.tuple_losses import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Public API: the 'wargs' namespace plus everything tuple_losses re-exports;
# strip any other name from this module's surface.
_allowed_symbols = ['wargs'] + tuple_losses.__all__
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
jsteemann/arangodb
|
3rdParty/V8-4.3.61/build/gyp/test/sibling/gyptest-relocate.py
|
137
|
1170
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp

# Generate in 'src', relocate the tree, build, then verify both programs
# run from the relocated location.
test = TestGyp.TestGyp()

test.run_gyp('build/all.gyp', chdir='src')

test.relocate('src', 'relocate/src')

test.build('build/all.gyp', test.ALL, chdir='relocate/src')

chdir = 'relocate/src/build'

# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format in ('make', 'ninja', 'cmake'):
    chdir = 'relocate/src'

# for xcode, run each program from its own project directory
if test.format == 'xcode':
    chdir = 'relocate/src/prog1'
test.run_built_executable('program1',
                          chdir=chdir,
                          stdout="Hello from prog1.c\n")

if test.format == 'xcode':
    chdir = 'relocate/src/prog2'
test.run_built_executable('program2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")

test.pass_test()
|
apache-2.0
|
justinpotts/mozillians
|
vendor-local/lib/python/tablib/formats/_yaml.py
|
6
|
1252
|
# -*- coding: utf-8 -*-
""" Tablib - YAML Support.
"""
import sys
try:
import yaml
except ImportError:
if sys.version_info[0] > 2:
import tablib.packages.yaml3 as yaml
else:
import tablib.packages.yaml as yaml
import tablib
title = 'yaml'
extentions = ('yaml', 'yml')
def export_set(dataset):
    """Returns YAML representation of Dataset."""
    packaged = dataset._package(ordered=False)
    return yaml.dump(packaged)
def export_book(databook):
    """Returns YAML representation of Databook."""
    packaged = databook._package()
    return yaml.dump(packaged)
def import_set(dset, in_stream):
    """Returns dataset from YAML stream."""
    dset.wipe()
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from untrusted input; consider yaml.safe_load.
    dset.dict = yaml.load(in_stream)
def import_book(dbook, in_stream):
    """Returns databook from YAML stream."""
    dbook.wipe()
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from untrusted input; consider yaml.safe_load.
    # Each YAML document entry becomes one sheet with 'title' and 'data' keys.
    for sheet in yaml.load(in_stream):
        data = tablib.Dataset()
        data.title = sheet['title']
        data.dict = sheet['data']
        dbook.add_sheet(data)
def detect(stream):
    """Returns True if given stream is valid YAML."""
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; yaml.safe_load would be the hardened choice here.
    try:
        _yaml = yaml.load(stream)
    except yaml.error.YAMLError:
        # Bug fix: catch the whole YAML error hierarchy (ScannerError,
        # ComposerError, ...), not just ParserError -- detect() must return
        # False on malformed input rather than raise.
        return False
    # only structured YAML (sequence or mapping) counts as a tablib payload
    return isinstance(_yaml, (list, tuple, dict))
|
bsd-3-clause
|
fitoria/askbot-devel
|
askbot/deps/django_authopenid/mimeparse.py
|
23
|
7745
|
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explaination.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into it's component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
"""
__version__ = "0.1.1"
__author__ = 'Joe Gregorio'
__email__ = "joe@bitworking.org"
__credits__ = ""
def parse_mime_type(mime_type):
    """Carves up a mime_type and returns a tuple of the
    (type, subtype, params) where 'params' is a dictionary
    of all the parameters for the media range.

    For example, 'application/xhtml;q=0.5' parses into:

       ('application', 'xhtml', {'q', '0.5'})
    """
    pieces = mime_type.split(";")
    params = {}
    for raw_param in pieces[1:]:
        key, value = tuple([chunk.strip() for chunk in raw_param.split("=")])
        params[key] = value
    (type, subtype) = pieces[0].split("/")
    return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
    """Carves up a media range and returns a tuple of the
    (type, subtype, params) where 'params' is a dictionary
    of all the parameters for the media range.

    For example, the media range 'application/*;q=0.5' would
    get parsed into:

       ('application', '*', {'q', '0.5'})

    In addition this function also guarantees that there
    is a value for 'q' in the params dictionary, filling it
    in with a proper default if necessary.
    """
    (type, subtype, params) = parse_mime_type(range)
    # Bug fix: 'dict.has_key' was removed in Python 3; the 'in' operator is
    # the portable spelling and behaves identically on Python 2.
    # A missing, empty, zero, or out-of-range 'q' is reset to '1'.
    if 'q' not in params or not params['q'] or \
            not float(params['q']) or float(params['q']) > 1 \
            or float(params['q']) < 0:
        params['q'] = '1'
    return (type, subtype, params)
def quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a given mime_type against
    a list of media_ranges that have already been
    parsed by parse_media_range(). Returns the
    'q' quality parameter of the best match, 0 if no
    match was found. This function bahaves the same as quality()
    except that 'parsed_ranges' must be a list of
    parsed media ranges. """
    best_fitness = -1
    best_fit_q = 0
    (target_type, target_subtype, target_params) = \
            parse_media_range(mime_type)
    for (type, subtype, params) in parsed_ranges:
        # Count parameters (other than 'q') shared with equal values between
        # the target and this range.  Replaces the Python 2-only
        # reduce/iteritems/has_key spelling with an equivalent generator sum;
        # also drops the unused 'best_match' local the original carried.
        param_matches = sum(
            1 for (key, value) in target_params.items()
            if key != 'q' and key in params and value == params[key]
        )
        if (type == target_type or type == '*' or target_type == '*') and \
                (subtype == target_subtype or subtype == '*' or target_subtype == '*'):
            # fitness: exact type match (100) + exact subtype match (10)
            # + number of matching parameters; ties keep the earlier range.
            fitness = (type == target_type) and 100 or 0
            fitness += (subtype == target_subtype) and 10 or 0
            fitness += param_matches
            if fitness > best_fitness:
                best_fitness = fitness
                best_fit_q = params['q']
    return float(best_fit_q)
def quality(mime_type, ranges):
    """Returns the quality 'q' of a mime_type when compared
    against the media-ranges in ranges. For example:

    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7

    """
    media_ranges = ranges.split(",")
    parsed_ranges = [parse_media_range(media_range) for media_range in media_ranges]
    return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
    """Takes a list of supported mime-types and finds the best
    match for all the media-ranges listed in header. The value of
    header must be a string that conforms to the format of the
    HTTP Accept: header. The value of 'supported' is a list of
    mime-types.

    >>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    parsed_header = [parse_media_range(r) for r in header.split(",")]
    weighted_matches = []
    for mime_type in supported:
        weighted_matches.append((quality_parsed(mime_type, parsed_header), mime_type))
    weighted_matches.sort()
    # highest quality wins; a zero quality means nothing matched at all
    best_quality, best_type = weighted_matches[-1]
    if best_quality:
        return best_type
    return ''
if __name__ == "__main__":
import unittest
class TestMimeParsing(unittest.TestCase):
def test_parse_media_range(self):
self.assert_(('application', 'xml', {'q': '1'}) == parse_media_range('application/xml;q=1'))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml'))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml;q='))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml ; q='))
self.assertEqual(('application', 'xml', {'q': '1', 'b': 'other'}), parse_media_range('application/xml ; q=1;b=other'))
self.assertEqual(('application', 'xml', {'q': '1', 'b': 'other'}), parse_media_range('application/xml ; q=2;b=other'))
def test_rfc_2616_example(self):
accept = "text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5"
self.assertEqual(1, quality("text/html;level=1", accept))
self.assertEqual(0.7, quality("text/html", accept))
self.assertEqual(0.3, quality("text/plain", accept))
self.assertEqual(0.5, quality("image/jpeg", accept))
self.assertEqual(0.4, quality("text/html;level=2", accept))
self.assertEqual(0.7, quality("text/html;level=3", accept))
def test_best_match(self):
mime_types_supported = ['application/xbel+xml', 'application/xml']
# direct match
self.assertEqual(best_match(mime_types_supported, 'application/xbel+xml'), 'application/xbel+xml')
# direct match with a q parameter
self.assertEqual(best_match(mime_types_supported, 'application/xbel+xml; q=1'), 'application/xbel+xml')
# direct match of our second choice with a q parameter
self.assertEqual(best_match(mime_types_supported, 'application/xml; q=1'), 'application/xml')
# match using a subtype wildcard
self.assertEqual(best_match(mime_types_supported, 'application/*; q=1'), 'application/xml')
# match using a type wildcard
self.assertEqual(best_match(mime_types_supported, '*/*'), 'application/xml')
mime_types_supported = ['application/xbel+xml', 'text/xml']
# match using a type versus a lower weighted subtype
self.assertEqual(best_match(mime_types_supported, 'text/*;q=0.5,*/*; q=0.1'), 'text/xml')
# fail to match anything
self.assertEqual(best_match(mime_types_supported, 'text/html,application/atom+xml; q=0.9'), '')
def test_support_wildcards(self):
mime_types_supported = ['image/*', 'application/xml']
# match using a type wildcard
self.assertEqual(best_match(mime_types_supported, 'image/png'), 'image/*')
# match using a wildcard for both requested and supported
self.assertEqual(best_match(mime_types_supported, 'image/*'), 'image/*')
unittest.main()
|
gpl-3.0
|
sajadkk/odoo-addons
|
portal_procurement/portal_procurement.py
|
1
|
3701
|
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class ProcurementOrder(osv.Model):
_inherit = "procurement.order"
def create(self, cr, user, vals, context=None):
    """Create a procurement; portal-originated ones always start in 'draft'."""
    ctx = context if context is not None else {}
    if ctx.get('portal'):
        vals['state'] = 'draft'
    return super(ProcurementOrder, self).create(cr, user, vals, ctx)
def unlink(self, cr, uid, ids, context=None):
    """Delete procurements, refusing any record not in 'cancel' or 'draft'."""
    records = self.read(cr, uid, ids, ['state'], context=context)
    deletable_ids = []
    for record in records:
        if record['state'] not in ('cancel', 'draft'):
            raise osv.except_osv(_('Invalid Action!'),
                                 _('Cannot delete Procurement Order(s) which are in %s state.') % record['state'])
        deletable_ids.append(record['id'])
    return osv.osv.unlink(self, cr, uid, deletable_ids, context=context)
def _get_default_partner_id(self, cr, uid, context=None):
""" Gives default partner_id """
if context is None:
context = {}
if context.get('portal'):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
# Special case for portal users, as they are not allowed to call name_get on res.partner
# We save this call for the web client by returning it in default get
return self.pool['res.partner'].name_get(cr, SUPERUSER_ID, [user.partner_id.id], context=context)[0]
return False
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', False)]}, readonly=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', False)]}, readonly=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', False)]}, readonly=True),
'product_uos_qty': fields.float('UoS Quantity', states={'draft': [('readonly', False)], 'confirmed': [('readonly', False)]}, readonly=True),
'product_uos': fields.many2one('product.uom', 'Product UoS', states={'draft': [('readonly', False)], 'confirmed': [('readonly', False)]}, readonly=True),
'partner_id': fields.many2one('res.partner'),
'state': fields.selection([
('cancel', 'Cancelled'),
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('exception', 'Exception'),
('running', 'Running'),
('done', 'Done')
], 'Status', required=True, track_visibility='onchange', copy=False),
}
_defaults = {
'partner_id': lambda self, cr, uid, context: self._get_default_partner_id(cr, uid, context),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(ProcurementOrder, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type,
context=context, toolbar=toolbar, submenu=submenu)
if toolbar and context.get('portal'):
print res['toolbar']['action']
res['toolbar']['action'] = []
return res
def button_confirm_request(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
|
agpl-3.0
|
Ghostkeeper/Luna
|
luna/stream.py
|
1
|
2727
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
This module presents some classes that help with streaming data for
inter-component communication.
"""
import io #To use the standard I/O streams as helper.
class BytesStreamReader:
    """
    A stream that wraps around a ``BufferedReader`` instance and allows
    iterating byte-by-byte.

    For the rest, the reader behaves exactly the same as ``BufferedReader``.
    Only iterating over it yields separate bytes (as ``int`` values).

    The built-in ``BufferedReader`` can read binary streams, but iterating over
    them still yields the data line-by-line. This is undesirable since lines
    typically have no meaning for binary files.
    """

    def __init__(self, wrapped):
        """
        Creates the ``BytesStreamReader``, wrapping it around the original
        stream.

        :param wrapped: The ``BufferedReader`` stream to wrap around, or a
        ``bytes`` object which is wrapped in an in-memory stream first.
        """
        # isinstance (rather than an exact type() comparison) also accepts
        # bytes subclasses.
        if isinstance(wrapped, bytes):
            wrapped = io.BytesIO(wrapped)
        self._wrapped = wrapped

    def __enter__(self):
        """
        Starts reading from the stream.

        :return: This BytesStreamReader instance.
        """
        self._wrapped.__enter__()
        return self  # Return ourselves, not the wrapped stream instance.

    def __exit__(self, exception_type, exception_value, traceback):
        """
        Stops reading from the stream.

        :param exception_type: The type of any exception thrown during the
        ``with`` block, or ``None`` if no exception was thrown.
        :param exception_value: An instance of the exception that was thrown
        during the ``with`` block, or ``None`` if no exception was thrown.
        :param traceback: The traceback of any exception that was thrown during
        the ``with`` block, or ``None`` if no exception was thrown.
        """
        # Forward the exception triple to the wrapped stream; the previous
        # implementation discarded it.
        self._wrapped.__exit__(exception_type, exception_value, traceback)

    def __getattr__(self, item):
        """
        Passes ordinary attribute access on to the wrapped ``BufferedReader``.

        Only attributes that are defined in this class are not passed on.

        :param item: The attribute to get from ``BufferedReader``.
        :return: The attribute value of the wrapped stream.
        """
        return getattr(self._wrapped, item)

    def __iter__(self):
        """
        Creates an iterator that iterates over the bytes in this stream.

        This turns the ``BytesStreamReader`` into a ``bytes``-like class.

        :return: A sequence of individual bytes (ints) in the stream.
        """
        for line in self._wrapped:
            yield from line
|
cc0-1.0
|
wangyum/tensorflow
|
tensorflow/contrib/session_bundle/gc.py
|
47
|
6397
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print every_fifth(path_list) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print largest_three(all_paths) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print both(all_paths) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Path = collections.namedtuple('Path', 'path export_version')
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def largest_export_versions(n):
  """Creates a filter that keeps the largest n export versions.

  Args:
    n: number of versions to keep.

  Returns:
    A filter function that keeps the n largest paths.
  """
  def keep(paths):
    # Rank only paths that actually carry an export version; ties on version
    # are broken by the original list position.
    versioned = [(path.export_version, index)
                 for index, path in enumerate(paths)
                 if path.export_version is not None]
    winners = [paths[index] for _, index in heapq.nlargest(n, versioned)]
    return sorted(winners)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def one_of_every_n_export_versions(n):
  r"""Creates a filter that keeps one of every n export versions.

  Args:
    n: interval size.

  Returns:
    A filter function that keeps exactly one path from each interval
    [0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
    interval the largest is kept.
  """
  def keep(paths):
    # Map each interval to the largest-versioned path seen inside it.
    best_per_interval = {}
    for path in paths:
      version = path.export_version
      if version is None:
        continue  # Skip paths without an export version.
      # export_version 0 lands in interval 0; otherwise bucket (kn, (k+1)n].
      bucket = math.floor((version - 1) / n) if version else 0
      current = best_per_interval.get(bucket)
      if current is None or current.export_version < version:
        best_per_interval[bucket] = path
    return sorted(best_per_interval.values())
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def mod_export_version(n):
  """Creates a filter that keeps every export that is a multiple of n.

  Args:
    n: step size.

  Returns:
    A filter function that keeps paths where export_version % n == 0.
  """
  def keep(paths):
    return sorted(p for p in paths if p.export_version % n == 0)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def union(lf, rf):
  """Creates a filter that keeps the union of two filters.

  Args:
    lf: first filter
    rf: second filter

  Returns:
    A filter function that keeps any path kept by either input filter.
  """
  def keep(paths):
    combined = set(lf(paths)) | set(rf(paths))
    return sorted(combined)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def negation(f):
  """Negate a filter.

  Args:
    f: filter function to invert

  Returns:
    A filter function that keeps exactly the paths f would discard.
  """
  def keep(paths):
    return sorted(set(paths) - set(f(paths)))
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def get_paths(base_dir, parser):
  """Gets a list of Paths in a given directory.

  Args:
    base_dir: directory.
    parser: a function which gets the raw Path and can augment it with
      information such as the export_version, or ignore the path by returning
      None. An example parser may extract the export version from a path
      such as "/tmp/exports/100" an another may extract from a full file
      name such as "/tmp/checkpoint-99.out".

  Returns:
    A sorted list of Paths contained in the base directory, after the parsing
    function has been applied. Entries the parser rejects (returns None for)
    are dropped. Path.path is populated here; the parser is responsible for
    Path.export_version.
  """
  parsed = []
  for entry in gfile.ListDirectory(base_dir):
    candidate = parser(Path(os.path.join(base_dir, entry), None))
    if candidate:
      parsed.append(candidate)
  return sorted(parsed)
|
apache-2.0
|
cfossace/test
|
crits/samples/handlers.py
|
1
|
60996
|
import copy
import json
import logging
import os
import pprint
import subprocess
import tempfile, shutil
import time
from bson.objectid import ObjectId
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from hashlib import md5
from mongoengine.base import ValidationError
from crits.backdoors.backdoor import Backdoor
from crits.campaigns.forms import CampaignForm
from crits.config.config import CRITsConfig
from crits.core import form_consts
from crits.core.class_mapper import class_from_value, class_from_id
from crits.core.crits_mongoengine import EmbeddedSource, EmbeddedCampaign
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.data_tools import convert_string_to_bool, validate_md5_checksum
from crits.core.data_tools import validate_sha1_checksum, validate_sha256_checksum
from crits.core.exceptions import ZipFileError
from crits.core.forms import DownloadFileForm
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.handsontable_tools import convert_handsontable_to_rows, parse_bulk_upload
from crits.core.mongo_tools import get_file
from crits.core.source_access import SourceAccess
from crits.core.user_tools import is_admin, user_sources, get_user_organization
from crits.core.user_tools import is_user_subscribed, is_user_favorite
from crits.notifications.handlers import remove_user_from_notification
from crits.objects.handlers import object_array_to_dict
from crits.objects.handlers import validate_and_add_new_handler_object
from crits.samples.forms import XORSearchForm, UnrarSampleForm, UploadFileForm
from crits.samples.sample import Sample
from crits.samples.yarahit import YaraHit
from crits.services.analysis_result import AnalysisResult
from crits.services.handlers import run_triage, get_supported_services
from crits.stats.handlers import generate_yara_hits
from crits.vocabulary.relationships import RelationshipTypes
logger = logging.getLogger(__name__)
def generate_sample_csv(request):
    """
    Generate a CSV file of the Sample information.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # csv_export already builds the full HTTP response for the Sample class.
    return csv_export(request, Sample)
def get_sample_details(sample_md5, analyst, format_=None):
    """
    Generate the data to render the Sample details template.

    :param sample_md5: The MD5 of the Sample to get details for.
    :type sample_md5: str
    :param analyst: The user requesting this information.
    :type analyst: str
    :param format_: The format of the details page ("yaml", "json", "text",
        or None for the full HTML detail view).
    :type format_: str
    :returns: template (str), arguments (dict) -- or ("yaml"/"json", data)
        for the serialized formats.
    """
    template = None
    # Only show samples from sources this analyst is allowed to see.
    sources = user_sources(analyst)
    sample = Sample.objects(md5=sample_md5,
                            source__name__in=sources).first()
    if not sample:
        return ('error.html', {'error': "File not yet available or you do not have access to view it."})
    sample.sanitize_sources(username=analyst)
    if format_:
        # Fields stripped from serialized (yaml/json) exports.
        exclude = [
            "source",
            "relationships",
            "schema_version",
            "campaign",
            "analysis",
            "bucket_list",
            "ticket",
            "releasability",
            "unsupported_attrs",
            "status",
            "objects",
            "modified",
            "analyst",
            "_id"
        ]
        if format_ == "yaml":
            data = sample.to_yaml(exclude)
            return "yaml", data
        if format_ == "json":
            data = sample.to_json(exclude)
            return "json", data
    # NOTE(review): 'sample' was already checked above, so this branch appears
    # unreachable; kept as-is.
    if not sample:
        template = "error.html"
        args = {'error': "No sample found"}
    elif format_ == "text":
        template = "samples_detail_text.html"
        args = {'sample': sample}
    else:
        # NOTE(review): 'template' stays None in this branch -- presumably the
        # caller supplies the default detail template; verify against callers.
        #create forms
        xor_search_form = XORSearchForm()
        campaign_form = CampaignForm()
        unrar_sample_form = UnrarSampleForm()
        download_form = DownloadFileForm(initial={"obj_type":'Sample',
                                                  "obj_id":sample.id,
                                                  "meta_format": "none"})
        # do we have the binary?
        if isinstance(sample.filedata.grid_id, ObjectId):
            binary_exists = 1
        else:
            binary_exists = 0
        sample.sanitize("%s" % analyst)
        # remove pending notifications for user
        remove_user_from_notification("%s" % analyst, sample.id, 'Sample')
        # subscription
        subscription = {
            'type': 'Sample',
            'id': sample.id,
            'subscribed': is_user_subscribed("%s" % analyst,
                                             'Sample',
                                             sample.id),
        }
        #objects
        objects = sample.sort_objects()
        #relationships
        relationships = sample.sort_relationships("%s" % analyst,
                                                  meta=True)
        # relationship
        relationship = {
            'type': 'Sample',
            'value': sample.id
        }
        #comments
        comments = {'comments': sample.get_comments(),
                    'url_key': sample_md5}
        #screenshots
        screenshots = sample.get_screenshots(analyst)
        # favorites
        favorite = is_user_favorite("%s" % analyst, 'Sample', sample.id)
        # services
        service_list = get_supported_services('Sample')
        # analysis results
        service_results = sample.get_analysis_results()
        args = {'objects': objects,
                'relationships': relationships,
                'comments': comments,
                'relationship': relationship,
                'subscription': subscription,
                'sample': sample, 'sources': sources,
                'campaign_form': campaign_form,
                'download_form': download_form,
                'xor_search_form': xor_search_form,
                'unrar_sample_form': unrar_sample_form,
                'binary_exists': binary_exists,
                'favorite': favorite,
                'screenshots': screenshots,
                'service_list': service_list,
                'service_results': service_results}
    return template, args
def generate_sample_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtlist_by_org', 'jtdelete', or
        'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = Sample
    type_ = "sample"
    # jtable configuration is declared on the Sample document class.
    mapper = obj_type._meta['jtable_opts']
    if option == "jtlist":
        # Sets display url
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        fields = mapper['fields']
        response = jtable_ajax_list(obj_type, details_url, details_url_key,
                                    request, includes=fields)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    if option == "jtlist_by_org":
        # Sets display url
        details_url = mapper['details_url']
        details_url_key = mapper['details_url_key']
        # Restrict the listing to the requesting user's own organization.
        get_values = request.GET.copy()
        get_values['source'] = get_user_organization("%s" % request.user.username)
        request.GET = get_values
        fields = mapper['fields']
        response = jtable_ajax_list(obj_type,details_url,details_url_key,
                                    request, includes=fields)
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")
    if option == "jtdelete":
        response = {"Result": "ERROR"}
        if jtable_ajax_delete(obj_type,request):
            response = {"Result": "OK"}
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Fall through: build the jtable definition for rendering in a template.
    jtopts = {
        'title': "Samples",
        'default_sort': mapper['default_sort'],
        'listurl': reverse('crits.%ss.views.%ss_listing' %
                           (type_, type_), args=('jtlist',)),
        'deleteurl': reverse('crits.%ss.views.%ss_listing' %
                             (type_, type_), args=('jtdelete',)),
        'searchurl': reverse(mapper['searchurl']),
        'fields': mapper['jtopts_fields'],
        'hidden_fields': mapper['hidden_fields'],
        'linked_fields': mapper['linked_fields'],
        'details_link': mapper['details_link'],
        'no_sort': mapper['no_sort']
    }
    jtable = build_jtable(jtopts,request)
    # Toolbar buttons: status filters plus an "Add Sample" shortcut. The
    # string values are JavaScript snippets evaluated client-side by jtable.
    jtable['toolbar'] = [
        {
            'tooltip': "'All Samples'",
            'text': "'All'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'New Samples'",
            'text': "'New'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'In Progress Samples'",
            'text': "'In Progress'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Analyzed Samples'",
            'text': "'Analyzed'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Deprecated Samples'",
            'text': "'Deprecated'",
            'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
            'cssClass': "'jtable-toolbar-center'",
        },
        {
            'tooltip': "'Add Sample'",
            'text': "'Add Sample'",
            'click': "function () {$('#new-sample').click()}",
        },
    ]
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_,
                                   'button' : '%ss_tab' % type_},
                                  RequestContext(request))
    else:
        return render_to_response("%s_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%s_listing' % type_},
                                  RequestContext(request))
def generate_yarahit_jtable(request, option):
    """
    Generate the jtable data for rendering in the list template.

    :param request: The request for this jtable. Passing ``refresh=yes`` in
        the query string regenerates the yara hit statistics first.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    refresh = request.GET.get("refresh", "no")
    if refresh == "yes":
        # Recompute yara hit counts before listing them.
        generate_yara_hits()
    obj_type = YaraHit
    type_ = "yarahit"
    if option == "jtlist":
        # Sets display url
        details_url = 'crits.samples.views.samples_listing'
        details_url_key = "detectexact"
        response = jtable_ajax_list(obj_type,
                                    details_url,
                                    details_url_key,
                                    request)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Build the jtable definition for template rendering.
    jtopts = {
        'title': "Yara Hits",
        'default_sort': "result ASC",
        'listurl': reverse('crits.samples.views.%ss_listing' % (type_,),
                           args=('jtlist',)),
        'deleteurl': "",
        'searchurl': reverse('crits.samples.views.%ss_listing' % (type_,)),
        'fields': ["result", "engine", "version", "sample_count","_id"],
        'hidden_fields': ["_id"],
        'linked_fields': []
    }
    jtable = build_jtable(jtopts,request)
    # Single toolbar button that re-triggers the refresh described above.
    jtable['toolbar'] = [
        {
            'tooltip': "'Refresh Yara Hits'",
            'text': "'Refresh Stats'",
            'click': "function () {$.get('"+reverse('crits.samples.views.%ss_listing' % type_)+"', {'refresh': 'yes'}, function () { $('#yarahits_listing').jtable('reload');});}"
        },
    ]
    if option == "inline":
        return render_to_response("jtable.html",
                                  {'jtable': jtable,
                                   'jtid': '%ss_listing' % type_,
                                   'button' : '%ss_button' % type_},
                                  RequestContext(request))
    else:
        return render_to_response("%ss_listing.html" % type_,
                                  {'jtable': jtable,
                                   'jtid': '%ss_listing' % type_},
                                  RequestContext(request))
def get_filename(md5=None):
    """
    Get the filename of a sample by MD5.

    :param md5: The MD5 of the sample to get the filename of.
    :type md5: str
    :returns: None, str
    """
    if not md5:
        return None
    sample = Sample.objects(md5=md5).first()
    return sample.filename if sample else None
def get_md5_hash(oid=None):
    """
    Get the MD5 of a sample by ObjectId.

    :param oid: The ObjectId of the sample to get the MD5 of.
    :type oid: str
    :returns: None, str
    """
    # Guard clauses instead of the nested if/else ladder.
    if oid is None:
        return None
    sample = Sample.objects(id=oid).first()
    return sample.md5 if sample else None
def delete_sample(sample_md5, username=None):
    """
    Delete a sample from CRITs.

    :param sample_md5: The MD5 of the sample to delete.
    :type sample_md5: str
    :param username: The user deleting this sample.
    :type username: str
    :returns: bool -- True only when the user is an admin and the sample
        existed and was deleted.
    """
    if not is_admin(username):
        return False
    sample = Sample.objects(md5=sample_md5).first()
    if not sample:
        return False
    sample.delete(username=username)
    return True
def mail_sample(sample_md5, recips=None):
    """
    Mail a sample's details to a list of recipients.

    :param sample_md5: The MD5 of the sample to send.
    :type sample_md5: str
    :param recips: List of recipients.
    :type recips: list
    :returns: None on success or no-op, str (error description) on failure.
    """
    # Config is fetched unconditionally, matching the original call order.
    crits_config = CRITsConfig.objects().first()
    if recips is None:
        return None
    sample = Sample.objects(md5=sample_md5).first()
    if not sample:
        return None
    try:
        send_mail('Details for %s' % sample_md5,
                  '%s' % pprint.pformat(sample.to_json()),
                  crits_config.crits_email,
                  recips,
                  fail_silently=False)
    except Exception as e:
        logger.error(e)
        return str(e.args)
    return None
def get_source_counts(analyst):
    """
    Get the sources for a user.

    :param analyst: The user to get sources for.
    :type analyst: str
    :returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
    """
    return SourceAccess.objects(name__in=user_sources(analyst))
def get_yara_hits(version=None):
    """
    Get the yara hits in the database.

    :param version: The yara hit version to search for (all versions if None).
    :type version: str
    :returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
    """
    query = YaraHit.objects(version=version) if version else YaraHit.objects()
    return query.order_by('+result')
def handle_unrar_sample(md5, user=None, password=None):
    """
    Unrar an existing sample's file data.

    :param md5: The MD5 of the sample to unrar.
    :type md5: str
    :param user: The user unraring this sample.
    :type user: str
    :param password: Password to use to unrar the sample.
    :type password: str
    :returns: list, or None when the sample does not exist.
    :raises: ZipFileError, Exception
    """
    sample = class_from_value('Sample', md5)
    if not sample:
        return None
    return unrar_file(md5, user, password,
                      sample.filedata.read(),
                      sample.source[0].name,
                      method="Unrar Existing Sample",
                      reference='',
                      campaign=sample.campaign,
                      related_md5=md5)
def handle_unzip_file(md5, user=None, password=None):
    """
    Unzip an existing sample's file data.

    :param md5: The MD5 of the sample to unzip.
    :type md5: str
    :param user: The user unzipping this sample.
    :type user: str
    :param password: Password to use to unzip the sample.
    :type password: str
    :returns: list, or None when the sample does not exist.
    :raises: ZipFileError, Exception
    """
    sample = class_from_value('Sample', md5)
    if not sample:
        return None
    return unzip_file(md5, user, password,
                      sample.filedata.read(),
                      sample.source[0].name,
                      method="Unzip Existing Sample",
                      reference='',
                      campaign=sample.campaign,
                      related_md5=md5)
def unzip_file(filename, user=None, password=None, data=None, source=None,
               method='Zip', reference='', campaign=None, confidence='low',
               related_md5=None, related_id=None, related_type='Sample',
               bucket_list=None, ticket=None, inherited_source=None,
               is_return_only_md5=True, backdoor_name=None,
               backdoor_version=None):
    """
    Unzip a file by shelling out to 7z, then hand each extracted file to
    handle_file().

    :param filename: The name of the file to unzip.
    :type filename: str
    :param user: The user unzipping the file.
    :type user: str
    :param password: The password to use to unzip the file.
    :type password: str
    :param data: The filedata.
    :type data: str
    :param source: The name of the source that provided the data.
    :type source: str
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param inherited_source: Source(s) to be inherited by the new Sample
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :param backdoor_name: Name of backdoor to relate this object to.
    :type backdoor_name: str
    :param backdoor_version: Version of backdoor to relate this object to.
    :type backdoor_version: str
    :returns: list of results from handle_file() for each extracted file.
    :raises: ZipFileError, Exception
    """
    crits_config = CRITsConfig.objects().first()
    temproot = crits_config.temp_dir
    samples = []
    # Initialized to "" so the finally-block isdir() checks are safe even if
    # mkdtemp never ran.
    zipdir = ""
    extractdir = ""
    try:
        zip_md5 = md5(data).hexdigest()
        # 7z doesn't decompress archives via stdin, therefore
        # we need to write it out as a file first
        zipdir = tempfile.mkdtemp(dir=temproot)
        zipfile = open(zipdir + "/" + filename, "wb")
        zipfile.write(data)
        zipfile.close()
        # Build argument string to popen()
        args = [crits_config.zip7_path]
        if not os.access(crits_config.zip7_path, os.X_OK):
            errmsg = "7z is not executable at path specified in the config setting: %s\n" % crits_config.zip7_path
            raise ZipFileError, errmsg
        args.append("e")
        extractdir = tempfile.mkdtemp(dir=temproot)
        args.append("-o" + extractdir) # Set output directory
        # Apparently 7z doesn't mind being handed a password to an
        # archive that isn't encrypted - but blocks for the opposite
        # case, so we'll always give it something for a password argument
        if password is None:
            args.append("-pNone")
        else:
            args.append("-p" + password)
        args.append("-y") # 'Yes' on all queries - avoid blocking
        args.append(zipdir + "/" + filename)
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        # Give the process 30 seconds to complete, otherwise kill it
        waitSeconds = 30
        while (proc.poll() is None and waitSeconds):
            time.sleep(1)
            waitSeconds -= 1
        if proc.returncode: # 7z spit out an error
            errmsg = "Error while extracting archive\n" + proc.stdout.read()
            raise ZipFileError, errmsg
        elif not waitSeconds: # Process timed out
            proc.terminate()
            raise ZipFileError, "Unzip process failed to terminate"
        else:
            # If the archive itself is the related sample, the extracted
            # files were "compressed into" it; otherwise just "related to".
            if related_md5 and related_md5 == zip_md5:
                relationship = RelationshipTypes.COMPRESSED_INTO
            else:
                relationship = RelationshipTypes.RELATED_TO
            # NOTE(review): the loop variable below shadows the 'filename'
            # parameter; intentional reuse, but fragile.
            for root, dirs, files in os.walk(extractdir):
                for filename in files:
                    filepath = extractdir + "/" + filename
                    filehandle = open(filepath, 'rb')
                    new_sample = handle_file(filename, filehandle.read(),
                                             source, method, reference,
                                             related_md5=related_md5,
                                             related_id=related_id,
                                             related_type=related_type, backdoor='',
                                             user=user, campaign=campaign,
                                             confidence=confidence,
                                             bucket_list=bucket_list,
                                             ticket=ticket,
                                             inherited_source=inherited_source,
                                             relationship=relationship,
                                             is_return_only_md5=is_return_only_md5,
                                             backdoor_name=backdoor_name,
                                             backdoor_version=backdoor_version)
                    if new_sample:
                        samples.append(new_sample)
                    filehandle.close()
    except ZipFileError: # Pass this error up the chain
        raise
    except Exception, ex:
        # Re-wrap any other failure as a ZipFileError with the joined args.
        errmsg = ''
        for err in ex.args:
            errmsg = errmsg + " " + str(err)
        raise ZipFileError, errmsg
    finally:
        # Always clean up both temp directories.
        if os.path.isdir(zipdir):
            shutil.rmtree(zipdir)
        if os.path.isdir(extractdir):
            shutil.rmtree(extractdir)
    return samples
def unrar_file(filename, user=None, password=None, data=None, source=None,
               method="Generic", reference='', campaign=None, confidence='low',
               related_md5=None, related_id=None, related_type='Sample',
               bucket_list=None, ticket=None, inherited_source=None,
               is_return_only_md5=True,
               backdoor_name=None, backdoor_version=None):
    """
    Unrar a file by shelling out to the configured rar binary, then hand each
    extracted file to handle_file().

    :param filename: The name of the file to unrar.
    :type filename: str
    :param user: The user unraring the file.
    :type user: str
    :param password: The password to use to unrar the file.
    :type password: str
    :param data: The filedata.
    :type data: str
    :param source: The name of the source that provided the data.
    :type source: str
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param inherited_source: Source(s) to be inherited by the new Sample
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :returns: list of results from handle_file() for each extracted file.
    :raises: ZipFileError, Exception
    """
    crits_config = CRITsConfig.objects().first()
    samples = []
    try:
        rar_md5 = md5(data).hexdigest()
        # write the data to a file so we can read from it as a rar file
        temproot = crits_config.temp_dir
        rardir = tempfile.mkdtemp(dir=temproot)
        # append '.rar' to help ensure rarfile doesn't have same
        # name as an extracted file.
        rarname = os.path.join(rardir, filename)+'.rar'
        # NOTE(review): data is already hashed above, so this None fallback
        # looks unreachable; kept as-is.
        if data is None: #unraring an existing file
            data = get_file(filename)
        with open(rarname, "wb") as f:
            f.write(data)
        # change to temp directory since unrar allows extraction
        # only to the current directory first save current directory
        old_dir = os.getcwd()
        os.chdir(rardir)
        cmd = [crits_config.rar_path,'e'] #,'-inul'
        if password:
            cmd.append('-p'+password)
        else:
            cmd.append('-p-')
        cmd.append('-y') #assume yes to all prompts
        cmd.append(rarname)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        # Give the process 30 seconds to complete, otherwise kill it.
        waitSeconds = 30
        while (proc.poll() is None and waitSeconds):
            time.sleep(1)
            waitSeconds -= 1
        if proc.returncode:
            errmsg = "Error while unraring archive\n" + proc.stdout.read()
            raise ZipFileError, errmsg
        elif not waitSeconds:
            proc.terminate()
            raise ZipFileError, "Unrar process failed to terminate"
        else:
            # If the archive itself is the related sample, the extracted
            # files were "compressed into" it; otherwise just "related to".
            if related_md5 and related_md5 == rar_md5:
                relationship = RelationshipTypes.COMPRESSED_INTO
            else:
                relationship = RelationshipTypes.RELATED_TO
            # NOTE(review): the loop variable below shadows the 'filename'
            # parameter; intentional reuse, but fragile.
            for root, dirs, files in os.walk(rardir):
                for filename in files:
                    filepath = os.path.join(rardir, filename)
                    if filepath != rarname:
                        with open(filepath, 'rb') as filehandle:
                            new_sample = handle_file(filename,
                                                     filehandle.read(),
                                                     source, method, reference,
                                                     related_md5=related_md5,
                                                     related_id=related_id,
                                                     related_type=related_type,
                                                     backdoor='', user=user,
                                                     campaign=campaign,
                                                     confidence=confidence,
                                                     bucket_list=bucket_list,
                                                     ticket=ticket,
                                                     inherited_source=inherited_source,
                                                     relationship=relationship,
                                                     is_return_only_md5=is_return_only_md5,
                                                     backdoor_name=backdoor_name,
                                                     backdoor_version=backdoor_version)
                            samples.append(new_sample)
    except ZipFileError:
        raise
    except Exception:
        raise
        #raise ZipFileError, str(e)
    finally:
        #change back to original current directory
        # NOTE(review): 'old_dir' (and 'rardir') may be unbound here if an
        # exception fires before os.getcwd()/mkdtemp run -- TODO confirm.
        os.chdir(old_dir)
        if os.path.isdir(rardir):
            shutil.rmtree(rardir)
    return samples
def handle_file(filename, data, source, method='Generic', reference='',
                related_md5=None, related_id=None, related_type='Sample',
                backdoor=None, user='', campaign=None, confidence='low',
                md5_digest=None, sha1_digest=None, sha256_digest=None,
                size=None, mimetype=None, bucket_list=None, ticket=None,
                relationship=None, inherited_source=None, is_validate_only=False,
                is_return_only_md5=True, cache={}, backdoor_name=None,
                backdoor_version=None):
    """
    Handle adding a file.

    Validates any supplied hashes, finds or creates the Sample document,
    attaches binary data/metadata, sources, campaigns, buckets and tickets,
    and (unless validating only) saves the Sample and wires up backdoor and
    related-TLO relationships.

    :param filename: The name of the file.
    :type filename: str
    :param data: The filedata.
    :type data: str
    :param source: The name of the source that provided the data.
    :type source: list, str, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param backdoor: The backdoor to assign to this sample.
    :type backdoor: str
    :param user: The user uploading this sample.
    :type user: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param md5_digest: The MD5 of this sample.
    :type md5_digest: str
    :param sha1_digest: The SHA1 of this sample.
    :type sha1_digest: str
    :param sha256_digest: The SHA256 of this sample.
    :type sha256_digest: str
    :param size: The size of this sample.
    :type size: str
    :param mimetype: The mimetype of this sample.
    :type mimetype: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param relationship: The relationship between this sample and the parent.
    :type relationship: str
    :param inherited_source: Source(s) to be inherited by the new Sample.
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_validate_only: Only validate, do not add.
    :type is_validate_only: bool
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param backdoor_name: Name of the backdoor to relate the file to.
    :type backdoor_name: str
    :param backdoor_version: Version of the backdoor to relate the file to.
    :type backdoor_version: str
    :returns: str (the MD5) when is_return_only_md5 is True, otherwise a
        dict with keys:
        "success" (boolean),
        "message" (str),
        "object" (the sample)
    """
    retVal = {}
    retVal['success'] = True
    retVal['message'] = ""
    is_sample_new = False
    # get sample from database, or create it if one doesn't exist
    if not data and not md5_digest:
        retVal['success'] = False
        retVal['message'] = "At least MD5 hash is required."
        return retVal
    if md5_digest:
        # validate md5
        md5_digest = md5_digest.lower().strip()
        validate_md5_result = validate_md5_checksum(md5_digest)
        retVal['message'] += validate_md5_result.get('message')
        retVal['success'] = validate_md5_result.get('success')
        if retVal['success'] == False:
            if is_return_only_md5 == True:
                return None
            else:
                return retVal
    if sha1_digest != None and sha1_digest != "":
        # validate sha1
        sha1_digest = sha1_digest.lower().strip()
        validate_sha1_result = validate_sha1_checksum(sha1_digest)
        retVal['message'] += validate_sha1_result.get('message')
        retVal['success'] = validate_sha1_result.get('success')
        if retVal['success'] == False:
            if is_return_only_md5 == True:
                return None
            else:
                return retVal
    if sha256_digest != None and sha256_digest != "":
        # validate sha256
        sha256_digest = sha256_digest.lower().strip()
        validate_sha256_result = validate_sha256_checksum(sha256_digest)
        retVal['message'] += validate_sha256_result.get('message')
        retVal['success'] = validate_sha256_result.get('success')
        if retVal['success'] == False:
            if is_return_only_md5 == True:
                return None
            else:
                return retVal
    if data:
        # when data is present it is authoritative: recompute the MD5
        # from the bytes rather than trusting any supplied digest
        md5_digest = md5(data).hexdigest()
        validate_md5_result = validate_md5_checksum(md5_digest)
        retVal['message'] += validate_md5_result.get('message')
        retVal['success'] = validate_md5_result.get('success')
        if retVal['success'] == False:
            if is_return_only_md5 == True:
                return None
            else:
                return retVal
    # resolve the related top-level object, if one was specified
    if related_id or related_md5:
        if related_id:
            related_obj = class_from_id(related_type, related_id)
        else:
            related_obj = class_from_value(related_type, related_md5)
        if not related_obj:
            retVal['message'] += (' Related %s not found. Sample not uploaded.'
                                  % (related_type))
            retVal['success'] = False
    else:
        related_obj = None
    # during bulk operations samples are looked up in the cache first to
    # avoid one database query per row
    cached_results = cache.get(form_consts.Sample.CACHED_RESULTS)
    if cached_results != None:
        sample = cached_results.get(md5_digest)
    else:
        sample = Sample.objects(md5=md5_digest).first()
    if not sample:
        is_sample_new = True
        sample = Sample()
        sample.filename = filename or md5_digest
        sample.md5 = md5_digest
        sample.sha1 = sha1_digest
        sample.sha256 = sha256_digest
        sample.size = size
        sample.mimetype = mimetype
    else:
        # existing sample: track the new filename as an alias
        if filename not in sample.filenames and filename != sample.filename:
            sample.filenames.append(filename)
    if cached_results != None:
        cached_results[md5_digest] = sample
    # attempt to discover binary in GridFS before assuming we don't
    # have it
    sample.discover_binary()
    if data:
        # we already have this binary so generate metadata from it
        if sample.filedata.grid_id:
            sample._generate_file_metadata(data)
        # add the binary to gridfs and generate metadata
        else:
            sample.add_file_data(data)
    # if we didn't get data:
    else:
        if sample.filedata:
            # get data from db and add metadata in case it doesn't exist
            data = sample.filedata.read()
            sample._generate_file_metadata(data)
        else:
            if md5_digest:
                # no data and no binary, add limited metadata
                sample.md5 = md5_digest
            else:
                retVal['message'] += ("The MD5 digest and data, or the file "
                                      "data itself, need to be supplied.")
                retVal['success'] = False
    if sha1_digest:
        sample.sha1 = sha1_digest
    if sha256_digest:
        sample.sha256 = sha256_digest
    #add copy of inherited source(s) to Sample
    if isinstance(inherited_source, EmbeddedSource):
        sample.add_source(copy.copy(inherited_source))
    elif isinstance(inherited_source, list) and len(inherited_source) > 0:
        for s in inherited_source:
            if isinstance(s, EmbeddedSource):
                sample.add_source(copy.copy(s))
    # generate new source information and add to sample
    if isinstance(source, basestring) and len(source) > 0:
        s = create_embedded_source(source,
                                   method=method,
                                   reference=reference,
                                   analyst=user)
        # this will handle adding a new source, or an instance automatically
        sample.add_source(s)
    elif isinstance(source, EmbeddedSource):
        sample.add_source(source, method=method, reference=reference)
    elif isinstance(source, list) and len(source) > 0:
        for s in source:
            if isinstance(s, EmbeddedSource):
                sample.add_source(s, method=method, reference=reference)
    if bucket_list:
        sample.add_bucket_list(bucket_list, user)
    if ticket:
        sample.add_ticket(ticket, user)
    # if no proper source has been provided, don't add the sample
    if len(sample.source) == 0:
        retVal['message'] += "The sample does not have a source."
        retVal['success'] = False
    elif is_validate_only == False:
        # assume it's a list of EmbeddedCampaign, but check if it's a string
        # if it is a string then create a new EmbeddedCampaign
        if campaign != None:
            campaign_array = campaign
            if isinstance(campaign, basestring):
                campaign_array = [EmbeddedCampaign(name=campaign, confidence=confidence, analyst=user)]
            for campaign_item in campaign_array:
                sample.add_campaign(campaign_item)
        # save sample to get an id since the rest of the processing needs it
        sample.save(username=user)
        sources = user_sources(user)
        if backdoor_name:
            # Relate this to the backdoor family if there is one.
            backdoor = Backdoor.objects(name=backdoor_name,
                                        source__name__in=sources).first()
            if backdoor:
                backdoor.add_relationship(sample,
                                          RelationshipTypes.RELATED_TO,
                                          analyst=user)
                backdoor.save()
            # Also relate to the specific instance backdoor.
            if backdoor_version:
                backdoor = Backdoor.objects(name=backdoor_name,
                                            version=backdoor_version,
                                            source__name__in=sources).first()
                if backdoor:
                    backdoor.add_relationship(sample,
                                              RelationshipTypes.RELATED_TO,
                                              analyst=user)
                    backdoor.save()
        # reloading clears the _changed_fields of the sample object. this prevents
        # situations where we save again below and the shard key (md5) is
        # still marked as changed.
        sample.reload()
        # run sample triage:
        if len(AnalysisResult.objects(object_id=str(sample.id))) < 1 and data:
            run_triage(sample, user)
        # update relationship if a related top-level object is supplied
        if related_obj and sample:
            if related_obj.id != sample.id: #don't form relationship to itself
                if not relationship:
                    if related_obj._meta['crits_type'] == 'Email':
                        relationship = RelationshipTypes.CONTAINED_WITHIN
                    else:
                        relationship = RelationshipTypes.RELATED_TO
                sample.add_relationship(related_obj,
                                        relationship,
                                        analyst=user,
                                        get_rels=False)
                sample.save(username=user)
    if is_sample_new == True:
        # New sample, and successfully uploaded
        if is_validate_only == False:
            retVal['message'] += ('Success: Added new sample <a href="%s">%s.</a>'
                                  % (reverse('crits.samples.views.detail',
                                             args=[sample.md5.lower()]),
                                     sample.md5.lower()))
            # Update Cache
            if cached_results != None:
                cached_results[sample.md5] = sample
    else:
        # Duplicate sample, but uploaded anyways
        if is_validate_only == False:
            message = ('Success: Updated sample <a href="%s">%s.</a>'
                       % (reverse('crits.samples.views.detail',
                                  args=[sample.md5.lower()]),
                          sample.md5.lower()))
            retVal['message'] += message
            retVal['status'] = form_consts.Status.DUPLICATE
            retVal['warning'] = message
        # Duplicate sample, but only validation
        else:
            if sample.id != None:
                warning_message = ('Warning: Trying to add file [' +
                                   filename + ']'
                                   ' when MD5 already exists as file [' +
                                   sample.filename + ']'
                                   '<a href="%s">%s.</a>'
                                   % (reverse('crits.samples.views.detail',
                                              args=[sample.md5.lower()]),
                                      sample.md5.lower()))
                retVal['message'] += warning_message
                retVal['status'] = form_consts.Status.DUPLICATE
                retVal['warning'] = warning_message
    if is_return_only_md5 == True:
        return md5_digest
    else:
        retVal['object'] = sample
        return retVal
def handle_uploaded_file(f, source, method='', reference='', file_format=None,
                         password=None, user=None, campaign=None, confidence='low',
                         related_md5=None, related_id=None, related_type='Sample',
                         filename=None, md5=None, sha1=None, sha256=None, size=None,
                         mimetype=None, bucket_list=None, ticket=None,
                         inherited_source=None, is_validate_only=False,
                         is_return_only_md5=True, cache={}, backdoor_name=None,
                         backdoor_version=None):
    """
    Handle an uploaded file.

    Dispatches to :func:`unzip_file`/:func:`unrar_file` for archives, or to
    :func:`handle_file` for a single sample/metadata upload.

    :param f: The uploaded file.
    :type f: file handle
    :param source: The name of the source that provided the data.
    :type source: list, str, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param method: The source method to assign to the data.
    :type method: str
    :param reference: A reference to the data source.
    :type reference: str
    :param file_format: The format the file was uploaded in.
    :type file_format: str
    :param password: A password necessary to access the file data.
    :type password: str
    :param user: The user uploading this sample.
    :type user: str
    :param campaign: The campaign to attribute to the data.
    :type campaign: str
    :param confidence: The confidence level of the campaign attribution.
    :type confidence: str ('low', 'medium', 'high')
    :param related_md5: The MD5 of a related sample.
    :type related_md5: str
    :param related_id: The ObjectId of a related top-level object.
    :type related_id: str
    :param related_type: The type of the related top-level object.
    :type related_type: str
    :param filename: The filename of the sample.
    :type filename: str
    :param md5: The MD5 of the sample.
    :type md5: str
    :param sha1: The SHA1 of the sample.
    :type sha1: str
    :param sha256: The SHA256 of the sample.
    :type sha256: str
    :param size: The size of the sample.
    :type size: str
    :param mimetype: The mimetype of the sample.
    :type mimetype: str
    :param bucket_list: The bucket(s) to assign to this data.
    :type bucket_list: str
    :param ticket: The ticket to assign to this data.
    :type ticket: str
    :param inherited_source: Source(s) to be inherited by the new Sample.
    :type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
    :param is_validate_only: Only validate, do not add.
    :type is_validate_only: bool
    :param is_return_only_md5: Only return the MD5s.
    :type is_return_only_md5: bool
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param backdoor_name: Name of backdoor to relate this object to.
    :type backdoor_name: str
    :param backdoor_version: Version of backdoor to relate this object to.
    :type backdoor_version: str
    :returns: list
    """
    samples = list()
    if not source:
        return [{'success': False, 'message': "Missing source information."}]
    # Build a human-readable source method describing how the data arrived.
    if method:
        method = " - " + method
    if f:
        method = "File Upload" + method
    elif md5:
        method = "Metadata Upload" + method
    else:
        method = "Upload" + method
    # ``f`` may be a file-like object or the raw file contents.
    try:
        data = f.read()
    except AttributeError:
        data = f
    if not filename:
        filename = getattr(f, 'name', None)
    if not filename:
        # Fall back to the MD5 of the content as the filename.
        # NOTE: the ``md5`` *parameter* shadows hashlib's md5() in this
        # scope, so the hash function must be reached through the module;
        # the previous ``md5(data)`` call always raised and silently
        # produced "unknown".
        import hashlib
        try:
            filename = hashlib.md5(data).hexdigest()
        except TypeError:
            # No data to hash (e.g. metadata-only upload with no name).
            filename = "unknown"
    if file_format == "zip" and f:
        return unzip_file(
            filename,
            user=user,
            password=password,
            data=data,
            source=source,
            method=method,
            reference=reference,
            campaign=campaign,
            confidence=confidence,
            related_md5=related_md5,
            related_id=related_id,
            related_type=related_type,
            bucket_list=bucket_list,
            ticket=ticket,
            inherited_source=inherited_source,
            is_return_only_md5=is_return_only_md5,
            backdoor_name=backdoor_name,
            backdoor_version=backdoor_version)
    elif file_format == "rar" and f:
        return unrar_file(
            filename,
            user=user,
            password=password,
            data=data,
            source=source,
            method=method,
            reference=reference,
            campaign=campaign,
            confidence=confidence,
            related_md5=related_md5,
            related_id=related_id,
            related_type=related_type,
            bucket_list=bucket_list,
            ticket=ticket,
            inherited_source=inherited_source,
            is_return_only_md5=is_return_only_md5,
            backdoor_name=backdoor_name,
            backdoor_version=backdoor_version)
    else:
        new_sample = handle_file(filename, data, source, method, reference,
                                 related_md5=related_md5, related_id=related_id,
                                 related_type=related_type, backdoor='',
                                 user=user, campaign=campaign,
                                 confidence=confidence, md5_digest=md5,
                                 sha1_digest=sha1, sha256_digest=sha256,
                                 size=size, mimetype=mimetype,
                                 bucket_list=bucket_list, ticket=ticket,
                                 inherited_source=inherited_source,
                                 is_validate_only=is_validate_only,
                                 is_return_only_md5=is_return_only_md5,
                                 cache=cache, backdoor_name=backdoor_name,
                                 backdoor_version=backdoor_version)
        if new_sample:
            samples.append(new_sample)
    return samples
def add_new_sample_via_bulk(data, rowData, request, errors, is_validate_only=False, cache={}):
    """
    Add a new sample from bulk upload.

    :param data: The data about the sample.
    :type data: dict
    :param rowData: Object data in the row.
    :type rowData: dict
    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param errors: List of existing errors to append to.
    :type errors: list
    :param is_validate_only: Only validate, do not add.
    :type is_validate_only: bool
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: tuple of (result, errors, return value)
    """
    username = request.user.username
    result = False
    retVal = {}
    retVal['success'] = True
    files = None
    if request.FILES:
        files = request.FILES
    filename = data.get('filename')
    campaign = data.get('campaign')
    confidence = data.get('confidence')
    md5 = data.get('md5')
    sha1 = data.get('sha1')
    sha256 = data.get('sha256')
    size = data.get('size')
    mimetype = data.get('mimetype')
    fileformat = data.get('file_format')
    password = data.get('password')
    related_md5 = data.get('related_md5')
    source = data.get('source')
    method = data.get('method', '')
    reference = data.get('reference')
    bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
    ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME)
    samples = handle_uploaded_file(files, source, method, reference,
                                   file_format=fileformat,
                                   password=password,
                                   user=username,
                                   campaign=campaign,
                                   confidence=confidence,
                                   related_md5=related_md5,
                                   filename=filename,
                                   md5=md5,
                                   sha1=sha1,
                                   sha256=sha256,
                                   size=size,
                                   mimetype=mimetype,
                                   bucket_list=bucket_list,
                                   ticket=ticket,
                                   is_validate_only=is_validate_only,
                                   is_return_only_md5=False,
                                   cache=cache)
    # This block tries to add objects to the item
    if not errors or is_validate_only == True:
        result = True
        objectsData = rowData.get(form_consts.Common.OBJECTS_DATA)
        # Parse the JSON payload once, outside the sample loop; the
        # previous code called json.loads() inside the loop and would
        # re-parse (and fail on) the already-decoded list when an
        # upload produced more than one sample.
        if objectsData:
            objectsData = json.loads(objectsData)
        for sample in samples:
            # repack message field into top of structure
            if retVal.get('message'):
                if sample.get('success') == False:
                    retVal['success'] = False
                    result = False
                    errors.append(sample.get('message'))
                else:
                    retVal['message'] += sample.get('message')
            else:
                if sample.get('success') == False:
                    retVal['success'] = False
                    result = False
                    errors.append(sample.get('message'))
                else:
                    retVal['message'] = sample.get('message')
            if sample.get('warning'):
                retVal['warning'] = sample.get('warning')
            if sample.get('status'):
                retVal['status'] = sample.get('status')
            # add new objects if they exist
            if objectsData:
                for object_row_counter, objectData in enumerate(objectsData, 1):
                    # Use the sample's real id when we actually added it;
                    # fall back to an empty id for validation-only runs.
                    if sample.get('object') != None and is_validate_only == False:
                        objectDict = object_array_to_dict(objectData, "Sample",
                                                          sample.get('object').id)
                    else:
                        if sample.get('object') and sample.get('object').id:
                            objectDict = object_array_to_dict(objectData, "Sample",
                                                              sample.get('object').id)
                        else:
                            objectDict = object_array_to_dict(objectData, "Sample", "")
                    (object_result, object_errors, object_retVal) = validate_and_add_new_handler_object(
                        None, objectDict, request, errors, object_row_counter,
                        is_validate_only=is_validate_only, cache=cache)
                    # if there was an error, mark the overall
                    # operation as failed
                    if object_retVal.get('success') == False:
                        retVal['success'] = False
                        result = False
                    if object_retVal.get('message'):
                        errors.append(object_retVal['message'])
    else:
        # append a single message; the old ``errors += "..."`` extended
        # the list with the string's individual characters (and raised a
        # TypeError when md5 was None)
        errors.append("Failed to add Sample: %s" % md5)
    return result, errors, retVal
def parse_row_to_bound_sample_form(request, rowData, cache, upload_type="File Upload"):
    """
    Parse a mass upload row into an UploadFileForm.

    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param rowData: The data in the row.
    :type rowData: dict
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param upload_type: The type of upload.
    :type upload_type: str
    :returns: :class:`crits.samples.forms.UploadFileForm`
    """
    if not upload_type:
        upload_type = rowData.get(form_consts.Sample.UPLOAD_TYPE, "")

    # Type-specific fields stay None unless the row provides them.
    filedata = fileformat = password = None
    filename = md5 = sha1 = sha256 = size = mimetype = None
    if upload_type == form_consts.Sample.UploadType.FILE_UPLOAD:
        filedata = rowData.get(form_consts.Sample.FILE_DATA, "")
        fileformat = rowData.get(form_consts.Sample.FILE_FORMAT, "")
        password = rowData.get(form_consts.Sample.PASSWORD, "")
    elif upload_type == form_consts.Sample.UploadType.METADATA_UPLOAD:
        filename = rowData.get(form_consts.Sample.FILE_NAME, "")
        md5 = rowData.get(form_consts.Sample.MD5, "")
        sha1 = rowData.get(form_consts.Sample.SHA1, "")
        sha256 = rowData.get(form_consts.Sample.SHA256, "")
        size = rowData.get(form_consts.Sample.SIZE, 0)
        mimetype = rowData.get(form_consts.Sample.MIMETYPE, "")

    # Assemble the form payload; common fields are read straight from the row.
    data = {
        'upload_type': upload_type,
        'filedata': filedata,
        'filename': filename,
        'md5': md5,
        'sha1': sha1,
        'sha256': sha256,
        'size': size,
        'mimetype': mimetype,
        'file_format': fileformat,
        'campaign': rowData.get(form_consts.Sample.CAMPAIGN, ""),
        'confidence': rowData.get(form_consts.Sample.CAMPAIGN_CONFIDENCE, ""),
        'password': password,
        'email': convert_string_to_bool(rowData.get(form_consts.Sample.EMAIL_RESULTS, "")),
        'related_md5': rowData.get(form_consts.Sample.RELATED_MD5, ""),
        'source': rowData.get(form_consts.Sample.SOURCE, ""),
        'method': rowData.get(form_consts.Sample.SOURCE_METHOD, ""),
        'reference': rowData.get(form_consts.Sample.SOURCE_REFERENCE, ""),
        'bucket_list': rowData.get(form_consts.Sample.BUCKET_LIST, ""),
        'ticket': rowData.get(form_consts.Common.TICKET, ""),
    }

    # Reuse one bound form instance across the whole bulk operation,
    # rebinding its data for each row.
    bound_md5_sample_form = cache.get('sample_form')
    if bound_md5_sample_form is None:
        bound_md5_sample_form = UploadFileForm(request.user, data, request.FILES)
        cache['sample_form'] = bound_md5_sample_form
    else:
        bound_md5_sample_form.data = data
    bound_md5_sample_form.full_clean()
    return bound_md5_sample_form
def parse_row_to_bound_md5_sample_form(request, rowData, cache):
    """
    Parse a mass metadata-upload row into an UploadFileForm.

    Thin wrapper around :func:`parse_row_to_bound_sample_form` that forces
    the metadata upload type.

    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :param rowData: The data in the row.
    :type rowData: dict
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :returns: :class:`crits.samples.forms.UploadFileForm`
    """
    return parse_row_to_bound_sample_form(
        request, rowData, cache,
        upload_type=form_consts.Sample.UploadType.METADATA_UPLOAD)
def process_bulk_add_md5_sample(request, formdict):
    """
    Perform the bulk add of MD5 samples by parsing the request data.

    All MD5s in the upload are fetched with a single query and handed to the
    row handlers via the cache, avoiding one database lookup per row.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param formdict: The form representing the bulk uploaded data.
    :type formdict: dict
    :returns: :class:`django.http.HttpResponse`
    """
    cleanedRowsData = convert_handsontable_to_rows(request)
    # Collect every MD5 present in the rows (lowercased for the query).
    md5_samples = [row.get(form_consts.Sample.MD5).lower()
                   for row in cleanedRowsData
                   if row != None and row.get(form_consts.Sample.MD5) != None]
    # One query for all existing samples, keyed by MD5.
    cached_results = dict((s.md5, s) for s in Sample.objects(md5__in=md5_samples))
    cache = {form_consts.Sample.CACHED_RESULTS: cached_results,
             'cleaned_rows_data': cleanedRowsData}
    return parse_bulk_upload(request, parse_row_to_bound_md5_sample_form,
                             add_new_sample_via_bulk, formdict, cache)
def update_sample_filename(id_, filename, analyst):
    """
    Update a Sample filename.

    :param id_: ObjectId of the Sample.
    :type id_: str
    :param filename: The new filename.
    :type filename: str
    :param analyst: The user setting the new filename.
    :type analyst: str
    :returns: dict with key 'success' (boolean) and 'message' (str) if failed.
    """
    if not filename:
        return {'success': False, 'message': "No filename to change"}
    sample = Sample.objects(id=id_).first()
    if not sample:
        return {'success': False, 'message': "No sample to change"}
    sample.filename = filename.strip()
    try:
        sample.save(username=analyst)
        return {'success': True}
    # ``except E as e`` replaces the Python-2-only ``except E, e`` form.
    except ValidationError as e:
        # return the error text, not the exception object, so the result
        # is serializable like the other failure paths
        return {'success': False, 'message': str(e)}
def modify_sample_filenames(id_, tags, analyst):
    """
    Modify the filenames for a Sample.

    :param id_: ObjectId of the Sample.
    :type id_: str
    :param tags: The new filenames.
    :type tags: list
    :param analyst: The user setting the new filenames.
    :type analyst: str
    :returns: dict with key 'success' (boolean) and 'message' (str) if failed.
    """
    sample = Sample.objects(id=id_).first()
    if not sample:
        return {'success': False}
    sample.set_filenames(tags)
    try:
        sample.save(username=analyst)
        return {'success': True}
    # ``except E as e`` replaces the Python-2-only ``except E, e`` form.
    except ValidationError as e:
        return {'success': False, 'message': "Invalid value: %s" % e}
|
mit
|
JurassicWordExcel/core
|
wizards/com/sun/star/wizards/fax/CallWizard.py
|
4
|
3060
|
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
import unohelper
import traceback
from .FaxWizardDialogImpl import FaxWizardDialogImpl, Desktop
from com.sun.star.lang import XServiceInfo
from com.sun.star.task import XJobExecutor
# pythonloader looks for a static g_ImplementationHelper variable
g_ImplementationHelper = unohelper.ImplementationHelper()
# Full UNO implementation name this component registers under.
g_implName = "com.sun.star.wizards.fax.CallWizard"
# implement a UNO component by deriving from the standard unohelper.Base class
# and from the interface(s) you want to implement.
class CallWizard(unohelper.Base, XJobExecutor, XServiceInfo):
    """UNO job component that launches the Fax wizard dialog."""

    def __init__(self, ctx):
        # Keep the component context so the wizard can reach the
        # service manager at trigger time.
        self.ctx = ctx

    def trigger(self, args):
        """Start the wizard inside the running office instance."""
        try:
            wizard = FaxWizardDialogImpl(self.ctx.ServiceManager)
            wizard.startWizard(self.ctx.ServiceManager)
        except Exception as e:
            print ("Wizard failure exception " + str(type(e)) +
                   " message " + str(e) + " args " + str(e.args) +
                   traceback.format_exc())

    @classmethod
    def callRemote(cls):
        """Call the wizard remotely (see README)."""
        try:
            connect_str = \
                "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext"
            service_manager = Desktop.connect(connect_str)
            wizard = FaxWizardDialogImpl(service_manager)
            wizard.startWizard(service_manager)
        except Exception as e:
            print ("Wizard failure exception " + str(type(e)) +
                   " message " + str(e) + " args " + str(e.args) +
                   traceback.format_exc())

    def getImplementationName(self):
        """Return the UNO implementation name."""
        return g_implName

    def supportsService(self, ServiceName):
        """Return True if this component implements *ServiceName*."""
        return g_ImplementationHelper.supportsService(g_implName, ServiceName)

    def getSupportedServiceNames(self):
        """Return the UNO service names this component implements."""
        return g_ImplementationHelper.getSupportedServiceNames(g_implName)
# Register this component with pythonloader under its implementation name.
g_ImplementationHelper.addImplementation( \
    CallWizard,                  # UNO object class
    g_implName,                  # implementation name
    ("com.sun.star.task.Job",),) # list of implemented services
                                 # (the only service)

# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
mpl-2.0
|
ProfessionalIT/professionalit-webiste
|
sdk/google_appengine/google/appengine/ext/mapreduce/api/map_job/map_job_config.py
|
10
|
6778
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Per job config for map jobs."""
from google.appengine.ext.mapreduce import hooks
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import output_writers
from google.appengine.ext.mapreduce import parameters
from google.appengine.ext.mapreduce import util
from google.appengine.ext.mapreduce.api.map_job import input_reader
from google.appengine.ext.mapreduce.api.map_job import mapper as mapper_module
# Shorthand for the per-job option descriptor used by JobConfig below.
_Option = parameters._Option

# Current map_job API version; recorded into mapreduce params as "api_version".
_API_VERSION = 1
class JobConfig(parameters._Config):
  """Configurations for a map job.

  Names started with _ are reserved for internal use.

  To create an instance:
  all option names can be used as keys to __init__.
  If an option is required, the key must be provided.
  If an option isn't required and no value is given, the default value
  will be used.
  """

  # --- Job identity ---
  job_name = _Option(basestring, required=True)
  job_id = _Option(basestring, default_factory=util._get_descending_key)

  # --- User code and I/O configuration ---
  mapper = _Option(mapper_module.Mapper, required=True)

  input_reader_cls = _Option(input_reader.InputReader, required=True)

  input_reader_params = _Option(dict, default_factory=lambda: {})

  output_writer_cls = _Option(output_writers.OutputWriter,
                              can_be_none=True)

  output_writer_params = _Option(dict, default_factory=lambda: {})

  # --- Execution tuning ---
  shard_count = _Option(int,
                        default_factory=lambda: parameters.config.SHARD_COUNT)

  user_params = _Option(dict, default_factory=lambda: {})

  queue_name = _Option(
      basestring, default_factory=lambda: parameters.config.QUEUE_NAME)

  shard_max_attempts = _Option(
      int, default_factory=lambda: parameters.config.SHARD_MAX_ATTEMPTS)

  done_callback_url = _Option(basestring, can_be_none=True)

  # --- Internal-only options (leading underscore) ---
  _force_writes = _Option(bool, default_factory=lambda: False)

  _base_path = _Option(basestring,
                       default_factory=lambda: parameters.config.BASE_PATH)

  _task_max_attempts = _Option(
      int, default_factory=lambda: parameters.config.TASK_MAX_ATTEMPTS)

  _task_max_data_processing_attempts = _Option(
      int, default_factory=(
          lambda: parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS))

  _hooks_cls = _Option(hooks.Hooks, can_be_none=True)

  _app = _Option(basestring, can_be_none=True)

  _api_version = _Option(int, default_factory=lambda: _API_VERSION)

  def _get_mapper_params(self):
    """Converts self to model.MapperSpec.params."""
    reader_params = self.input_reader_cls.params_to_json(
        self.input_reader_params)
    # Mapper params bundle both the reader and the writer settings.
    return {"input_reader": reader_params,
            "output_writer": self.output_writer_params}

  def _get_mapper_spec(self):
    """Converts self to model.MapperSpec."""
    # Imported here rather than at module level — presumably to avoid a
    # circular import; confirm before moving.
    from google.appengine.ext.mapreduce import model

    return model.MapperSpec(
        handler_spec=util._obj_to_path(self.mapper),
        input_reader_spec=util._obj_to_path(self.input_reader_cls),
        params=self._get_mapper_params(),
        shard_count=self.shard_count,
        output_writer_spec=util._obj_to_path(self.output_writer_cls))

  def _get_mr_params(self):
    """Converts self to model.MapreduceSpec.params."""
    return {"force_writes": self._force_writes,
            "done_callback": self.done_callback_url,
            "user_params": self.user_params,
            "shard_max_attempts": self.shard_max_attempts,
            "task_max_attempts": self._task_max_attempts,
            "task_max_data_processing_attempts":
                self._task_max_data_processing_attempts,
            "queue_name": self.queue_name,
            "base_path": self._base_path,
            "app_id": self._app,
            "api_version": self._api_version}

  @classmethod
  def _get_default_mr_params(cls):
    """Gets default values for old API."""
    cfg = cls(_lenient=True)
    mr_params = cfg._get_mr_params()
    # api_version 0 marks specs created through the legacy API.
    mr_params["api_version"] = 0
    return mr_params

  @classmethod
  def _to_map_job_config(cls,
                         mr_spec,

                         queue_name):
    """Converts model.MapreduceSpec back to JobConfig.

    This method allows our internal methods to use JobConfig directly.
    This method also allows us to expose JobConfig as an API during
    execution, despite that it is not saved into datastore.

    Args:
      mr_spec: model.MapreduceSpec.
      queue_name: queue name.

    Returns:
      The JobConfig object for this job.
    """
    mapper_spec = mr_spec.mapper
    # Specs created by the old API carry no "api_version" param.
    api_version = mr_spec.params.get("api_version", 0)
    old_api = api_version == 0

    input_reader_cls = mapper_spec.input_reader_class()
    input_reader_params = input_readers._get_params(mapper_spec)
    # New-style readers round-trip their params through JSON.
    if issubclass(input_reader_cls, input_reader.InputReader):
      input_reader_params = input_reader_cls.params_from_json(
          input_reader_params)

    output_writer_cls = mapper_spec.output_writer_class()
    output_writer_params = output_writers._get_params(mapper_spec)

    return cls(_lenient=old_api,
               job_name=mr_spec.name,
               job_id=mr_spec.mapreduce_id,

               mapper=util.for_name(mapper_spec.handler_spec),
               input_reader_cls=input_reader_cls,
               input_reader_params=input_reader_params,
               output_writer_cls=output_writer_cls,
               output_writer_params=output_writer_params,
               shard_count=mapper_spec.shard_count,
               queue_name=queue_name,
               user_params=mr_spec.params.get("user_params"),
               shard_max_attempts=mr_spec.params.get("shard_max_attempts"),
               done_callback_url=mr_spec.params.get("done_callback"),
               _force_writes=mr_spec.params.get("force_writes"),
               _base_path=mr_spec.params["base_path"],
               _task_max_attempts=mr_spec.params.get("task_max_attempts"),
               _task_max_data_processing_attempts=(
                   mr_spec.params.get("task_max_data_processing_attempts")),
               _hooks_cls=util.for_name(mr_spec.hooks_class_name),
               _app=mr_spec.params.get("app_id"),
               _api_version=api_version)
|
lgpl-3.0
|
pilou-/ansible
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_tag.py
|
54
|
6486
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_tag
short_description: Create and remove tag(s) to DigitalOcean resource.
description:
- Create and remove tag(s) to DigitalOcean resource.
author: "Victor Volle (@kontrafiktion)"
version_added: "2.2"
options:
name:
description:
- The name of the tag. The supported characters for names include
alphanumeric characters, dashes, and underscores.
required: true
resource_id:
description:
- The ID of the resource to operate on.
- The data type of resource_id is changed from integer to string, from version 2.5.
aliases: ['droplet_id']
resource_type:
description:
- The type of resource to operate on. Currently, only tagging of
droplets is supported.
default: droplet
choices: ['droplet']
state:
description:
- Whether the tag should be present or absent on the resource.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: digital_ocean.documentation
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
They both refer to the v2 token.
- As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: create a tag
digital_ocean_tag:
name: production
state: present
- name: tag a resource; creating the tag if it does not exist
digital_ocean_tag:
name: "{{ item }}"
resource_id: "73333005"
state: present
loop:
- staging
- dbserver
- name: untag a resource
digital_ocean_tag:
name: staging
resource_id: "73333005"
state: absent
# Deleting a tag also untags all the resources that have previously been
# tagged with it
- name: remove a tag
digital_ocean_tag:
name: dbserver
state: absent
'''
RETURN = '''
data:
description: a DigitalOcean Tag resource
returned: success and no resource constraint
type: dict
sample: {
"tag": {
"name": "awesome",
"resources": {
"droplets": {
"count": 0,
"last_tagged": null
}
}
}
}
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
    # Main logic: create/tag when state=present, untag/delete the tag when
    # state=absent.  Every path terminates via module.exit_json or
    # module.fail_json; this function never returns normally.
    state = module.params['state']
    name = module.params['name']
    resource_id = module.params['resource_id']
    resource_type = module.params['resource_type']
    rest = DigitalOceanHelper(module)
    if state == 'present':
        response = rest.get('tags/{0}'.format(name))
        status_code = response.status_code
        resp_json = response.json
        changed = False
        if status_code == 200 and resp_json['tag']['name'] == name:
            changed = False  # tag already exists
        else:
            # Ensure Tag exists
            response = rest.post("tags", data={'name': name})
            status_code = response.status_code
            resp_json = response.json
            if status_code == 201:
                changed = True
            elif status_code == 422:
                # 422 from the API means the tag already exists (e.g. a race
                # with a concurrent creator) - treat as unchanged.
                changed = False
            else:
                # Unexpected API answer: surface it without claiming a change.
                module.exit_json(changed=False, data=resp_json)
        if resource_id is None:
            # No resource defined, we're done.
            module.exit_json(changed=changed, data=resp_json)
        else:
            # Check if resource is already tagged or not
            found = False
            url = "{0}?tag_name={1}".format(resource_type, name)
            if resource_type == 'droplet':
                url = "droplets?tag_name={0}".format(name)
            response = rest.get(url)
            status_code = response.status_code
            resp_json = response.json
            if status_code == 200:
                # Scan the tagged droplets for the requested resource id.
                for resource in resp_json['droplets']:
                    if not found and resource['id'] == int(resource_id):
                        found = True
                        break
                if not found:
                    # If resource is not tagged, tag a resource
                    url = "tags/{0}/resources".format(name)
                    payload = {
                        'resources': [{
                            'resource_id': resource_id,
                            'resource_type': resource_type}]}
                    response = rest.post(url, data=payload)
                    if response.status_code == 204:
                        module.exit_json(changed=True)
                    else:
                        module.fail_json(msg="error tagging resource '{0}': {1}".format(resource_id, response.json["message"]))
                else:
                    # Already tagged resource
                    module.exit_json(changed=False)
            else:
                # Unable to find resource specified by user
                module.fail_json(msg=resp_json['message'])
    elif state == 'absent':
        if resource_id:
            # Untag just this one resource; the tag itself is kept.
            url = "tags/{0}/resources".format(name)
            payload = {
                'resources': [{
                    'resource_id': resource_id,
                    'resource_type': resource_type}]}
            response = rest.delete(url, data=payload)
        else:
            # Delete the tag entirely (implicitly untags every resource).
            url = "tags/{0}".format(name)
            response = rest.delete(url)
        if response.status_code == 204:
            module.exit_json(changed=True)
        else:
            module.exit_json(changed=False, data=response.json)
def main():
    """Module entry point: build the argument spec and dispatch to core()."""
    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
    extra_args = dict(
        name=dict(type='str', required=True),
        resource_id=dict(aliases=['droplet_id'], type='str'),
        resource_type=dict(choices=['droplet'], default='droplet'),
        state=dict(choices=['present', 'absent'], default='present'),
    )
    argument_spec.update(extra_args)
    module = AnsibleModule(argument_spec=argument_spec)
    try:
        core(module)
    except Exception as e:
        # Translate any unexpected failure into a proper module failure.
        module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
|
gpl-3.0
|
unix1986/scons
|
engine/SCons/Tool/sunf90.py
|
9
|
2127
|
"""SCons.Tool.sunf90
Tool-specific initialization for sunf90, the Sun Studio F90 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf90.py 2014/09/27 12:51:43 garyo"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf90', 'f90']  # candidate compiler executables, in preference order
def generate(env):
    """Add Builders and construction variables for the Sun Studio f90
    compiler to an Environment."""
    add_all_to_env(env)
    fcomp = env.Detect(compilers) or 'f90'
    # The generic FORTRAN dialect and F90 both use the same driver.
    for dialect_var in ('FORTRAN', 'F90'):
        env[dialect_var] = fcomp
    env['SHFORTRAN'] = '$FORTRAN'
    env['SHF90'] = '$F90'
    # Shared objects require position-independent code (-KPIC).
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
    env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
def exists(env):
    # Tool is usable iff one of the known compiler executables can be found.
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
ecoal95/servo
|
tests/wpt/update/tree.py
|
20
|
7753
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from distutils.spawn import find_executable
import re
import subprocess
import sys
import tempfile
from wptrunner import update as wptupdate
from wptrunner.update.tree import Commit, CommitMessage, get_unique_name
class HgTree(wptupdate.tree.HgTree):
    def __init__(self, *args, **kwargs):
        # Allow callers to inject a custom commit class; defaults to the
        # local Commit.
        self.commit_cls = kwargs.pop("commit_cls", Commit)
        wptupdate.tree.HgTree.__init__(self, *args, **kwargs)

    # TODO: The extra methods for upstreaming patches from a
    # hg checkout
class GitTree(wptupdate.tree.GitTree):
    def __init__(self, *args, **kwargs):
        """Extension of the basic GitTree with extra methods for
        transferring patches"""
        commit_cls = kwargs.pop("commit_cls", Commit)
        wptupdate.tree.GitTree.__init__(self, *args, **kwargs)
        self.commit_cls = commit_cls

    def create_branch(self, name, ref=None):
        """Create a named branch,

        :param name: String representing the branch name.
        :param ref: None to use current HEAD or rev that the branch should point to"""

        args = []
        if ref is not None:
            # Accept either a Commit-like object or a raw rev string.
            if hasattr(ref, "sha1"):
                ref = ref.sha1
            args.append(ref)
        self.git("branch", name, *args)

    def commits_by_message(self, message, path=None):
        """List of commits with messages containing a given string.

        :param message: The string that must be contained in the message.
        :param path: Path to a file or directory the commit touches
        """
        # -z NUL-separates the shas so they can be split unambiguously.
        args = ["--pretty=format:%H", "--reverse", "-z", "--grep=%s" % message]
        if path is not None:
            args.append("--")
            args.append(path)
        data = self.git("log", *args)
        # NOTE(review): unlike log() below, empty git output yields one
        # empty-string sha here - presumably callers only use this when
        # matches exist; confirm before relying on it.
        return [self.commit_cls(self, sha1) for sha1 in data.split("\0")]

    def log(self, base_commit=None, path=None):
        """List commits touching a certain path from a given base commit.

        :base_param commit: Commit object for the base commit from which to log
        :param path: Path that the commits must touch
        """
        args = ["--pretty=format:%H", "--reverse", "-z", "--no-merges"]
        if base_commit is not None:
            args.append("%s.." % base_commit.sha1)
        if path is not None:
            args.append("--")
            args.append(path)
        data = self.git("log", *args)
        return [self.commit_cls(self, sha1) for sha1 in data.split("\0") if sha1]

    def import_patch(self, patch, strip_count):
        """Import a patch file into the tree and commit it

        :param patch: a Patch object containing the patch to import
        """

        with tempfile.NamedTemporaryFile() as f:
            f.write(patch.diff)
            f.flush()
            f.seek(0)
            # -p controls how many leading path components git strips.
            self.git("apply", "--index", f.name, "-p", str(strip_count))
        self.git("commit", "-m", patch.message.text, "--author=%s" % patch.full_author)

    def rebase(self, ref, continue_rebase=False):
        """Rebase the current branch onto another commit.

        :param ref: A Commit object for the commit to rebase onto
        :param continue_rebase: Continue an in-progress rebase"""
        if continue_rebase:
            args = ["--continue"]
        else:
            if hasattr(ref, "sha1"):
                ref = ref.sha1
            args = [ref]
        self.git("rebase", *args)

    def push(self, remote, local_ref, remote_ref, force=False):
        """Push local changes to a remote.

        :param remote: URL of the remote to push to
        :param local_ref: Local branch to push
        :param remote_ref: Name of the remote branch to push to
        :param force: Do a force push
        """
        args = []
        if force:
            args.append("-f")
        args.extend([remote, "%s:%s" % (local_ref, remote_ref)])
        self.git("push", *args)

    def unique_branch_name(self, prefix):
        """Get an unused branch name in the local tree

        :param prefix: Prefix to use at the start of the branch name"""
        branches = [ref[len("refs/heads/"):] for sha1, ref in self.list_refs()
                    if ref.startswith("refs/heads/")]
        return get_unique_name(branches, prefix)
class Patch(object):
    """An exportable patch: author identity, commit message(s) and a diff."""

    def __init__(self, author, email, message, merge_message, diff):
        self.author = author
        self.email = email
        self.merge_message = merge_message
        # Accept either a ready-made CommitMessage or raw message text.
        if isinstance(message, CommitMessage):
            self.message = message
        else:
            self.message = GeckoCommitMessage(message)
        self.diff = diff

    def __repr__(self):
        return "<Patch (%s)>" % self.message.full_summary

    @property
    def full_author(self):
        """Author in the conventional "Name <email>" form."""
        return "%s <%s>" % (self.author, self.email)

    @property
    def empty(self):
        """True when the patch carries no diff content.

        Bug fix: the previous implementation returned
        ``bool(self.diff.strip())``, i.e. True for a NON-empty diff,
        inverting the meaning of the property's name.
        """
        return not self.diff.strip()
class GeckoCommitMessage(CommitMessage):
    """Commit message following the Gecko conventions for identifying bug number
    and reviewer"""

    # c.f. http://hg.mozilla.org/hgcustom/version-control-tools/file/tip/hghooks/mozhghooks/commit-message.py
    # which has the regexps that are actually enforced by the VCS hooks. These are
    # slightly different because we need to parse out specific parts of the message rather
    # than just enforce a general pattern.

    # Raw strings so \d, \s, \w are regex escapes rather than (deprecated)
    # string escapes; the compiled patterns are unchanged.
    _bug_re = re.compile(r"^Bug (\d+)[^\w]*(?:Part \d+[^\w]*)?(.*?)\s*(?:r=(\w*))?$",
                         re.IGNORECASE)

    _merge_re = re.compile(r"^Auto merge of #(\d+) - [^:]+:[^,]+, r=(.+)$", re.IGNORECASE)

    # NOTE(review): only the first alternative is anchored at ^; "Backout"
    # and "Revert..." can match anywhere - presumably intentional, verify.
    _backout_re = re.compile(r"^(?:Back(?:ing|ed)\s+out)|Backout|(?:Revert|(?:ed|ing))",
                             re.IGNORECASE)

    # Bug fix: "(0-9a-f){12}" matched the literal text "0-9a-f" twelve
    # times; a 12-char abbreviated sha1 needs the character class
    # [0-9a-f]{12}.
    _backout_sha1_re = re.compile(r"(?:\s|\:)[0-9a-f]{12}")

    def _parse_message(self):
        """Split the summary into bug number, summary text and reviewer."""
        CommitMessage._parse_message(self)

        if self._backout_re.match(self.full_summary):
            self.backouts = self._backout_re.findall(self.full_summary)
        else:
            self.backouts = []

        # Try the servo auto-merge form first, then the classic Gecko
        # "Bug NNN ... r=reviewer" form; otherwise leave fields unset.
        m = self._merge_re.match(self.full_summary)
        if m is not None:
            self.bug, self.reviewer = m.groups()
            self.summary = self.full_summary
        else:
            m = self._bug_re.match(self.full_summary)
            if m is not None:
                self.bug, self.summary, self.reviewer = m.groups()
            else:
                self.bug, self.summary, self.reviewer = None, self.full_summary, None
class GeckoCommit(Commit):
    # Commit whose message is parsed with the Gecko bug/reviewer conventions.
    msg_cls = GeckoCommitMessage

    def __init__(self, tree, sha1, is_merge=False):
        Commit.__init__(self, tree, sha1)
        # For a non-merge commit, resolve the merge commit that brought it
        # onto the branch via the external `git when-merged` extension.
        # NOTE(review): self.merge is only assigned when is_merge is False,
        # so export_patch() on a merge GeckoCommit would raise
        # AttributeError - confirm merge commits are never exported.
        if not is_merge:
            args = ["-c", sha1]
            try:
                merge_rev = self.git("when-merged", *args).strip()
            except subprocess.CalledProcessError as exn:
                # Distinguish "extension not installed" from a real failure.
                if not find_executable('git-when-merged'):
                    print('Please add the `when-merged` git command to your PATH ' +
                          '(https://github.com/mhagger/git-when-merged/).')
                    sys.exit(1)
                raise exn
            self.merge = GeckoCommit(tree, merge_rev, True)

    def export_patch(self, path=None):
        """Convert a commit in the tree to a Patch with the bug number and
        reviewer stripped from the message"""
        args = ["--binary", self.sha1]
        if path is not None:
            args.append("--")
            args.append(path)
        diff = self.git("show", *args)
        merge_message = self.merge.message if self.merge else None
        return Patch(self.author, self.email, self.message, merge_message, diff)
|
mpl-2.0
|
sebrandon1/nova
|
nova/tests/unit/api/openstack/compute/test_availability_zone.py
|
3
|
13714
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from nova.api.openstack.compute import availability_zone as az_v21
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute import servers as servers_v21
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import servicegroup
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_service
from oslo_config import cfg
FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, disabled=None):
    """Stub for db.service_get_all returning canned service rows.

    When *disabled* is truthy, the disabled fixture set is returned;
    otherwise the enabled set.
    """

    def make_service(binary, availability_zone,
                     created_at, updated_at, host, disabled):
        # Build a fake service row on top of the shared fixture dict.
        return dict(test_service.fake_service,
                    binary=binary,
                    availability_zone=availability_zone,
                    available_zones=availability_zone,
                    created_at=created_at,
                    updated_at=updated_at,
                    host=host,
                    disabled=disabled)

    if disabled:
        rows = [("nova-compute", "zone-2",
                 datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("nova-scheduler", "internal",
                 datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("nova-network", "internal",
                 datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
                 "fake_host-2")]
    else:
        rows = [("nova-compute", "zone-1",
                 datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("nova-sched", "internal",
                 datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("nova-network", "internal",
                 datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
                 "fake_host-2")]
    return [make_service(*row, disabled=bool(disabled)) for row in rows]
def fake_service_is_up(self, service):
    """Stub for servicegroup service_is_up: only nova-network is "down"."""
    binary = service['binary']
    return binary != u"nova-network"
def fake_set_availability_zones(context, services):
    """Stub that hands the service list back untouched (no AZ rewriting)."""
    return services
def fake_get_availability_zones(context):
    """Stub: a single available zone ('nova') and no unavailable zones."""
    available = ['nova']
    unavailable = []
    return available, unavailable
CONF = cfg.CONF
class AvailabilityZoneApiTestV21(test.NoDBTestCase):
    """Tests for the os-availability-zone API controller (index/detail)."""
    # Module under test; subclasses can point this at another API version.
    availability_zone = az_v21

    def setUp(self):
        super(AvailabilityZoneApiTestV21, self).setUp()
        # Fixtures: canned services, identity AZ mapping, and nova-network
        # reported as down (see the fake_* helpers above).
        availability_zones.reset_cache()
        self.stub_out('nova.db.service_get_all', fake_service_get_all)
        self.stubs.Set(availability_zones, 'set_availability_zones',
                       fake_set_availability_zones)
        self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
        self.controller = self.availability_zone.AvailabilityZoneController()
        self.req = fakes.HTTPRequest.blank('')

    def test_filtered_availability_zones(self):
        # 'internal' must be filtered out; availability flag passes through.
        zones = ['zone1', 'internal']
        expected = [{'zoneName': 'zone1',
                     'zoneState': {'available': True},
                     "hosts": None}]
        result = self.controller._get_filtered_availability_zones(zones, True)
        self.assertEqual(result, expected)
        expected = [{'zoneName': 'zone1',
                     'zoneState': {'available': False},
                     "hosts": None}]
        result = self.controller._get_filtered_availability_zones(zones,
                                                                  False)
        self.assertEqual(result, expected)

    def test_availability_zone_index(self):
        # index: zones only (hosts is None), enabled zone first.
        resp_dict = self.controller.index(self.req)
        self.assertIn('availabilityZoneInfo', resp_dict)
        zones = resp_dict['availabilityZoneInfo']
        self.assertEqual(len(zones), 2)
        self.assertEqual(zones[0]['zoneName'], u'zone-1')
        self.assertTrue(zones[0]['zoneState']['available'])
        self.assertIsNone(zones[0]['hosts'])
        self.assertEqual(zones[1]['zoneName'], u'zone-2')
        self.assertFalse(zones[1]['zoneState']['available'])
        self.assertIsNone(zones[1]['hosts'])

    def test_availability_zone_detail(self):
        # detail: per-host/per-service state, including the internal zone.
        resp_dict = self.controller.detail(self.req)
        self.assertIn('availabilityZoneInfo', resp_dict)
        zones = resp_dict['availabilityZoneInfo']
        self.assertEqual(len(zones), 3)
        timestamp = iso8601.parse_date("2012-12-26T14:45:25Z")
        nova_network_timestamp = iso8601.parse_date("2012-12-26T14:45:24Z")
        expected = [{'zoneName': 'zone-1',
                     'zoneState': {'available': True},
                     'hosts': {'fake_host-1': {
                         'nova-compute': {'active': True, 'available': True,
                                          'updated_at': timestamp}}}},
                    {'zoneName': 'internal',
                     'zoneState': {'available': True},
                     'hosts': {'fake_host-1': {
                         'nova-sched': {'active': True, 'available': True,
                                        'updated_at': timestamp}},
                         'fake_host-2': {
                             'nova-network': {
                                 'active': True,
                                 'available': False,
                                 'updated_at': nova_network_timestamp}}}},
                    {'zoneName': 'zone-2',
                     'zoneState': {'available': False},
                     'hosts': None}]
        self.assertEqual(expected, zones)

    def test_availability_zone_detail_no_services(self):
        # With no services, the default 'nova' zone is reported empty.
        expected_response = {'availabilityZoneInfo':
                             [{'zoneState': {'available': True},
                               'hosts': {},
                               'zoneName': 'nova'}]}
        self.stubs.Set(availability_zones, 'get_availability_zones',
                       fake_get_availability_zones)
        resp_dict = self.controller.detail(self.req)
        self.assertThat(resp_dict,
                        matchers.DictMatches(expected_response))
class ServersControllerCreateTestV21(test.TestCase):
    """Tests for availability-zone handling in the server-create API."""
    base_url = '/v2/fake/'

    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTestV21, self).setUp()
        self.instance_cache_num = 0
        self._set_up_controller()

        def instance_create(context, inst):
            # Fake db.instance_create: fabricate an instance record without
            # touching a real database.
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = fake_instance.fake_db_instance(**{
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'uuid': FAKE_UUID,
                'instance_type': inst_type,
                'access_ip_v4': '1.2.3.4',
                'access_ip_v6': 'fead::1234',
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'availability_zone': 'nova',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
                "root_device_name": inst.get('root_device_name', 'vda'),
            })
            return instance

        fake.stub_out_image_service(self)
        self.stub_out('nova.db.instance_create', instance_create)
        self.req = fakes.HTTPRequest.blank('')

    def _set_up_controller(self):
        # Two controllers: a normal one, and one with os-availability-zone
        # blacklisted so the extension is effectively disabled.
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers_v21.ServersController(
            extension_info=ext_info)
        CONF.set_override('extensions_blacklist',
                          'os-availability-zone',
                          'osapi_v21')
        self.no_availability_zone_controller = servers_v21.ServersController(
            extension_info=ext_info)

    def _test_create_extra(self, params, controller):
        # Issue a create request with *params* merged into the server body.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        server.update(params)
        body = dict(server=server)
        server = controller.create(self.req, body=body).obj['server']

    def test_create_instance_with_availability_zone_disabled(self):
        params = {'availability_zone': 'foo'}
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            # Extension disabled: the requested zone must not reach compute.
            self.assertIsNone(kwargs['availability_zone'])
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params, self.no_availability_zone_controller)

    def _create_instance_with_availability_zone(self, zone_name):
        # Build a create request for *zone_name* and register a host in an
        # aggregate mapped to AZ 'nova' so that zone actually exists.
        def create(*args, **kwargs):
            self.assertIn('availability_zone', kwargs)
            self.assertEqual('nova', kwargs['availability_zone'])
            return old_create(*args, **kwargs)

        old_create = compute_api.API.create
        self.stubs.Set(compute_api.API, 'create', create)
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'availability_zone': zone_name,
            },
        }
        admin_context = context.get_admin_context()
        db.service_create(admin_context, {'host': 'host1_zones',
                                          'binary': "nova-compute",
                                          'topic': 'compute',
                                          'report_count': 0})
        agg = db.aggregate_create(admin_context,
                                  {'name': 'agg1'}, {'availability_zone': 'nova'})
        db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
        return self.req, body

    def test_create_instance_with_availability_zone(self):
        zone_name = 'nova'
        req, body = self._create_instance_with_availability_zone(zone_name)
        res = self.controller.create(req, body=body).obj
        server = res['server']
        self.assertEqual(fakes.FAKE_UUID, server['id'])

    def test_create_instance_with_invalid_availability_zone_too_long(self):
        zone_name = 'a' * 256
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_instance_with_invalid_availability_zone_too_short(self):
        zone_name = ''
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_instance_with_invalid_availability_zone_not_str(self):
        zone_name = 111
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_instance_without_availability_zone(self):
        # No zone in the request: creation succeeds with defaults.
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }
        res = self.controller.create(self.req, body=body).obj
        server = res['server']
        self.assertEqual(fakes.FAKE_UUID, server['id'])
|
apache-2.0
|
Bounti/avatar-python
|
avatar/tests/test_configuration.py
|
1
|
3055
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from avatar.configuration.configurationFactory import ConfigurationFactory
from avatar.system import System
import os
# Analyzer / Target / Emulator test matrix: for each component, the backend
# names that are supported, unsupported (known but not implemented) and
# unknown (should be rejected outright).
ate_tests = {
    "analyzer":
    {
        "supported": {"s2e"},
        "unsupported": {"klee", "angr"},
        "unknown": {"abc"},
    },
    "target":
    {
        "supported": {"openocd", "superspeed-jtag", "gdb"},
        # set() rather than {}: {} is an empty *dict*, which made this the
        # only non-set value in the matrix (iteration behavior unchanged).
        "unsupported": set(),
        "unknown": {"abc"},
    },
    "emulator":
    {
        "supported": {"qemu"},
        "unsupported": set(),
        "unknown": {"abc"},
    }
}
def generate_conf(analyzer, target, emulator, type):
    """Build an avatar configuration dict naming the given backends.

    The ``type`` argument (test category) is accepted for symmetry with the
    test matrix but does not influence the generated configuration.
    """
    return {
        "version": 1.0,
        "output_directory": "",
        "configuration_directory": os.getcwd(),
        "analyzer": {"name": analyzer, "configuration": {}},
        "emulator": {"name": emulator, "configuration": {}},
        "target": {"name": target, "configuration": {}},
    }
def test():
    """Exercise ConfigurationFactory against supported, unsupported and
    unknown analyzer/target/emulator combinations.

    Supported combinations should start the system; unsupported ones should
    raise NotImplementedError and unknown ones ValueError.
    """
    print("[*] Testing The Configuration module")
    tested_types = {"supported", "unsupported", "unknown"}
    for t in tested_types:
        for analyzer in ate_tests["analyzer"][t]:
            for target in ate_tests["target"][t]:
                for emulator in ate_tests["emulator"][t]:
                    print(" [-] " + analyzer + " " + target + " " + emulator)
                    try:
                        conf = generate_conf(analyzer, target, emulator, t)
                        configuration = ConfigurationFactory.createParser(conf)
                        # target = TargetsFactory.create(self._configuration)
                        # emulator = EmulatorsFactory.create(self._configuration)
                        # Bug fix: `avatar` was used below without ever being
                        # assigned (its creation was commented out), raising
                        # an uncaught NameError.
                        avatar = System(conf, ["--debug", "--trace"])
                        avatar.start()
                        avatar.stop()
                    except (ValueError, NotImplementedError) as e:
                        # Bug fixes: the handler referenced an undefined name
                        # `ex` (the variable is bound as `e`), compared t
                        # against "NotImplementedError" (never a member of
                        # tested_types - should be "unsupported"), and
                        # printed the undefined name `c` instead of conf.
                        if isinstance(e, ValueError) and t == "unknown":
                            print(" Success")
                        elif isinstance(e, NotImplementedError) and t == "unsupported":
                            print(" Success")
                        else:
                            print("Test failed : " + type(e).__name__)
                            print("Test vector : " + str(conf))
|
apache-2.0
|
acq4/acq4
|
acq4/devices/DAQGeneric/DOChannelTemplate.py
|
3
|
5794
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file 'DOChannelTemplate.ui'
#
# Created: Sun Feb 22 13:29:09 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt builds that expose QString provide fromUtf8; otherwise fall back
    # to the identity function (plain str is already unicode there).
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    # Older translate() signatures take an explicit encoding argument.
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt drops the encoding parameter entirely.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) UI builder for the DO-channel settings form.

    Do not edit by hand - regenerate from DOChannelTemplate.ui instead
    (see the WARNING in the file header).
    """
    def setupUi(self, Form):
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(400, 300)
        self.verticalLayout_3 = QtGui.QVBoxLayout(Form)
        self.verticalLayout_3.setSpacing(0)
        self.verticalLayout_3.setMargin(0)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.groupBox = GroupBox(Form)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.groupBox.setFont(font)
        self.groupBox.setCheckable(False)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setContentsMargins(5, 0, 0, 0)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        # Pre-set / Holding controls: spin boxes limited to 0..1 (digital out).
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.preSetCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.preSetCheck.setFont(font)
        self.preSetCheck.setObjectName(_fromUtf8("preSetCheck"))
        self.gridLayout.addWidget(self.preSetCheck, 0, 0, 1, 1)
        self.holdingCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.holdingCheck.setFont(font)
        self.holdingCheck.setObjectName(_fromUtf8("holdingCheck"))
        self.gridLayout.addWidget(self.holdingCheck, 1, 0, 1, 1)
        self.preSetSpin = QtGui.QSpinBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.preSetSpin.setFont(font)
        self.preSetSpin.setMaximum(1)
        self.preSetSpin.setObjectName(_fromUtf8("preSetSpin"))
        self.gridLayout.addWidget(self.preSetSpin, 0, 1, 1, 1)
        self.holdingSpin = QtGui.QSpinBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.holdingSpin.setFont(font)
        self.holdingSpin.setMaximum(1)
        self.holdingSpin.setObjectName(_fromUtf8("holdingSpin"))
        self.gridLayout.addWidget(self.holdingSpin, 1, 1, 1, 1)
        self.verticalLayout_2.addLayout(self.gridLayout)
        # Waveform-function frame: enable/display toggles plus the generator.
        self.frame = QtGui.QFrame(self.groupBox)
        self.frame.setFrameShape(QtGui.QFrame.Box)
        self.frame.setFrameShadow(QtGui.QFrame.Raised)
        self.frame.setObjectName(_fromUtf8("frame"))
        self.verticalLayout = QtGui.QVBoxLayout(self.frame)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.functionCheck = QtGui.QCheckBox(self.frame)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.functionCheck.setFont(font)
        self.functionCheck.setObjectName(_fromUtf8("functionCheck"))
        self.horizontalLayout.addWidget(self.functionCheck)
        self.displayCheck = QtGui.QCheckBox(self.frame)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.displayCheck.setFont(font)
        self.displayCheck.setChecked(True)
        self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
        self.horizontalLayout.addWidget(self.displayCheck)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.waveGeneratorWidget = StimGenerator(self.frame)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.waveGeneratorWidget.sizePolicy().hasHeightForWidth())
        self.waveGeneratorWidget.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.waveGeneratorWidget.setFont(font)
        self.waveGeneratorWidget.setObjectName(_fromUtf8("waveGeneratorWidget"))
        self.verticalLayout.addWidget(self.waveGeneratorWidget)
        self.verticalLayout_2.addWidget(self.frame)
        self.verticalLayout_3.addWidget(self.groupBox)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Apply all translatable UI strings.
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "GroupBox", None))
        self.preSetCheck.setText(_translate("Form", "Pre-set", None))
        self.holdingCheck.setText(_translate("Form", "Holding", None))
        self.functionCheck.setText(_translate("Form", "Enable Function", None))
        self.displayCheck.setText(_translate("Form", "Display", None))
from acq4.pyqtgraph import GroupBox
from acq4.util.generator.StimGenerator import StimGenerator
|
mit
|
santoshsahoo/filesync-server
|
src/backends/filesync/data/tests/test_services.py
|
6
|
7010
|
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Test the Data services."""
import uuid
import datetime
from backends.filesync.data.testing.testdata import get_fake_hash
from backends.filesync.data.testing.testcase import StorageDALTestCase
from backends.filesync.data.services import (
get_abandoned_uploadjobs,
get_node_for_shard,
get_user_info_for_shard,
get_public_file,
get_public_directory,
get_storage_user,
make_storage_user,
)
from backends.filesync.data import dao, errors, utils, model
MAX_STORAGE_BYTES = 10 * 23
class DataServicesTestCase(StorageDALTestCase):
    """Test the DataServices.

    Since all the logic is in lower level tests, these tests are kept
    to a minimum
    """

    def assert_storage_user(
            self, storage_user, user_id, visible_name, max_storage_bytes):
        """Assert *storage_user* is a dao.StorageUser with the given
        id, visible name and quota."""
        self.assertIsInstance(storage_user, dao.StorageUser)
        self.assertEqual(storage_user.id, user_id)
        self.assertEqual(storage_user.visible_name, visible_name)
        quota = storage_user.get_quota()
        self.assertEqual(quota.max_storage_bytes, max_storage_bytes)

    def test_make_storage_user(self):
        """Test the make_storage_user function."""
        storage_user = make_storage_user(
            1, u"Cool UserName", u"Visible Name", MAX_STORAGE_BYTES)
        self.assert_storage_user(
            storage_user, 1, u"Visible Name", MAX_STORAGE_BYTES)

    def test_get_storage_user(self):
        """Test the get_storage_user function."""
        user = make_storage_user(
            1, u"Cool UserName", u"Visible Name", MAX_STORAGE_BYTES)
        user = get_storage_user(1)
        self.assertIsInstance(user, dao.StorageUser)
        # An unsubscribed user is invisible to the default (active-only)
        # lookup, but reachable with active_only=False.
        user.update(subscription=False)
        self.assertRaises(errors.DoesNotExist, get_storage_user, 1)
        user = get_storage_user(1, active_only=False)
        user.update(subscription=True)
        # now check a locked user.
        suser = self.user_store.get(model.StorageUser, user.id)
        suser.locked = True
        self.user_store.commit()
        self.assertRaises(errors.LockedUserError, get_storage_user, user.id)
        # and ignore the lock too
        user = get_storage_user(user.id, readonly=True)
        self.assertIsInstance(user, dao.StorageUser)

    def test_get_node_for_shard(self):
        """Test the get_node_for_shard function."""
        user1 = self.obj_factory.make_user(
            1, u"User 1", u"User 1", MAX_STORAGE_BYTES, shard_id=u"shard1")
        node = user1.volume().root.make_file(u"test file")
        new_node = get_node_for_shard(node.id, u'shard1')
        self.assertEqual(node.id, new_node.id)
        self.assertEqual(node.parent_id, new_node.parent_id)
        self.assertEqual(node.name, new_node.name)
        self.assertEqual(node.path, new_node.path)

    def test_get_user_info_for_shard(self):
        """Test the get_user_info_for_shard function."""
        user = self.obj_factory.make_user(
            1, u"User 1", u"User 1", MAX_STORAGE_BYTES, shard_id=u"shard1")
        user_info = get_user_info_for_shard(user.id, user.shard_id)
        quota = user.get_quota()
        self.assertEqual(quota.max_storage_bytes, user_info.max_storage_bytes)
        self.assertEqual(quota.used_storage_bytes,
                         user_info.used_storage_bytes)
        self.assertEqual(quota.free_bytes, user_info.free_bytes)
        self.assertRaises(errors.DoesNotExist, get_user_info_for_shard, 41,
                          user.shard_id)

    def test_get_abandoned_uploadjobs(self):
        """Test the get_abandoned_uploadjobs function."""
        self.assertRaises(TypeError, get_abandoned_uploadjobs, 'shard1')
        jobs = get_abandoned_uploadjobs('shard1', datetime.datetime.now(), 100)
        self.assertIsInstance(jobs, list)

    def test_get_public_file(self):
        """Test the get_public_file function."""
        # Monkeypatch the module flag; restore in a finally block so a
        # failing assertion can't leak state into other tests.
        save_setting = utils.set_public_uuid
        utils.set_public_uuid = False
        try:
            user = self.obj_factory.make_user(
                1, u"Cool UserName", u"Visible Name", 10)
            a_file = user.volume().root.make_file_with_content(
                u"file.txt", get_fake_hash(), 123, 1, 1, uuid.uuid4())
            a_file.change_public_access(True)
            public_key = a_file.public_key
            f1 = get_public_file(public_key)
            self.assertEqual(f1, a_file)
            a_file.change_public_access(False)
            self.assertRaises(errors.DoesNotExist,
                              get_public_file, public_key, use_uuid=False)
        finally:
            utils.set_public_uuid = save_setting

    def test_get_public_directory(self):
        """Test the get_public_directory function."""
        user = self.obj_factory.make_user(
            1, u"Cool UserName", u"Visible Name", 10)
        a_dir = user.volume().root.make_subdirectory(u'test_dir')
        a_dir.make_file_with_content(
            u"file.txt", get_fake_hash(), 123, 1, 1, uuid.uuid4())
        a_dir.change_public_access(True, allow_directory=True)
        public_key = a_dir.public_key
        pub_dir = get_public_directory(public_key)
        self.assertEqual(pub_dir, a_dir)
        a_dir.change_public_access(False, allow_directory=True)
        self.assertRaises(errors.DoesNotExist,
                          get_public_directory, public_key)

    def test_get_public_file_public_uuid(self):
        """Test the get_public_file function."""
        # Monkeypatch the module flag; restore in a finally block so a
        # failing assertion can't leak state into other tests.
        save_setting = utils.set_public_uuid
        utils.set_public_uuid = True
        try:
            user = self.obj_factory.make_user(
                1, u"Cool UserName", u"Visible Name", 10)
            a_file = user.volume().root.make_file_with_content(
                u"file.txt", get_fake_hash(), 123, 1, 1, uuid.uuid4())
            a_file.change_public_access(True)
            public_key = a_file.public_key
            # get the file using the public uuid
            f1 = get_public_file(public_key, use_uuid=True)
            self.assertEqual(f1, a_file)
            # can't get the file using the old id
            self.assertRaises(errors.DoesNotExist,
                              get_public_file, public_key)
            a_file.change_public_access(False)
            self.assertRaises(errors.DoesNotExist,
                              get_public_file, public_key, use_uuid=True)
        finally:
            utils.set_public_uuid = save_setting
|
agpl-3.0
|
PourroyJean/performance_modelisation
|
script/data visualisation/venv/lib/python3.6/site-packages/pip/wheel.py
|
338
|
32010
|
"""
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks,
)
from pip.utils.ui import open_spinner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
    """A cache of wheels for future installs."""

    def __init__(self, cache_dir, format_control):
        """Create a wheel cache.

        :param cache_dir: The root of the cache.
        :param format_control: A pip.index.FormatControl object to limit
            binaries being read from the cache.
        """
        if cache_dir:
            self._cache_dir = expanduser(cache_dir)
        else:
            self._cache_dir = None
        self._format_control = format_control

    def cached_wheel(self, link, package_name):
        """Return a cached-wheel link for *link*, or *link* unchanged."""
        return cached_wheel(
            self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
def cached_wheel(cache_dir, link, format_control, package_name):
    """Return the link of the best cached wheel for *link*, if any.

    Returns *link* unchanged whenever the cache is not applicable: no
    cache dir, no link or package name, the link is already a wheel or
    not an artifact, binaries are disabled for the package, or no
    supported cached candidate exists.
    """
    if not cache_dir or not link or not package_name:
        return link
    if link.is_wheel or not link.is_artifact:
        return link
    canonical_name = canonicalize_name(package_name)
    formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
    if "binary" not in formats:
        return link
    root = _cache_for_link(cache_dir, link)
    try:
        wheel_names = os.listdir(root)
    except OSError as e:
        # A missing cache slot just means no hit; anything else is real.
        if e.errno in (errno.ENOENT, errno.ENOTDIR):
            return link
        raise
    candidates = []
    for wheel_name in wheel_names:
        try:
            wheel = Wheel(wheel_name)
        except InvalidWheelFilename:
            continue
        if not wheel.supported():
            # Built for a different python/arch/etc
            continue
        candidates.append((wheel.support_index_min(), wheel_name))
    if not candidates:
        return link
    # Lowest support index wins (most-preferred tag combination).
    best_name = min(candidates)[1]
    return pip.index.Link(path_to_url(os.path.join(root, best_name)))
def rehash(path, algo='sha256', blocksize=1 << 20):
    """Return (digest, length) for the file at *path*.

    The digest is the urlsafe-base64 of ``hashlib.new(algo)`` with
    trailing ``=`` padding stripped, prefixed RECORD-style with
    ``sha256=``.
    """
    hasher = hashlib.new(algo)
    length = 0
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(blocksize)
            if not chunk:
                break
            length += len(chunk)
            hasher.update(chunk)
    digest = 'sha256=' + urlsafe_b64encode(
        hasher.digest()
    ).decode('latin1').rstrip('=')
    return (digest, length)
def open_for_csv(name, mode):
    """Open *name* for csv use with the right newline handling for the
    running interpreter: binary mode on Python 2, ``newline=''`` on 3."""
    if sys.version_info[0] < 3:
        extra_mode, kwargs = 'b', {}
    else:
        extra_mode, kwargs = '', {'newline': ''}
    return open(name, mode + extra_mode, **kwargs)
def fix_script(path):
    """Replace a ``#!python`` shebang in *path* with the running
    interpreter's path.

    Returns True if the file was changed, False when there was no
    ``#!python`` shebang; returns None when *path* is not a regular file.
    """
    # XXX RECORD hashes will need to be updated
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as script:
        shebang = script.readline()
        if not shebang.startswith(b'#!python'):
            return False
        body = script.read()
    interpreter = sys.executable.encode(sys.getfilesystemencoding())
    with open(path, 'wb') as script:
        script.write(b'#!' + interpreter + os.linesep.encode("ascii"))
        script.write(body)
    return True
# Matches a ``<name>[-<version>].dist-info`` directory name, used to locate
# the metadata directory of an extracted wheel.
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
                              \.dist-info$""", re.VERBOSE)


def root_is_purelib(name, wheeldir):
    """
    Return True if the extracted wheel in wheeldir should go into purelib.
    """
    # .dist-info directories use '_' where the project name has '-'.
    folded = name.replace("-", "_")
    for entry in os.listdir(wheeldir):
        match = dist_info_re.match(entry)
        if not match or match.group('name') != folded:
            continue
        with open(os.path.join(wheeldir, entry, 'WHEEL')) as wheel_file:
            for raw_line in wheel_file:
                if raw_line.lower().rstrip() == "root-is-purelib: true":
                    return True
    return False
def get_entrypoints(filename):
    """Parse *filename* (a wheel's ``entry_points.txt``) and return two
    dicts ``(console, gui)`` mapping script names to entry-point specs.

    A missing file yields ``({}, {})``.
    """
    if not os.path.exists(filename):
        return {}, {}

    # This is done because you can pass a string to entry_points wrappers which
    # means that they may or may not be valid INI files. The attempt here is to
    # strip leading and trailing whitespace in order to make them valid INI
    # files.
    with open(filename) as fp:
        data = StringIO()
        for line in fp:
            data.write(line.strip())
            data.write("\n")
        data.seek(0)

    cp = configparser.RawConfigParser()
    # Identity optionxform keeps script names case-sensitive (the default
    # would lowercase them).
    cp.optionxform = lambda option: option
    # readfp is the py2-compatible spelling (read_file on py3-only code).
    cp.readfp(data)
    console = {}
    gui = {}
    if cp.has_section('console_scripts'):
        console = dict(cp.items('console_scripts'))
    if cp.has_section('gui_scripts'):
        gui = dict(cp.items('gui_scripts'))
    return console, gui
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
                     pycompile=True, scheme=None, isolated=False, prefix=None):
    """Install a wheel that has already been unpacked into *wheeldir*.

    Copies the payload into the install scheme directories, fixes up
    scripts and generates entry-point wrappers, records 'pip' as the
    installer, and rewrites RECORD to reflect the installed paths.

    :param name: distribution name of the wheel being installed.
    :param req: the requirement being installed (used to match the
        .dist-info directory and in error messages).
    :param wheeldir: directory containing the unpacked wheel.
    :param pycompile: byte-compile the payload before copying when True.
    :param scheme: optional pre-computed distutils scheme; derived from
        the remaining keyword arguments when not given.
    """
    if not scheme:
        scheme = distutils_scheme(
            name, user=user, home=home, root=root, isolated=isolated,
            prefix=prefix,
        )

    if root_is_purelib(name, wheeldir):
        lib_dir = scheme['purelib']
    else:
        lib_dir = scheme['platlib']

    info_dir = []
    data_dirs = []
    # Trailing separator makes the prefix arithmetic in clobber()'s
    # os.walk loop strip the source root cleanly.
    source = wheeldir.rstrip(os.path.sep) + os.path.sep

    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed = {}
    changed = set()
    generated = []

    # Compile all of the pyc files that we're going to be installing
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                compileall.compile_dir(source, force=True, quiet=True)
        logger.debug(stdout.getvalue())

    def normpath(src, p):
        # Relative path with forward slashes, as used in RECORD.
        return os.path.relpath(src, p).replace(os.path.sep, '/')

    def record_installed(srcfile, destfile, modified=False):
        """Map archive RECORD paths to installation RECORD paths."""
        oldpath = normpath(srcfile, wheeldir)
        newpath = normpath(destfile, lib_dir)
        installed[oldpath] = newpath
        if modified:
            changed.add(destfile)

    def clobber(source, dest, is_base, fixer=None, filter=None):
        # Copy the tree at *source* into *dest*. is_base marks the first
        # (root) invocation, where .data and .dist-info dirs are special.
        ensure_dir(dest)  # common for the 'include' path

        for dir, subdirs, files in os.walk(source):
            basedir = dir[len(source):].lstrip(os.path.sep)
            destdir = os.path.join(dest, basedir)
            if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
                continue
            for s in subdirs:
                destsubdir = os.path.join(dest, basedir, s)
                if is_base and basedir == '' and destsubdir.endswith('.data'):
                    data_dirs.append(s)
                    continue
                elif (is_base and
                        s.endswith('.dist-info') and
                        canonicalize_name(s).startswith(
                            canonicalize_name(req.name))):
                    assert not info_dir, ('Multiple .dist-info directories: ' +
                                          destsubdir + ', ' +
                                          ', '.join(info_dir))
                    info_dir.append(destsubdir)
            for f in files:
                # Skip unwanted files
                if filter and filter(f):
                    continue
                srcfile = os.path.join(dir, f)
                destfile = os.path.join(dest, basedir, f)
                # directory creation is lazy and after the file filtering above
                # to ensure we don't install empty dirs; empty dirs can't be
                # uninstalled.
                ensure_dir(destdir)

                # We use copyfile (not move, copy, or copy2) to be extra sure
                # that we are not moving directories over (copyfile fails for
                # directories) as well as to ensure that we are not copying
                # over any metadata because we want more control over what
                # metadata we actually copy over.
                shutil.copyfile(srcfile, destfile)

                # Copy over the metadata for the file, currently this only
                # includes the atime and mtime.
                st = os.stat(srcfile)
                if hasattr(os, "utime"):
                    os.utime(destfile, (st.st_atime, st.st_mtime))

                # If our file is executable, then make our destination file
                # executable.
                if os.access(srcfile, os.X_OK):
                    st = os.stat(srcfile)
                    permissions = (
                        st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
                    )
                    os.chmod(destfile, permissions)

                # NB: local flag shadowing the outer 'changed' set; the set
                # itself is updated through record_installed().
                changed = False
                if fixer:
                    changed = fixer(destfile)
                record_installed(srcfile, destfile, changed)

    clobber(source, lib_dir, True)

    assert info_dir, "%s .dist-info directory not found" % req

    # Get the defined entry points
    ep_file = os.path.join(info_dir[0], 'entry_points.txt')
    console, gui = get_entrypoints(ep_file)

    def is_entrypoint_wrapper(name):
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        if name.lower().endswith('.exe'):
            matchname = name[:-4]
        elif name.lower().endswith('-script.py'):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return (matchname in console or matchname in gui)

    # Install the contents of each <pkg>.data subdirectory into the
    # scheme path of the same name, fixing shebangs under 'scripts'.
    for datadir in data_dirs:
        fixer = None
        filter = None
        for subdir in os.listdir(os.path.join(wheeldir, datadir)):
            fixer = None
            if subdir == 'scripts':
                fixer = fix_script
                filter = is_entrypoint_wrapper
            source = os.path.join(wheeldir, datadir, subdir)
            dest = scheme[subdir]
            clobber(source, dest, False, fixer=fixer, filter=filter)

    maker = ScriptMaker(None, scheme['scripts'])

    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True

    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = set(('', ))

    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True

    # Simplify the script and fix the fact that the default script swallows
    # every single stack trace.
    # See https://bitbucket.org/pypa/distlib/issue/34/
    # See https://bitbucket.org/pypa/distlib/issue/33/
    def _get_script_text(entry):
        if entry.suffix is None:
            raise InstallationError(
                "Invalid script entry point: %s for req: %s - A callable "
                "suffix is required. Cf https://packaging.python.org/en/"
                "latest/distributing.html#console-scripts for more "
                "information." % (entry, req)
            )
        return maker.script_template % {
            "module": entry.prefix,
            "import_name": entry.suffix.split(".")[0],
            "func": entry.suffix,
        }

    maker._get_script_text = _get_script_text
    maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys

from %(module)s import %(import_name)s

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(%(func)s())
"""

    # Special case pip and setuptools to generate versioned wrappers
    #
    # The issue is that some projects (specifically, pip and setuptools) use
    # code in setup.py to create "versioned" entry points - pip2.7 on Python
    # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
    # the wheel metadata at build time, and so if the wheel is installed with
    # a *different* version of Python the entry points will be wrong. The
    # correct fix for this is to enhance the metadata to be able to describe
    # such versioned entry points, but that won't happen till Metadata 2.0 is
    # available.
    # In the meantime, projects using versioned entry points will either have
    # incorrect versioned entry points, or they will not be able to distribute
    # "universal" wheels (i.e., they will need a wheel per Python version).
    #
    # Because setuptools and pip are bundled with _ensurepip and virtualenv,
    # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
    # override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
    # is available.
    #
    # To add the level of hack in this section of code, in order to support
    # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which will control which version scripts get installed.
    #
    # ENSUREPIP_OPTIONS=altinstall
    #   - Only pipX.Y and easy_install-X.Y will be generated and installed
    # ENSUREPIP_OPTIONS=install
    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this option is technically if ENSUREPIP_OPTIONS is set and is
    #     not altinstall
    # DEFAULT
    #   - The default behavior is to install pip, pipX, pipX.Y, easy_install
    #     and easy_install-X.Y.
    pip_script = console.pop('pip', None)
    if pip_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            spec = 'pip = ' + pip_script
            generated.extend(maker.make(spec))

        if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
            spec = 'pip%s = %s' % (sys.version[:1], pip_script)
            generated.extend(maker.make(spec))

        spec = 'pip%s = %s' % (sys.version[:3], pip_script)
        generated.extend(maker.make(spec))

        # Delete any other versioned pip entry points
        pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
        for k in pip_ep:
            del console[k]
    easy_install_script = console.pop('easy_install', None)
    if easy_install_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            spec = 'easy_install = ' + easy_install_script
            generated.extend(maker.make(spec))

        spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
        generated.extend(maker.make(spec))

        # Delete any other versioned easy_install entry points
        easy_install_ep = [
            k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
        ]
        for k in easy_install_ep:
            del console[k]

    # Generate the console and GUI entry points specified in the wheel
    if len(console) > 0:
        generated.extend(
            maker.make_multiple(['%s = %s' % kv for kv in console.items()])
        )

    if len(gui) > 0:
        generated.extend(
            maker.make_multiple(
                ['%s = %s' % kv for kv in gui.items()],
                {'gui': True}
            )
        )

    # Record pip as the installer
    installer = os.path.join(info_dir[0], 'INSTALLER')
    temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
    with open(temp_installer, 'wb') as installer_file:
        installer_file.write(b'pip\n')
    shutil.move(temp_installer, installer)
    generated.append(installer)

    # Record details of all files installed: rewrite RECORD with installed
    # paths, re-hashing any file the shebang fixer modified, and appending
    # rows for generated wrappers and leftover installed entries.
    record = os.path.join(info_dir[0], 'RECORD')
    temp_record = os.path.join(info_dir[0], 'RECORD.pip')
    with open_for_csv(record, 'r') as record_in:
        with open_for_csv(temp_record, 'w+') as record_out:
            reader = csv.reader(record_in)
            writer = csv.writer(record_out)
            for row in reader:
                row[0] = installed.pop(row[0], row[0])
                if row[0] in changed:
                    row[1], row[2] = rehash(row[0])
                writer.writerow(row)
            for f in generated:
                h, l = rehash(f)
                writer.writerow((normpath(f, lib_dir), h, l))
            for f in installed:
                writer.writerow((installed[f], '', ''))
    shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.pyc

    Yields every path listed in the distribution's RECORD and, for each
    ``.py`` entry, the sibling ``.pyc`` path in the same directory.

    UninstallPathSet.add() takes care of the __pycache__ .pyc.
    """
    from pip.utils import FakeFile  # circular import
    record_rows = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
    for row in record_rows:
        path = os.path.join(dist.location, row[0])
        yield path
        if not path.endswith('.py'):
            continue
        directory, filename = os.path.split(path)
        yield os.path.join(directory, filename[:-3] + '.pyc')
def wheel_version(source_dir):
    """
    Return the Wheel-Version of an extracted wheel, if possible.

    Otherwise, return False if we couldn't parse / extract it.

    :param source_dir: directory containing the extracted wheel's
        ``.dist-info`` metadata.
    :return: a tuple of ints such as ``(1, 0)``, or False.
    """
    try:
        dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]

        wheel_data = dist.get_metadata('WHEEL')
        wheel_data = Parser().parsestr(wheel_data)

        version = wheel_data['Wheel-Version'].strip()
        version = tuple(map(int, version.split('.')))
        return version
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; any metadata or parse failure
        # still maps to False.
        return False
def check_compatibility(version, name):
    """
    Raises errors or warns if called with an incompatible Wheel-Version.

    Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
    installing a version only minor version ahead (e.g 1.2 > 1.1).

    version: a 2-tuple representing a Wheel-Version (Major, Minor)
    name: name of wheel or package to raise exception about

    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
    """
    if not version:
        raise UnsupportedWheel(
            "%s is in an unsupported or invalid wheel" % name
        )

    # Major series ahead of ours: refuse outright.
    if version[0] > VERSION_COMPATIBLE[0]:
        raise UnsupportedWheel(
            "%s's Wheel-Version (%s) is not compatible with this version "
            "of pip" % (name, '.'.join(map(str, version)))
        )
    # Same major, newer minor: install, but warn.
    if version > VERSION_COMPATIBLE:
        logger.warning(
            'Installing from a newer Wheel-Version (%s)',
            '.'.join(map(str, version)),
        )
class Wheel(object):
    """A wheel file"""

    # TODO: maybe move the install code into this class

    wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
        ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE
    )

    def __init__(self, filename):
        """
        :raises InvalidWheelFilename: when the filename is invalid for a wheel
        """
        match = self.wheel_file_re.match(filename)
        if match is None:
            raise InvalidWheelFilename(
                "%s is not a valid wheel filename." % filename
            )
        self.filename = filename
        # we'll assume "_" means "-" due to wheel naming scheme
        # (https://github.com/pypa/pip/issues/1150)
        self.name = match.group('name').replace('_', '-')
        self.version = match.group('ver').replace('_', '-')
        self.pyversions = match.group('pyver').split('.')
        self.abis = match.group('abi').split('.')
        self.plats = match.group('plat').split('.')

        # All the tag combinations from this file
        self.file_tags = set(
            (py, abi, plat)
            for py in self.pyversions
            for abi in self.abis
            for plat in self.plats
        )

    def support_index_min(self, tags=None):
        """
        Return the lowest index that one of the wheel's file_tag combinations
        achieves in the supported_tags list e.g. if there are 8 supported tags,
        and one of the file tags is first in the list, then return 0. Returns
        None is the wheel is not supported.
        """
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        indexes = [tags.index(tag) for tag in self.file_tags if tag in tags]
        if not indexes:
            return None
        return min(indexes)

    def supported(self, tags=None):
        """Is this wheel supported on this system?"""
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        return any(tag in self.file_tags for tag in tags)
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
    def __init__(self, requirement_set, finder, build_options=None,
                 global_options=None):
        # Requirements to build, and the finder used to locate them.
        self.requirement_set = requirement_set
        self.finder = finder
        # Root of the wheel cache and the explicit wheel output directory
        # (the latter may be unset when only autobuilding into the cache).
        self._cache_root = requirement_set._wheel_cache._cache_dir
        self._wheel_dir = requirement_set.wheel_download_dir
        # Extra options for 'setup.py bdist_wheel' and for every setup.py
        # invocation, respectively.
        self.build_options = build_options or []
        self.global_options = global_options or []
def _build_one(self, req, output_dir, python_tag=None):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd, python_tag=python_tag):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
pass
# Ignore return, we can't do anything else useful.
self._clean_one(req)
return None
finally:
rmtree(tempd)
    def _base_setup_args(self, req):
        # Run the requirement's setup.py through the setuptools shim with
        # unbuffered output (-u) so build logs stream promptly, followed by
        # any user-supplied global options.
        return [
            sys.executable, "-u", '-c',
            SETUPTOOLS_SHIM % req.setup_py
        ] + list(self.global_options)
def __build_one(self, req, tempd, python_tag=None):
base_args = self._base_setup_args(req)
spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
if python_tag is not None:
wheel_args += ["--python-tag", python_tag]
try:
call_subprocess(wheel_args, cwd=req.setup_py_dir,
show_stdout=False, spinner=spinner)
return True
except:
spinner.finish("error")
logger.error('Failed building wheel for %s', req.name)
return False
def _clean_one(self, req):
base_args = self._base_setup_args(req)
logger.info('Running setup.py clean for %s', req.name)
clean_args = base_args + ['clean', '--all']
try:
call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(self, autobuilding=False):
    """Build wheels.

    :param autobuilding: If True, wheels are built into the wheel cache
        (and the sdist we built from is replaced with the newly built
        wheel, in preparation for installation); if False they are
        written to the explicit wheel directory.
    :return: True if all the wheels built correctly.
    """
    # Either an explicit wheel dir or (when autobuilding) a cache root
    # must exist; there is nowhere else to put the output.
    assert self._wheel_dir or (autobuilding and self._cache_root)
    # unpack sdists and constructs req set
    self.requirement_set.prepare_files(self.finder)

    reqset = self.requirement_set.requirements.values()

    # Select the subset of requirements that should actually be built.
    buildset = []
    for req in reqset:
        if req.constraint:
            continue
        if req.is_wheel:
            if not autobuilding:
                logger.info(
                    'Skipping %s, due to already being wheel.', req.name)
        elif autobuilding and req.editable:
            # Editable requirements are never cached as wheels.
            pass
        elif autobuilding and req.link and not req.link.is_artifact:
            # VCS links are not cacheable.
            pass
        elif autobuilding and not req.source_dir:
            pass
        else:
            if autobuilding:
                link = req.link
                base, ext = link.splitext()
                if pip.index.egg_info_matches(base, None, link) is None:
                    # Doesn't look like a package - don't autobuild a wheel
                    # because we'll have no way to lookup the result sanely
                    continue
                if "binary" not in pip.index.fmt_ctl_formats(
                        self.finder.format_control,
                        canonicalize_name(req.name)):
                    logger.info(
                        "Skipping bdist_wheel for %s, due to binaries "
                        "being disabled for it.", req.name)
                    continue
            buildset.append(req)

    if not buildset:
        return True

    # Build the wheels.
    logger.info(
        'Building wheels for collected packages: %s',
        ', '.join([req.name for req in buildset]),
    )
    with indent_log():
        build_success, build_failure = [], []
        for req in buildset:
            python_tag = None
            if autobuilding:
                python_tag = pep425tags.implementation_tag
                # Cached wheels live in a per-link subdirectory.
                output_dir = _cache_for_link(self._cache_root, req.link)
                try:
                    ensure_dir(output_dir)
                except OSError as e:
                    logger.warning("Building wheel for %s failed: %s",
                                   req.name, e)
                    build_failure.append(req)
                    continue
            else:
                output_dir = self._wheel_dir
            wheel_file = self._build_one(
                req, output_dir,
                python_tag=python_tag,
            )
            if wheel_file:
                build_success.append(req)
                if autobuilding:
                    # XXX: This is mildly duplicative with prepare_files,
                    # but not close enough to pull out to a single common
                    # method.
                    # The code below assumes temporary source dirs -
                    # prevent it doing bad things.
                    if req.source_dir and not os.path.exists(os.path.join(
                            req.source_dir, PIP_DELETE_MARKER_FILENAME)):
                        raise AssertionError(
                            "bad source dir - missing marker")
                    # Delete the source we built the wheel from
                    req.remove_temporary_source()
                    # set the build directory again - name is known from
                    # the work prepare_files did.
                    req.source_dir = req.build_location(
                        self.requirement_set.build_dir)
                    # Update the link for this.
                    req.link = pip.index.Link(
                        path_to_url(wheel_file))
                    assert req.link.is_wheel
                    # extract the wheel into the dir
                    unpack_url(
                        req.link, req.source_dir, None, False,
                        session=self.requirement_set.session)
            else:
                build_failure.append(req)

    # notify success/failure
    if build_success:
        logger.info(
            'Successfully built %s',
            ' '.join([req.name for req in build_success]),
        )
    if build_failure:
        logger.info(
            'Failed to build %s',
            ' '.join([req.name for req in build_failure]),
        )
    # Return True if all builds were successful
    return len(build_failure) == 0
|
gpl-3.0
|
gustavo-guimaraes/siga
|
backend/appengine/lib/pip/_vendor/requests/packages/chardet/langhebrewmodel.py
|
2763
|
11318
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Aggregate model consumed by chardet's Hebrew prober: the byte-to-
# frequency-order map plus the two-character sequence likelihood matrix.
Win1255HebrewModel = {
    'charToOrderMap': win1255_CharToOrderMap,
    'precedenceMatrix': HebrewLangModel,
    # Fraction of sequences covered by the most frequent 512 pairs
    # (see the statistics in the comment block above the matrix).
    'mTypicalPositiveRatio': 0.984004,
    'keepEnglishLetter': False,
    'charsetName': "windows-1255"
}
# flake8: noqa
|
mit
|
fgesora/odoo
|
addons/account_analytic_plans/report/__init__.py
|
445
|
1084
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ingo-m/py_pRF_mapping
|
pyprf/analysis/model_creation_timecourses_par.py
|
2
|
7380
|
# -*- coding: utf-8 -*-
"""Parallelisation function for crt_prf_tcmdl."""
# Part of py_pRF_mapping library
# Copyright (C) 2016 Ingo Marquardt
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import h5py
import threading
import queue
from pyprf.analysis.utilities import crt_gauss
def prf_par(idxPrc, aryMdlParamsChnk, tplVslSpcSze, aryPixConv, strPathMdl,
            queOut):
    """
    Create pRF time course models.

    Parameters
    ----------
    idxPrc : int
        Process ID.
    aryMdlParamsChnk : np.array
        2D numpy array containing the parameters for the pRF models to be
        created. Dimensionality: `aryMdlParamsChnk[model-ID, parameter-value]`.
        For each model there are four values: (0) an index starting from zero,
        (1) the x-position, (2) the y-position, and (3) the standard deviation.
        Parameters 1, 2, and 3 are in units of the upsampled visual space.
    tplVslSpcSze : tuple
        Pixel size of visual space model in which the pRF models are created
        (x- and y-dimension).
    aryPixConv : np.array
        4D numpy array containing the pixel-wise, HRF-convolved design matrix,
        with the following structure: `aryPixConv[x-pixels, y-pixels,
        conditions, volumes]`.
    strPathMdl : str
        Filepath of pRF time course models (including file name, but without
        file extension). If `strPathMdl` is not `None`, model time courses are
        saved to disk in hdf5 format during model creation in order to avoid
        out of memory problems.
    queOut : multiprocessing.queues.Queue
        Queue to put the results on.

    Returns
    -------
    lstOut : list
        List containing the following objects:
        idxPrc : int
            Process ID.
        vecMdlIdx : np.array
            1D numpy array with model indices (for sorting of models after
            parallel function). Shape: vecMdlIdx[varNumMdls].
        aryPrfTc : np.array
            3D numpy array with pRF model time courses, shape:
            aryPrfTc[varNumMdls, varNumCon, varNumVol]. `None` when time
            courses were streamed to an hdf5 file instead.

    Notes
    -----
    The list with results is not returned directly, but placed on a
    multiprocessing queue.
    """
    # Number of models (i.e., number of combinations of model parameters in
    # the current chunk):
    varNumMdls = aryMdlParamsChnk.shape[0]

    # Number of conditions:
    varNumCon = aryPixConv.shape[2]

    # Number of volumes:
    varNumVol = aryPixConv.shape[3]

    # Only place model time courses on RAM if the parameter space is not too
    # large. Whether this is the case is signalled by whether a file path for
    # storing of an hdf5 file was provided.
    if strPathMdl is None:

        # Output array with pRF model time courses:
        aryPrfTc = np.zeros([varNumMdls, varNumCon, varNumVol],
                            dtype=np.float32)

    else:

        # Prepare memory-efficient placement of pRF model time courses in
        # hdf5 file.

        # Buffer size (bounds how many pending time courses sit in RAM):
        varBuff = 100

        # Create FIFO queue:
        objQ = queue.Queue(maxsize=varBuff)

        # Path of hdf5 file (one file per process):
        strPthHdf5 = (strPathMdl + '_' + str(idxPrc) + '.hdf5')

        # Create hdf5 file:
        fleHdf5 = h5py.File(strPthHdf5, 'w')

        # Create dataset within hdf5 file:
        dtsPrfTc = fleHdf5.create_dataset('pRF_time_courses',
                                          (varNumMdls,
                                           varNumCon,
                                           varNumVol),
                                          dtype=np.float32)

        # Define & run extra thread that drains the queue into the hdf5
        # dataset:
        objThrd = threading.Thread(target=feed_hdf5_q,
                                   args=(dtsPrfTc, objQ, varNumMdls))
        # BUG FIX: ``Thread.setDaemon(True)`` is deprecated (and removed in
        # recent Python versions); assigning the ``daemon`` attribute is the
        # supported equivalent with identical behaviour.
        objThrd.daemon = True
        objThrd.start()

    # Loop through combinations of model parameters:
    for idxMdl in range(varNumMdls):

        # Spatial parameters of current model:
        varTmpX = aryMdlParamsChnk[idxMdl, 1]
        varTmpY = aryMdlParamsChnk[idxMdl, 2]
        varTmpSd = aryMdlParamsChnk[idxMdl, 3]

        # Create pRF model (2D):
        aryGauss = crt_gauss(tplVslSpcSze[0],
                             tplVslSpcSze[1],
                             varTmpX,
                             varTmpY,
                             varTmpSd)

        # Multiply super-sampled pixel-time courses with Gaussian pRF
        # models:
        aryPrfTcTmp = np.multiply(aryPixConv, aryGauss[:, :, None, None])
        # Shape: aryPrfTcTmp[x-pixels, y-pixels, conditions, volumes]

        # Calculate sum across x- and y-dimensions - the 'area under the
        # Gaussian surface'. This gives us the ratio of 'activation' of the
        # pRF at each time point, or, in other words, the pRF time course
        # model. Note: Normalisation of pRFs takes place at crt_gauss();
        # pRF models are normalised to have an area under the curve of one
        # when they are created.
        aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1), dtype=np.float32)
        # New shape: aryPrfTcTmp[conditions, volumes]

        if strPathMdl is None:
            # Put model time courses into the function's output array:
            aryPrfTc[idxMdl, :, :] = np.copy(aryPrfTcTmp)
        else:
            # Place model time courses on queue (blocks when the buffer is
            # full, bounding memory use):
            objQ.put(aryPrfTcTmp)

    # Close queue feeding thread, and hdf5 file.
    if strPathMdl is not None:
        # Close thread:
        objThrd.join()
        # Close file:
        fleHdf5.close()
        # Dummy pRF time course array:
        aryPrfTc = None

    # Put column with the indicies of model-parameter-combinations into the
    # output list (in order to be able to put the pRF model time courses into
    # the correct order after the parallelised function):
    vecMdlIdx = aryMdlParamsChnk[:, 0]

    # Output list:
    lstOut = [idxPrc, vecMdlIdx, aryPrfTc]

    # Put output to queue:
    queOut.put(lstOut)
def feed_hdf5_q(dtsPrfTc, objQ, varNumMdls):
    """Drain the FIFO queue into the hdf5 dataset.

    Intended to run in a separate thread: takes exactly *varNumMdls* pRF
    model time courses off the queue and writes each one to the
    corresponding row of the dataset.

    Parameters
    ----------
    dtsPrfTc : h5py dataset
        Dataset within h5py file.
    objQ : queue.Queue
        Queue from which pRF model time courses are retrieved.
    varNumMdls : int
        Number of models (i.e., number of combinations of model parameters
        in the processing chunk) expected on the queue.
    """
    idxWrt = 0
    while idxWrt < varNumMdls:
        # Blocks until the producer puts the next time course on the queue.
        dtsPrfTc[idxWrt, :, :] = objQ.get()
        idxWrt += 1
|
gpl-3.0
|
RK905/pokedex-1
|
pokedex/db/util.py
|
4
|
3883
|
"""Helpers for common ways to work with pokedex queries
These include identifier- and name-based lookup, filtering out base forms
of pokemon, and filtering/ordering by name.
"""
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.functions import coalesce
from sqlalchemy.orm.exc import NoResultFound
from pokedex.db import tables
### Getter
def get(session, table, identifier=None, name=None, id=None, language=None):
    """Get one object from the database.

    session: The session to use (from pokedex.db.connect())
    table: The table to select from (such as pokedex.db.tables.Move)

    identifier: Identifier of the object
    name: The name of the object
    id: The ID number of the object

    language: A Language to use for name and form_name

    All conditions must match, so it's not a good idea to specify more than
    one of identifier/name/id at once.

    If zero or more than one objects matching the criteria are found, the
    appropriate SQLAlchemy exception is raised.
    """
    q = session.query(table)

    if identifier is not None:
        q = q.filter_by(identifier=identifier)
    if name is not None:
        q = filter_name(q, table, name, language)

    if id is None:
        return q.one()

    # ASSUMPTION: id is the primary key of the table.
    result = q.get(id)
    if result is None:
        # Keep the API: mirror query.one()'s behaviour for a missing row.
        raise NoResultFound
    return result
### Helpers
def filter_name(query, table, name, language, name_attribute='name'):
    """Filter a query by name, return the resulting query.

    query: The query to filter
    table: The table of named objects
    name: The name to look for. May be a tuple of alternatives.
    language: The language for "name", or None for the session default
    name_attribute: the attribute to use; defaults to 'name'
    """
    if language is None:
        query = query.filter(getattr(table, name_attribute) == name)
    else:
        names_table = table.names_table
        name_column = getattr(names_table, name_attribute)
        query = query.join(names_table)
        query = query.filter(names_table.foreign_id == table.id)
        query = query.filter(names_table.local_language_id == language.id)
        if isinstance(name, tuple):
            # BUG FIX: was ``query.filter(name_column in name)``, which
            # applies Python's ``in`` operator -- forcing boolean evaluation
            # of SQL comparison expressions (a TypeError in SQLAlchemy)
            # instead of generating an SQL IN clause.  Use the column's
            # ``in_()`` operator to match any of the alternatives.
            query = query.filter(name_column.in_(name))
        else:
            query = query.filter(name_column == name)
    return query
def order_by_name(query, table, language=None, *extra_languages, **kwargs):
    """Order a query by name.

    query: The query to order
    table: Table of the named objects
    language: The language to order names by. If None, use the
        connection default.
    extra_languages: Extra languages to order by, should the translations for
        `language` be incomplete (or ambiguous).

    name (keyword argument): the attribute to use; defaults to 'name'.
    (NOTE(review): the keyword actually accepted is ``name`` -- the code
    pops ``kwargs['name']`` -- not ``name_attribute`` as this docstring
    previously claimed.)

    Uses the identifier as a fallback ordering.
    """
    name_attribute = kwargs.pop('name', 'name')
    if kwargs:
        # Any leftover keyword argument is a caller mistake.
        raise ValueError('Unexpected keyword arguments: %s' % kwargs.keys())
    order_columns = []
    if language is None:
        # Rely on the session's default-language name relationship.
        query = query.outerjoin(table.names_local)
        order_columns.append(func.lower(getattr(table.names_table, name_attribute)))
    else:
        # Treat the explicit language as the first of the fallbacks.
        extra_languages = (language, ) + extra_languages
    for language in extra_languages:
        # One aliased outer join per language so several translations can
        # participate in the same ORDER BY.
        names_table = aliased(table.names_table)
        query = query.outerjoin(names_table)
        query = query.filter(names_table.foreign_id == table.id)
        query = query.filter(names_table.local_language_id == language.id)
        order_columns.append(func.lower(getattr(names_table, name_attribute)))
    # Fall back to the identifier when no translation exists.
    order_columns.append(table.identifier)
    # coalesce picks the first non-NULL name across the joined languages.
    query = query.order_by(coalesce(*order_columns))
    return query
|
mit
|
chhao91/QGIS
|
python/ext-libs/jinja2/testsuite/__init__.py
|
404
|
4641
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite
~~~~~~~~~~~~~~~~
All the unittests of Jinja2. These tests can be executed by
either running run-tests.py using multiple Python versions at
the same time.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2
# Directory containing this test package; used to locate template fixtures.
here = os.path.dirname(os.path.abspath(__file__))

# One loader of each flavour, shared by the loader-related test modules.
dict_loader = loaders.DictLoader({
    'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
# Falls back from the dict loader to the package loader.
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
# Dispatches on the first path segment ('a/...' or 'b/...').
prefix_loader = loaders.PrefixLoader({
    'a': filesystem_loader,
    'b': dict_loader
})
class JinjaTestCase(unittest.TestCase):
    """Base class for the Jinja2 test suite.

    Test modules should use the lowercase ``setup``/``teardown`` hooks and
    the ``assert_*`` helpers below for testing; wrap any standard unittest
    method you need instead of calling it directly.
    """

    def setup(self):
        pass

    def teardown(self):
        pass

    def setUp(self):
        self.setup()

    def tearDown(self):
        self.teardown()

    def assert_equal(self, a, b):
        return self.assertEqual(a, b)

    def assert_raises(self, *args, **kwargs):
        return self.assertRaises(*args, **kwargs)

    def assert_traceback_matches(self, callback, expected_tb):
        """Run *callback*, expect it to raise, and check that the rendered
        traceback matches the ``expected_tb`` regular expression."""
        try:
            callback()
        except Exception:
            rendered = ''.join(format_exception(*sys.exc_info()))
            if re.search(expected_tb.strip(), rendered) is None:
                raise self.fail(
                    'Traceback did not match:\n\n%s\nexpected:\n%s'
                    % (rendered, expected_tb))
        else:
            self.fail('Expected exception')
def find_all_tests(suite):
    """Yields all the tests and their names from a given suite."""
    pending = [suite]
    while pending:
        node = pending.pop()
        try:
            # TestSuite objects are iterable; push their children and
            # keep walking.
            pending.extend(node)
        except TypeError:
            # Not iterable: an actual test case.  Yield it together with
            # its fully qualified dotted name.
            qualname = '%s.%s.%s' % (
                node.__class__.__module__,
                node.__class__.__name__,
                node._testMethodName,
            )
            yield node, qualname
class BetterLoader(unittest.TestLoader):
    """A nicer loader that solves two problems.  First of all we are setting
    up tests from different sources and we're doing this programmatically
    which breaks the default loading logic so this is required anyways.
    Secondly this loader has a nicer interpolation for test names than the
    default one so you can just do ``run-tests.py ViewTestCase`` and it
    will work.
    """

    def getRootSuite(self):
        return suite()

    def loadTestsFromName(self, name, module=None):
        """Resolve *name* against the full suite, matching whole dotted
        segments anywhere in a test's qualified name."""
        root = self.getRootSuite()
        if name == 'suite':
            return root

        matches = [
            testcase
            for testcase, testname in find_all_tests(root)
            if testname == name
            or testname.endswith('.' + name)
            or ('.' + name + '.') in testname
            or testname.startswith(name + '.')
        ]
        if not matches:
            raise LookupError('could not find test case for "%s"' % name)

        if len(matches) == 1:
            return matches[0]
        bundle = unittest.TestSuite()
        for match in matches:
            bundle.addTest(match)
        return bundle
def suite():
    """Assemble the full Jinja2 test suite from all test modules."""
    from jinja2.testsuite import ext, filters, tests, core_tags, \
        loader, inheritance, imports, lexnparse, security, api, \
        regression, debug, utils, bytecode_cache, doctests

    # Order matters only in that it mirrors the historical run order.
    modules = [ext, filters, tests, core_tags, loader, inheritance,
               imports, lexnparse, security, api, regression, debug,
               utils, bytecode_cache]
    result = unittest.TestSuite()
    for module in modules:
        result.addTest(module.suite())

    # doctests will not run on python 3 currently.  Too many issues
    # with that, do not test that on that platform.
    if PY2:
        result.addTest(doctests.suite())
    return result
def main():
    """Runs the testsuite as command line application."""
    try:
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception as e:
        # unittest.main normally exits via SystemExit (which is not an
        # Exception and passes through); anything else is reported as a
        # one-line error instead of a raw traceback.
        print('Error: %s' % e)
|
gpl-2.0
|
alangwansui/mtl_ordercenter
|
openerp/addons/l10n_in_hr_payroll/wizard/hr_yearly_salary_detail.py
|
51
|
2418
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class yearly_salary_detail(osv.osv_memory):
    # Transient (in-memory) wizard model that collects the parameters for
    # the yearly salary detail report.
    _name = 'yearly.salary.detail'
    _description = 'Hr Salary Employee By Category Report'
    _columns = {
        # Employees to include; stored via the payroll_emp_rel relation
        # table.
        'employee_ids': fields.many2many('hr.employee', 'payroll_emp_rel', 'payroll_id', 'employee_id', 'Employees', required=True),
        'date_from': fields.date('Start Date', required=True),
        'date_to': fields.date('End Date', required=True),
    }
    _defaults = {
        # Evaluated when the wizard opens: January 1st of the current year
        # through today.
        'date_from': lambda *a: time.strftime('%Y-01-01'),
        'date_to': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """
        To get the date and print the report

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return: return report
        """
        if context is None:
            context = {}
        # 'ids' carries the records the report runs over; 'form' carries
        # the wizard's own field values.
        datas = {'ids': context.get('active_ids', [])}
        res = self.read(cr, uid, ids, context=context)
        res = res and res[0] or {}
        datas.update({'form': res})
        # Action dictionary understood by the OpenERP client: launch the
        # 'salary.detail.byyear' report with the collected data.
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'salary.detail.byyear',
            'datas': datas,
        }

# Instantiate to register the model with the OSV pool (legacy OpenERP
# convention).
yearly_salary_detail()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
rlizana/l10n-spain
|
l10n_es_aeat_mod340/__init__.py
|
3
|
1249
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 20011 Ting (http://www.ting.es)
# Copyright (c) 2011-2013 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas Izquierdo <ignacio@acysos.com>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import report
from . import wizard
from . import mod340
from . import res_partner
from . import account_invoice
from . import account
|
agpl-3.0
|
shakamunyi/neutron
|
neutron/plugins/ml2/drivers/arista/exceptions.py
|
23
|
1036
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions used by Arista ML2 Mechanism Driver."""
from neutron.common import exceptions
class AristaRpcError(exceptions.NeutronException):
    """Raised when an RPC exchange with the Arista EOS server fails.

    The full error text is supplied by the raiser via the ``msg`` keyword
    argument, which is interpolated into ``message``.
    """
    message = _('%(msg)s')
class AristaConfigError(exceptions.NeutronException):
    """Raised for invalid or missing Arista driver configuration.

    The full error text is supplied via the ``msg`` keyword argument.
    """
    message = _('%(msg)s')
class AristaServicePluginRpcError(exceptions.NeutronException):
    """Raised when an RPC call made by the Arista service plugin fails.

    The full error text is supplied via the ``msg`` keyword argument.
    """
    message = _('%(msg)s')
class AristaServicePluginConfigError(exceptions.NeutronException):
    """Raised for invalid or missing Arista service plugin configuration.

    The full error text is supplied via the ``msg`` keyword argument.
    """
    message = _('%(msg)s')
|
apache-2.0
|
benoitsteiner/tensorflow
|
tensorflow/contrib/keras/api/keras/callbacks/__init__.py
|
30
|
1766
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callback classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.callbacks import BaseLogger
from tensorflow.contrib.keras.python.keras.callbacks import Callback
from tensorflow.contrib.keras.python.keras.callbacks import CSVLogger
from tensorflow.contrib.keras.python.keras.callbacks import EarlyStopping
from tensorflow.contrib.keras.python.keras.callbacks import History
from tensorflow.contrib.keras.python.keras.callbacks import LambdaCallback
from tensorflow.contrib.keras.python.keras.callbacks import LearningRateScheduler
from tensorflow.contrib.keras.python.keras.callbacks import ModelCheckpoint
from tensorflow.contrib.keras.python.keras.callbacks import ProgbarLogger
from tensorflow.contrib.keras.python.keras.callbacks import ReduceLROnPlateau
from tensorflow.contrib.keras.python.keras.callbacks import RemoteMonitor
from tensorflow.contrib.keras.python.keras.callbacks import TensorBoard
del absolute_import
del division
del print_function
|
apache-2.0
|
MurpheyLab/trep
|
examples/dual_pendulums.py
|
1
|
1282
|
import trep
from trep import tx,ty,tz,rx,ry,rz
import time
import trep.visual as visual
dt = 0.01
tf = 10.0
def simulate_system(system):
    """Integrate *system* from t=0 up to the module-level horizon ``tf``.

    The system's current configuration is taken as the initial condition
    for a midpoint variational integrator stepped at interval ``dt``.

    Returns a ``(times, configurations)`` pair of lists, one entry per
    integrator step (including the initial state).
    """
    initial_q = system.q
    # Build the variational integrator and seed it with two identical
    # configurations dt apart (i.e. the system starts at rest).
    integrator = trep.MidpointVI(system)
    integrator.initialize_from_configs(0.0, initial_q, dt, initial_q)

    times = [integrator.t2]
    configs = [integrator.q2]
    while integrator.t1 < tf:
        integrator.step(integrator.t2 + dt)
        configs.append(integrator.q2)
        times.append(integrator.t2)
    return (times, configs)
# Build the model: two pendulums (masses on 2-unit arms rotating about
# the x axis), the second offset 1 unit along y, coupled by a linear
# spring and damper between the two masses, under gravity.
system = trep.System()
system.import_frames([
    rx('theta1'), [
        tz(2, mass=1, name='pend1')
    ],
    ty(1), [
        rx('theta2'), [
            tz(2, mass=1, name='pend2')
        ]]
    ])
trep.potentials.LinearSpring(system, 'pend1', 'pend2', k=20, x0=1)
trep.forces.LinearDamper(system, 'pend1', 'pend2', c=1)
trep.potentials.Gravity(system, name="Gravity")
# Initial pendulum angles (radians).
system.q = [3,-3]
# Simulate
# NOTE(review): this file is Python 2 (print statement below), and
# time.clock() was removed in Python 3.8 -- porting would need
# time.perf_counter() and print().
start = time.clock()
(t, q) = simulate_system(system)
finish = time.clock()
# Display
print "Simulation: dt=%f, tf=%f, runtime=%f s" % (dt, tf, finish-start)
visual.visualize_3d([ visual.VisualItem3D(system, t, q) ])
|
gpl-3.0
|
crmccreary/openerp_server
|
openerp/service/netrpc_server.py
|
14
|
6163
|
# -*- coding: utf-8 -*-
#
# Copyright P. Christeas <p_christ@hol.gr> 2008,2009
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: NET-RPC Server
""" This file contains instance of the net-rpc server
"""
import logging
import select
import socket
import sys
import threading
import traceback
import openerp
import openerp.netsvc as netsvc
import openerp.tiny_socket as tiny_socket
import openerp.tools as tools
_logger = logging.getLogger(__name__)
class TinySocketClientThread(threading.Thread):
    """One thread per connected NET-RPC client.

    Reads framed requests off the socket, dispatches them through
    ``netsvc.dispatch_rpc`` and writes the result (or a serialized
    exception) back, until the peer disconnects or idles out.
    """
    def __init__(self, sock, threads):
        # Name the thread after the peer address for easier debugging.
        spn = sock and sock.getpeername()
        spn = 'netrpc-client-%s:%s' % spn[0:2]
        threading.Thread.__init__(self, name=spn)
        self.sock = sock
        # Only at the server side, use a big timeout: close the
        # clients connection when they're idle for 20min.
        self.sock.settimeout(1200)
        # Shared list owned by the server thread; we remove ourselves
        # from it when this thread terminates.
        self.threads = threads
    def run(self):
        """Serve the client until disconnect, timeout or fatal error."""
        self.running = True
        try:
            ts = tiny_socket.mysocket(self.sock)
        except Exception:
            self.threads.remove(self)
            self.running = False
            return False
        while self.running:
            try:
                # msg is (service, method, *args); forward to the RPC
                # dispatcher and send the answer back on the same socket.
                msg = ts.myreceive()
                result = netsvc.dispatch_rpc(msg[0], msg[1], msg[2:])
                ts.mysend(result)
            except socket.timeout:
                #terminate this channel because other endpoint is gone
                break
            except Exception, e:
                try:
                    # Convert the exception to the legacy wire format and
                    # ship it (with traceback) to the client, then close.
                    valid_exception = Exception(netrpc_handle_exception_legacy(e))
                    valid_traceback = getattr(e, 'traceback', sys.exc_info())
                    formatted_traceback = "".join(traceback.format_exception(*valid_traceback))
                    _logger.debug("netrpc: communication-level exception", exc_info=True)
                    ts.mysend(valid_exception, exception=True, traceback=formatted_traceback)
                    break
                except Exception, ex:
                    #terminate this channel if we can't properly send back the error
                    _logger.exception("netrpc: cannot deliver exception message to client")
                    break
        # Cleanup: close the socket and deregister from the thread list.
        netsvc.close_socket(self.sock)
        self.sock = None
        self.threads.remove(self)
        self.running = False
        return True
    def stop(self):
        # Cooperative stop: the run() loop checks self.running each turn.
        self.running = False
def netrpc_handle_exception_legacy(e):
    """Serialize an exception into the legacy NET-RPC error string.

    The isinstance checks are order-sensitive: ``except_osv`` carries its
    own name/value pair, the generic openerp exception types are tagged
    with their class name, and anything else falls back to a plain
    unicode conversion.
    """
    if isinstance(e, openerp.osv.osv.except_osv):
        return 'warning -- ' + e.name + '\n\n' + e.value
    elif isinstance(e, openerp.exceptions.Warning):
        return 'warning -- Warning\n\n' + str(e)
    elif isinstance(e, openerp.exceptions.AccessError):
        return 'warning -- AccessError\n\n' + str(e)
    elif isinstance(e, openerp.exceptions.AccessDenied):
        return 'AccessDenied ' + str(e)
    else:
        return openerp.tools.exception_to_unicode(e)
class TinySocketServerThread(threading.Thread,netsvc.Server):
    """Accept loop for the NET-RPC service.

    Listens on the configured interface/port and spawns one
    ``TinySocketClientThread`` per accepted connection.
    """
    def __init__(self, interface, port, secure=False):
        # NOTE(review): the 'secure' parameter is accepted but never
        # used in this class -- presumably kept for API compatibility.
        threading.Thread.__init__(self, name="NetRPCDaemon-%d"%port)
        netsvc.Server.__init__(self)
        self.__port = port
        self.__interface = interface
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets the server rebind quickly after a restart.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((self.__interface, self.__port))
        self.socket.listen(5)
        # Live client threads; shared with each TinySocketClientThread,
        # which removes itself on exit.
        self.threads = []
        _logger.info("starting NET-RPC service on %s:%s", interface or '0.0.0.0', port)
    def run(self):
        """Accept connections until stop() clears self.running."""
        try:
            self.running = True
            while self.running:
                # select with a timeout so the loop can notice a stop()
                # request even when no client is connecting.
                fd_sets = select.select([self.socket], [], [], self._busywait_timeout)
                if not fd_sets[0]:
                    continue
                (clientsocket, address) = self.socket.accept()
                ct = TinySocketClientThread(clientsocket, self.threads)
                clientsocket = None
                self.threads.append(ct)
                ct.start()
                lt = len(self.threads)
                if (lt > 10) and (lt % 10 == 0):
                    # Not many threads should be serving at the same time, so log
                    # their abuse.
                    _logger.debug("Netrpc: %d threads", len(self.threads))
            self.socket.close()
        except Exception, e:
            _logger.warning("Netrpc: closing because of exception %s" % str(e))
            self.socket.close()
            return False
    def stop(self):
        """Stop the accept loop and ask every client thread to finish."""
        self.running = False
        for t in self.threads:
            t.stop()
        self._close_socket()
    def stats(self):
        """Return a human-readable status line per client thread."""
        res = "Net-RPC: " + ( (self.running and "running") or "stopped")
        i = 0
        for t in self.threads:
            i += 1
            res += "\nNet-RPC #%d: %s " % (i, t.name)
            if t.isAlive():
                res += "running"
            else:
                res += "finished"
            if t.sock:
                res += ", socket"
        return res
# Singleton NET-RPC server thread, created by init_servers() when the
# 'netrpc' option is enabled in the configuration.
netrpcd = None
def init_servers():
    """Instantiate the NET-RPC server thread if enabled in the config.

    Reads the 'netrpc', 'netrpc_interface' and 'netrpc_port' options;
    defaults to listening on every interface, port 8070. The thread is
    created but not started here.
    """
    global netrpcd
    if tools.config.get('netrpc', False):
        netrpcd = TinySocketServerThread(
            tools.config.get('netrpc_interface', ''),
            int(tools.config.get('netrpc_port', 8070)))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
berr/stagger
|
test/specs.py
|
16
|
23395
|
#!/usr/bin/env python3
#
# specs.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
import warnings
from stagger.errors import *
from stagger.specs import *
from stagger.frames import *
class SpecTestCase(unittest.TestCase):
    """Unit tests for the frame-field spec classes in stagger.specs.

    Each test exercises a spec's three-method contract: ``read`` (parse a
    value off the front of a byte string, returning (value, rest)),
    ``write`` (serialize a value to bytes) and ``validate`` (normalize or
    reject a candidate value).
    """
    def testByteSpec(self):
        """ByteSpec: single unsigned byte, range 0-255."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = ByteSpec("test")
        # spec.read
        self.assertEqual(spec.read(frame, b"\x01\x02"), (1, b"\x02"))
        self.assertEqual(spec.read(frame, b"\x01"), (1, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        # spec.write
        self.assertEqual(spec.write(frame, 5), b"\x05")
        # spec.validate
        self.assertEqual(spec.validate(frame, 5), 5)
        self.assertRaises(ValueError, spec.validate, frame, -1)
        self.assertRaises(ValueError, spec.validate, frame, 256)
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
    def testIntegerSpec(self):
        """IntegerSpec: fixed or frame-attribute-driven width, big-endian."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = IntegerSpec("test", 16)
        # spec.read
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"), (258, b"\x03\x04"))
        self.assertEqual(spec.read(frame, b"\x01\x02"), (258, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertRaises(EOFError, spec.read, frame, b"\x01")
        # spec.write
        self.assertEqual(spec.write(frame, 1), b"\x00\x01")
        self.assertEqual(spec.write(frame, 258), b"\x01\x02")
        # spec.validate
        self.assertEqual(spec.validate(frame, 5), 5)
        self.assertRaises(ValueError, spec.validate, frame, -1)
        self.assertRaises(ValueError, spec.validate, frame, 65537)
        self.assertRaises(ValueError, spec.validate, frame, 65536)
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
        # Now try specifying an indirect width
        # (the width in bits is looked up on the frame attribute "bits").
        spec = IntegerSpec("test", "bits")
        # spec.read
        frame.bits = 8
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"), (1, b"\x02\x03\x04"))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertEqual(spec.read(frame, b"\x01"), (1, b""))
        frame.bits = 16
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"), (258, b"\x03\x04"))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertRaises(EOFError, spec.read, frame, b"\x01")
        # spec.write
        frame.bits = 8
        self.assertEqual(spec.write(frame, 1), b"\x01")
        self.assertRaises(ValueError, spec.write, frame, 258)
        frame.bits = 16
        self.assertEqual(spec.write(frame, 1), b"\x00\x01")
        self.assertEqual(spec.write(frame, 258), b"\x01\x02")
        # spec.validate
        frame.bits = 8
        self.assertEqual(spec.validate(frame, 5), 5)
        self.assertRaises(ValueError, spec.validate, frame, -1)
        self.assertRaises(ValueError, spec.validate, frame, 256)
        self.assertRaises(ValueError, spec.validate, frame, 65536)
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
        frame.bits = 16
        self.assertEqual(spec.validate(frame, 5), 5)
        self.assertRaises(ValueError, spec.validate, frame, -1)
        self.assertEqual(spec.validate(frame, 256), 256)
        self.assertRaises(ValueError, spec.validate, frame, 65536)
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
    def testSignedIntegerSpec(self):
        """SignedIntegerSpec: two's-complement big-endian integers."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = SignedIntegerSpec("test", 16)
        # spec.read
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"), (258, b"\x03\x04"))
        self.assertEqual(spec.read(frame, b"\x01\x02"), (258, b""))
        self.assertEqual(spec.read(frame, b"\xFF\xFF"), (-1, b""))
        self.assertEqual(spec.read(frame, b"\x80\x00"), (-32768, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertRaises(EOFError, spec.read, frame, b"\x01")
        # spec.write
        self.assertEqual(spec.write(frame, 1), b"\x00\x01")
        self.assertEqual(spec.write(frame, 258), b"\x01\x02")
        self.assertEqual(spec.write(frame, -1), b"\xFF\xFF")
        self.assertEqual(spec.write(frame, -2), b"\xFF\xFE")
        self.assertEqual(spec.write(frame, -32768), b"\x80\x00")
        # spec.validate
        self.assertEqual(spec.validate(frame, 5), 5)
        self.assertEqual(spec.validate(frame, -1), -1)
        self.assertEqual(spec.validate(frame, 32767), 32767)
        self.assertEqual(spec.validate(frame, -32768), -32768)
        self.assertRaises(ValueError, spec.validate, frame, 32768)
        self.assertRaises(ValueError, spec.validate, frame, -32769)
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
    def testRVADIntegerSpec(self):
        """RVADIntegerSpec: magnitude integer whose sign lives in a
        bitfield on the frame (frame.signs, bit given by signbit)."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = RVADIntegerSpec("test", "bits", signbit=4)
        frame.signs = 0
        frame.bits = 16
        # spec.read
        frame.signs = 255
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"),
                         (258, b"\x03\x04"))
        frame.signs = 16
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"),
                         (258, b"\x03\x04"))
        frame.signs = 0
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"),
                         (-258, b"\x03\x04"))
        frame.signs = 239
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04"),
                         (-258, b"\x03\x04"))
        frame.signs = 255
        self.assertEqual(spec.read(frame, b"\x01\x02"), (258, b""))
        self.assertEqual(spec.read(frame, b"\xFF\xFF"), (65535, b""))
        self.assertEqual(spec.read(frame, b"\x80\x00"), (32768, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertRaises(EOFError, spec.read, frame, b"\x01")
        frame.signs = 0
        self.assertEqual(spec.read(frame, b"\x01\x02"), (-258, b""))
        self.assertEqual(spec.read(frame, b"\xFF\xFF"), (-65535, b""))
        self.assertEqual(spec.read(frame, b"\x80\x00"), (-32768, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertRaises(EOFError, spec.read, frame, b"\x01")
        # spec.write
        frame.signs = 0
        self.assertEqual(spec.write(frame, 1), b"\x00\x01")
        self.assertEqual(spec.write(frame, 258), b"\x01\x02")
        self.assertEqual(spec.write(frame, 32768), b"\x80\x00")
        self.assertEqual(frame.signs, 0) # Write shouldn't update signs
        self.assertEqual(spec.write(frame, -1), b"\x00\x01")
        self.assertEqual(spec.write(frame, -258), b"\x01\x02")
        self.assertEqual(spec.write(frame, -32768), b"\x80\x00")
        self.assertEqual(frame.signs, 0)
        # spec.validate
        frame.signs = 0
        self.assertEqual(spec.validate(frame, 5), 5)
        self.assertEqual(frame.signs, 16) # Validate updates signs
        frame.signs = 0
        self.assertEqual(spec.validate(frame, -1), -1)
        self.assertEqual(frame.signs, 0)
        frame.signs = 0
        self.assertEqual(spec.validate(frame, 65535), 65535)
        self.assertEqual(frame.signs, 16)
        frame.signs = 0
        self.assertEqual(spec.validate(frame, -65535), -65535)
        self.assertEqual(frame.signs, 0)
        frame.signs = 0
        self.assertRaises(ValueError, spec.validate, frame, 65536)
        self.assertEqual(frame.signs, 16)
        frame.signs = 0
        self.assertRaises(ValueError, spec.validate, frame, -65536)
        self.assertEqual(frame.signs, 0)
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
    def testVarIntSpec(self):
        """VarIntSpec: length-prefixed integer (first byte = bit count)."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = VarIntSpec("test")
        # spec.read
        self.assertEqual(spec.read(frame, b"\x10\x01\x02\x03"), (258, b"\x03"))
        self.assertEqual(spec.read(frame, b"\x10\xFF\xFF"), (65535, b""))
        self.assertEqual(spec.read(frame, b"\x08\x05"), (5, b""))
        self.assertEqual(spec.read(frame, b"\x01\x05"), (5, b""))
        self.assertEqual(spec.read(frame, b"\x02\x05"), (5, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertRaises(EOFError, spec.read, frame, b"\x08")
        self.assertRaises(EOFError, spec.read, frame, b"\x10\x01")
        # spec.write
        self.assertEqual(spec.write(frame, 0), b"\x20\x00\x00\x00\x00")
        self.assertEqual(spec.write(frame, 1), b"\x20\x00\x00\x00\x01")
        self.assertEqual(spec.write(frame, 258), b"\x20\x00\x00\x01\x02")
        self.assertEqual(spec.write(frame, 1 << 32), b"\x40\x00\x00\x00\x01\x00\x00\x00\x00")
        # spec.validate
        self.assertEqual(spec.validate(frame, 5), 5)
        self.assertEqual(spec.validate(frame, 1 << 32), 1 << 32)
        # NOTE(review): '1 << 64 + 3' parses as 1 << 67 due to operator
        # precedence; possibly (1 << 64) + 3 was intended. The assertion
        # holds either way since both sides use the same expression.
        self.assertEqual(spec.validate(frame, 1 << 64 + 3), 1 << 64 + 3)
        self.assertRaises(ValueError, spec.validate, frame, -32769)
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
    def testBinaryDataSpec(self):
        """BinaryDataSpec: consumes the remaining bytes verbatim."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = BinaryDataSpec("test")
        # spec.read
        self.assertEqual(spec.read(frame, b""), (b"", b""))
        self.assertEqual(spec.read(frame, b"\x01"), (b"\x01", b""))
        self.assertEqual(spec.read(frame, bytes(range(100))), (bytes(range(100)), b""))
        # spec.write
        self.assertEqual(spec.write(frame, b""), b"")
        self.assertEqual(spec.write(frame, b"\x01\x02"), b"\x01\x02")
        self.assertEqual(spec.write(frame, bytes(range(100))), bytes(range(100)))
        # spec.validate
        self.assertEqual(spec.validate(frame, b""), b"")
        self.assertEqual(spec.validate(frame, b"12"), b"12")
        self.assertRaises(TypeError, spec.validate, frame, 1)
        self.assertRaises(TypeError, spec.validate, frame, [1, 2])
        self.assertRaises(TypeError, spec.validate, frame, "foobar")
    def testSimpleStringSpec(self):
        """SimpleStringSpec: fixed-width latin-1 string."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = SimpleStringSpec("test", 6)
        # spec.read
        self.assertEqual(spec.read(frame, b"Foobar"), ("Foobar", b""))
        self.assertEqual(spec.read(frame, b"Foobarbaz"), ("Foobar", b"baz"))
        self.assertEqual(spec.read(frame, b"F\xF6\xF8b\xE1r"), ("F\u00F6\u00F8b\u00E1r", b""))
        # spec.write
        self.assertEqual(spec.write(frame, "Foobar"), b"Foobar")
        self.assertEqual(spec.write(frame, "F\u00F6\u00F8b\u00E1r"), b"F\xF6\xF8b\xE1r")
        # spec.validate
        self.assertEqual(spec.validate(frame, "Foobar"), "Foobar")
        self.assertEqual(spec.validate(frame, "F\u00F6\u00F8b\u00E1r"), "F\u00F6\u00F8b\u00E1r")
        self.assertRaises(TypeError, spec.validate, frame, 1)
        self.assertRaises(TypeError, spec.validate, frame, [1, 2])
        self.assertRaises(TypeError, spec.validate, frame, b"foobar")
        self.assertRaises(UnicodeEncodeError, spec.validate, frame, "\u2011oobar")
    def nullstringhelper(self, frame, spec):
        """Shared assertions for the two NUL-terminated string specs."""
        # spec.read
        self.assertEqual(spec.read(frame, b""), ("", b""))
        self.assertEqual(spec.read(frame, b"\x00"), ("", b""))
        self.assertEqual(spec.read(frame, b"Foo"), ("Foo", b""))
        self.assertEqual(spec.read(frame, b"Foo\x00"), ("Foo", b""))
        self.assertEqual(spec.read(frame, b"Foo\x00Bar"), ("Foo", b"Bar"))
        self.assertEqual(spec.read(frame, b"F\xF6\xF8b\xE1r\x00Bar"), ("F\u00F6\u00F8b\u00E1r", b"Bar"))
        # spec.write
        self.assertEqual(spec.write(frame, "Foobar"), b"Foobar\x00")
        self.assertEqual(spec.write(frame, "F\u00F6\u00F8b\u00E1r"), b"F\xF6\xF8b\xE1r\x00")
        # spec.validate
        self.assertEqual(spec.validate(frame, "Foobar"), "Foobar")
        self.assertEqual(spec.validate(frame, "F\u00F6\u00F8b\u00E1r"), "F\u00F6\u00F8b\u00E1r")
        self.assertRaises(TypeError, spec.validate, frame, 1)
        self.assertRaises(TypeError, spec.validate, frame, [1, 2])
        self.assertRaises(TypeError, spec.validate, frame, b"foobar")
        self.assertRaises(UnicodeEncodeError, spec.validate, frame, "\u2011oobar")
    def testNullTerminatedStringSpec(self):
        """NullTerminatedStringSpec: stops at the first NUL byte."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = NullTerminatedStringSpec("test")
        self.nullstringhelper(frame, spec)
        self.assertEqual(spec.read(frame, b"\x00\x00"), ("", b"\x00"))
        self.assertEqual(spec.read(frame, b"Foo\x00\x00"), ("Foo", b"\x00"))
        self.assertEqual(spec.read(frame, b"Foo\x00Bar\x00"), ("Foo", b"Bar\x00"))
        self.assertEqual(spec.read(frame, b"\x00Bar\x00"), ("", b"Bar\x00"))
    def testURLStringSpec(self):
        """URLStringSpec: like NullTerminatedStringSpec but skips empty
        strings (leading NULs) when data follows."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = URLStringSpec("test")
        self.nullstringhelper(frame, spec)
        self.assertEqual(spec.read(frame, b"\x00\x00"), ("", b""))
        self.assertEqual(spec.read(frame, b"Foo\x00\x00"), ("Foo", b"\x00"))
        self.assertEqual(spec.read(frame, b"Foo\x00Bar\x00"), ("Foo", b"Bar\x00"))
        self.assertEqual(spec.read(frame, b"\x00Bar\x00"), ("Bar", b""))
    def testEncodingSpec(self):
        """EncodingSpec: one byte selecting an ID3v2 text encoding (0-3);
        validate also accepts encoding names."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = EncodingSpec("test")
        # spec.read
        self.assertEqual(spec.read(frame, b"\x01\x02"), (1, b"\x02"))
        self.assertEqual(spec.read(frame, b"\x01"), (1, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        self.assertRaises(FrameError, spec.read, frame, b"\x04")
        # spec.write
        self.assertEqual(spec.write(frame, 3), b"\x03")
        # spec.validate
        self.assertEqual(spec.validate(frame, 3), 3)
        self.assertEqual(spec.validate(frame, "utf8"), 3)
        self.assertEqual(spec.validate(frame, "UTF-8"), 3)
        self.assertRaises(ValueError, spec.validate, frame, -1)
        self.assertRaises(ValueError, spec.validate, frame, 4)
        self.assertRaises(ValueError, spec.validate, frame, "foobar")
        self.assertRaises(TypeError, spec.validate, frame, 1.5)
    def testEncodedStringSpec(self):
        """EncodedStringSpec: string in the frame's current encoding,
        with the encoding-appropriate terminator."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = EncodedStringSpec("test")
        # spec.read
        self.assertEqual(spec.read(frame, b""), ("", b""))
        self.assertEqual(spec.read(frame, b"Foo"), ("Foo", b""))
        self.assertEqual(spec.read(frame, b"Foobar\x00"), ("Foobar", b""))
        self.assertEqual(spec.read(frame, b"\x00Foobar"), ("", b"Foobar"))
        frame.encoding = "utf-16-be"
        self.assertEqual(spec.read(frame, b"\x00F\x00o\x00o"), ("Foo", b""))
        self.assertEqual(spec.read(frame, b"\x00F\x00o\x00o\x00\x00"), ("Foo", b""))
        self.assertEqual(spec.read(frame, b"\x00F\x01\x00\x00a"), ("F\u0100a", b""))
        # Broken terminal character
        self.assertRaises(EOFError, spec.read, frame, b"\x00F\x00")
        # spec.write
        frame.encoding = "latin-1"
        self.assertEqual(spec.write(frame, ""), b"\x00")
        self.assertEqual(spec.write(frame, "Foobar"), b"Foobar\x00")
        self.assertRaises(UnicodeEncodeError, spec.write, frame, "\u0100")
        frame.encoding = "utf-8"
        self.assertEqual(spec.write(frame, ""), b"\x00")
        self.assertEqual(spec.write(frame, "Foobar"), b"Foobar\x00")
        self.assertEqual(spec.write(frame, "\u0100"), b"\xC4\x80\x00")
        frame.encoding = "utf-16"
        # utf-16 output may carry either BOM byte order, hence assertTrue
        # with both alternatives.
        self.assertTrue(spec.write(frame, "") in [b"\xFE\xFF\x00\x00", b"\xFF\xFE\x00\x00"])
        self.assertTrue(spec.write(frame, "B") in [b"\xFE\xFF\x00B\x00\x00", b"\xFF\xFEB\x00\x00\x00"])
        frame.encoding = "utf-16-be"
        self.assertEqual(spec.write(frame, ""), b"\x00\x00")
        self.assertEqual(spec.write(frame, "B"), b"\x00B\x00\x00")
        # spec.validate
        for encoding in ["latin-1", "utf-16", "utf-16-be", "utf-8"]:
            frame.encoding = encoding
            self.assertEqual(spec.validate(frame, ""), "")
            self.assertEqual(spec.validate(frame, "foo"), "foo")
            self.assertEqual(spec.validate(frame, "\xF0"), "\xF0")
            self.assertRaises(TypeError, spec.validate, frame, -1)
            self.assertRaises(TypeError, spec.validate, frame, 4)
            self.assertRaises(TypeError, spec.validate, frame, 3.4)
        frame.encoding = "latin-1"
        self.assertRaises(UnicodeEncodeError, spec.validate, frame, "\u0100")
    def testSequenceSpec(self):
        """SequenceSpec: repeats a single sub-spec until data runs out."""
        frame = object()
        spec = SequenceSpec("test", NullTerminatedStringSpec("text"))
        # spec.read
        self.assertEqual(spec.read(frame, b""), ([], b""))
        self.assertEqual(spec.read(frame, b"Foo"), (["Foo"], b""))
        self.assertEqual(spec.read(frame, b"Foo\x00Bar\x00"), (["Foo", "Bar"], b""))
        self.assertEqual(spec.read(frame, b"\x00Foobar"), (["", "Foobar"], b""))
        self.assertEqual(spec.read(frame, b"\x00" * 10), ([""] * 10, b""))
        # spec.write
        self.assertEqual(spec.write(frame, ""), b"\x00")
        self.assertEqual(spec.write(frame, "Foobar"), b"Foobar\x00")
        self.assertEqual(spec.write(frame, [""] * 10), b"\x00" * 10)
        self.assertEqual(spec.write(frame, ["Foo"] * 10), b"Foo\x00" * 10)
        # spec.validate
        self.assertEqual(spec.validate(frame, ""), [""])
        self.assertEqual(spec.validate(frame, [""]), [""])
        self.assertEqual(spec.validate(frame, "foo"), ["foo"])
        self.assertEqual(spec.validate(frame, ["foo"]), ["foo"])
        self.assertEqual(spec.validate(frame, ["foo"] * 10), ["foo"] * 10)
        self.assertRaises(TypeError, spec.validate, frame, -1)
        self.assertRaises(TypeError, spec.validate, frame, 4)
        self.assertRaises(TypeError, spec.validate, frame, 3.4)
    def testMultiSpec(self):
        """MultiSpec: repeats a tuple of sub-specs, yielding tuples."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = MultiSpec("test",
                         NullTerminatedStringSpec("text"),
                         IntegerSpec("value", 16))
        # spec.read
        self.assertEqual(spec.read(frame, b""), ([], b""))
        self.assertRaises(EOFError, spec.read, frame, b"Foo")
        self.assertEqual(spec.read(frame, b"Foo\x00\x01\x02"),
                         ([("Foo", 258)], b""))
        self.assertEqual(spec.read(frame, b"Foo\x00\x01\x02Bar\x00\x02\x03"),
                         ([("Foo", 258), ("Bar", 515)], b""))
        self.assertEqual(spec.read(frame, b"\x00\x01\x02Foobar\x00\x02\x03"),
                         ([("", 258), ("Foobar", 515)], b""))
        # spec.write
        self.assertEqual(spec.write(frame, []), b"")
        self.assertEqual(spec.write(frame, [("Foo", 1)]), b"Foo\x00\x00\x01")
        self.assertEqual(spec.write(frame, [("Foo", 1), ("Bar", 2)]),
                         b"Foo\x00\x00\x01Bar\x00\x00\x02")
        self.assertEqual(spec.write(frame, [("Foo", 1), ("Bar", 2)] * 10),
                         b"Foo\x00\x00\x01Bar\x00\x00\x02" * 10)
        # spec.validate
        self.assertEqual(spec.validate(frame, []), [])
        self.assertEqual(spec.validate(frame, [["Foo", 1]] * 10), [("Foo", 1)] * 10)
        self.assertRaises(TypeError, spec.validate, frame, 1)
        self.assertRaises(TypeError, spec.validate, frame, "foo")
        self.assertRaises(ValueError, spec.validate, frame, [["Foo", 2, 2]])
    def testASPISpec(self):
        """ASPISpec: N integers of b bytes each, both counts taken from
        attributes on the frame."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = ASPISpec("test")
        # spec.read
        frame.b = 1
        frame.N = 5
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04\x05\x06\x07"),
                         ([1, 2, 3, 4, 5], b"\x06\x07"))
        self.assertRaises(EOFError, spec.read, frame, b"\x01\x02")
        frame.b = 2
        frame.N = 2
        self.assertEqual(spec.read(frame, b"\x01\x02\x03\x04\x05\x06\x07"),
                         ([258, 772], b"\x05\x06\x07"))
        self.assertRaises(EOFError, spec.read, frame, b"\x01\x02\x03")
        # spec.write
        frame.b = 1
        frame.N = 4
        self.assertEqual(spec.write(frame, [1, 2, 3, 4]), b"\x01\x02\x03\x04")
        frame.b = 2
        self.assertEqual(spec.write(frame, [1, 2, 3, 4]), b"\x00\x01\x00\x02\x00\x03\x00\x04")
        # spec.validate
        frame.N = 4
        frame.b = 1
        self.assertRaises(ValueError, spec.validate, frame, [])
        self.assertEqual(spec.validate(frame, [1, 2, 3, 4]), [1, 2, 3, 4])
        self.assertEqual(spec.validate(frame, b"\x01\x02\x03\x04"), [1, 2, 3, 4])
        self.assertRaises(TypeError, spec.validate, frame, 1)
        self.assertRaises(TypeError, spec.validate, frame, "1234")
        self.assertRaises(ValueError, spec.validate, frame, [1, 2, 3])
        self.assertRaises(ValueError, spec.validate, frame, [1, 2, 3, 4, 5])
    def testPictureTypeSpec(self):
        """PictureTypeSpec: APIC picture-type byte (0-20); validate also
        accepts the type's name, case-insensitively."""
        frame = TextFrame(frameid="TEST", encoding=3)
        spec = PictureTypeSpec("test")
        # spec.read
        self.assertEqual(spec.read(frame, b"\x01\x02"), (1, b"\x02"))
        self.assertEqual(spec.read(frame, b"\x01"), (1, b""))
        self.assertRaises(EOFError, spec.read, frame, b"")
        # spec.write
        self.assertEqual(spec.write(frame, 3), b"\x03")
        # spec.validate
        self.assertEqual(spec.validate(frame, 3), 3)
        self.assertEqual(spec.validate(frame, "Front Cover"), 3)
        self.assertEqual(spec.validate(frame, "front cover"), 3)
        self.assertRaises(ValueError, spec.validate, frame, -1)
        self.assertRaises(ValueError, spec.validate, frame, 21)
        self.assertRaises(ValueError, spec.validate, frame, "foobar")
        self.assertRaises(TypeError, spec.validate, frame, 1.5)
# Suite object referenced by defaultTest below and by the project's
# top-level test runner.
suite = unittest.TestLoader().loadTestsFromTestCase(SpecTestCase)
if __name__ == "__main__":
    # NOTE(review): the bare name 'stagger' is never imported in this
    # module (only star-imports from stagger.errors/specs/frames appear
    # above); unless one of those star-imports binds 'stagger', this
    # line raises NameError -- TODO confirm against stagger.errors.
    warnings.simplefilter("always", stagger.Warning)
    unittest.main(defaultTest="suite")
|
bsd-2-clause
|
yoki/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/filesystem.py
|
126
|
9517
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper object for the file system / source tree."""
import codecs
import errno
import exceptions
import glob
import hashlib
import os
import shutil
import sys
import tempfile
import time
class FileSystem(object):
    """FileSystem interface for webkitpy.
    Unless otherwise noted, all paths are allowed to be either absolute
    or relative."""
    # NOTE(review): this class relies on Python 2-only features (the
    # `exceptions` module, the `file` builtin, and `except Error, e`
    # syntax); it is not Python 3 compatible as written.
    sep = os.sep  # platform-specific path separator ('/' or '\\')
    pardir = os.pardir  # parent-directory path component ('..')
    def abspath(self, path):
        # Absolute, normalized version of `path` (symlinks not resolved).
        return os.path.abspath(path)
    def realpath(self, path):
        # Canonical path with any symlinks resolved.
        return os.path.realpath(path)
    def path_to_module(self, module_name):
        """A wrapper for all calls to __file__ to allow easy unit testing."""
        # FIXME: This is the only use of sys in this file. It's possible this function should move elsewhere.
        return sys.modules[module_name].__file__ # __file__ is always an absolute path.
    def expanduser(self, path):
        # Expand a leading '~' or '~user' to the user's home directory.
        return os.path.expanduser(path)
    def basename(self, path):
        # Final path component, analogous to the Unix `basename` tool.
        return os.path.basename(path)
    def chdir(self, path):
        # Change the process's current working directory.
        return os.chdir(path)
    def copyfile(self, source, destination):
        # Copy file contents (not metadata) from source to destination.
        shutil.copyfile(source, destination)
    def dirname(self, path):
        # Directory component of `path`, analogous to Unix `dirname`.
        return os.path.dirname(path)
    def exists(self, path):
        # True if `path` refers to an existing file or directory.
        return os.path.exists(path)
    def files_under(self, path, dirs_to_skip=[], file_filter=None):
        """Return the list of all files under the given path in topdown order.
        Args:
            dirs_to_skip: a list of directories to skip over during the
                traversal (e.g., .svn, resources, etc.)
            file_filter: if not None, the filter will be invoked
                with the filesystem object and the dirname and basename of
                each file found. The file is included in the result if the
                callback returns True.
        """
        def filter_all(fs, dirpath, basename):
            # Default filter: accept every file.
            return True
        file_filter = file_filter or filter_all
        files = []
        if self.isfile(path):
            # A plain file: the only candidate is the path itself.
            if file_filter(self, self.dirname(path), self.basename(path)):
                files.append(path)
            return files
        if self.basename(path) in dirs_to_skip:
            return []
        for (dirpath, dirnames, filenames) in os.walk(path):
            # Pruning `dirnames` in place stops os.walk from descending
            # into the skipped directories.
            for d in dirs_to_skip:
                if d in dirnames:
                    dirnames.remove(d)
            for filename in filenames:
                if file_filter(self, dirpath, filename):
                    files.append(self.join(dirpath, filename))
        return files
    def getcwd(self):
        # Current working directory of the process.
        return os.getcwd()
    def glob(self, path):
        # Expand shell-style wildcards; returns a (possibly empty) list.
        return glob.glob(path)
    def isabs(self, path):
        # True if `path` is an absolute path.
        return os.path.isabs(path)
    def isfile(self, path):
        # True if `path` exists and is a regular file (follows symlinks).
        return os.path.isfile(path)
    def isdir(self, path):
        # True if `path` exists and is a directory (follows symlinks).
        return os.path.isdir(path)
    def join(self, *comps):
        # Join path components with the platform separator.
        return os.path.join(*comps)
    def listdir(self, path):
        # Names of the entries in directory `path` (order unspecified).
        return os.listdir(path)
    def mkdtemp(self, **kwargs):
        """Create and return a uniquely named directory.
        This is like tempfile.mkdtemp, but if used in a with statement
        the directory will self-delete at the end of the block (if the
        directory is empty; non-empty directories raise errors). The
        directory can be safely deleted inside the block as well, if so
        desired.
        Note that the object returned is not a string and does not support all of the string
        methods. If you need a string, coerce the object to a string and go from there.
        """
        class TemporaryDirectory(object):
            # Thin context-manager wrapper around tempfile.mkdtemp.
            def __init__(self, **kwargs):
                self._kwargs = kwargs
                self._directory_path = tempfile.mkdtemp(**self._kwargs)
            def __str__(self):
                return self._directory_path
            def __enter__(self):
                return self._directory_path
            def __exit__(self, type, value, traceback):
                # Only self-delete if necessary.
                # FIXME: Should we delete non-empty directories?
                if os.path.exists(self._directory_path):
                    os.rmdir(self._directory_path)
        return TemporaryDirectory(**kwargs)
    def maybe_make_directory(self, *path):
        """Create the specified directory if it doesn't already exist."""
        try:
            os.makedirs(self.join(*path))
        except OSError, e:
            # EEXIST just means the directory already exists; any other
            # error (permissions, bad path, ...) is re-raised.
            if e.errno != errno.EEXIST:
                raise
    def move(self, source, destination):
        # Recursively move a file or directory, like Unix `mv`.
        shutil.move(source, destination)
    def mtime(self, path):
        # Last-modification time of `path`, in seconds since the epoch.
        return os.stat(path).st_mtime
    def normpath(self, path):
        # Collapse redundant separators and up-level references.
        return os.path.normpath(path)
    def open_binary_tempfile(self, suffix):
        """Create, open, and return a binary temp file. Returns a tuple of the file and the name."""
        temp_fd, temp_name = tempfile.mkstemp(suffix)
        f = os.fdopen(temp_fd, 'wb')
        return f, temp_name
    def open_binary_file_for_reading(self, path):
        # codecs.open in 'rb' mode behaves like a plain binary open().
        return codecs.open(path, 'rb')
    def read_binary_file(self, path):
        """Return the contents of the file at the given path as a byte string."""
        # NOTE(review): `file` is the Python 2 builtin (removed in py3).
        with file(path, 'rb') as f:
            return f.read()
    def write_binary_file(self, path, contents):
        # Overwrite `path` with the given byte string.
        with file(path, 'wb') as f:
            f.write(contents)
    def open_text_file_for_reading(self, path):
        # Note: There appears to be an issue with the returned file objects
        # not being seekable. See http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python .
        return codecs.open(path, 'r', 'utf8')
    def open_text_file_for_writing(self, path):
        # Open `path` for writing with transparent UTF-8 encoding.
        return codecs.open(path, 'w', 'utf8')
    def read_text_file(self, path):
        """Return the contents of the file at the given path as a Unicode string.
        The file is read assuming it is a UTF-8 encoded file with no BOM."""
        with codecs.open(path, 'r', 'utf8') as f:
            return f.read()
    def write_text_file(self, path, contents):
        """Write the contents to the file at the given location.
        The file is written encoded as UTF-8 with no BOM."""
        with codecs.open(path, 'w', 'utf8') as f:
            f.write(contents)
    def sha1(self, path):
        # Hex SHA-1 digest of the file's raw bytes.
        contents = self.read_binary_file(path)
        return hashlib.sha1(contents).hexdigest()
    def relpath(self, path, start='.'):
        # Relative filepath from `start` to `path`.
        return os.path.relpath(path, start)
    class _WindowsError(exceptions.OSError):
        """Fake exception for Linux and Mac."""
        pass
    def remove(self, path, osremove=os.remove):
        """On Windows, if a process was recently killed and it held on to a
        file, the OS will hold on to the file for a short while. This makes
        attempts to delete the file fail. To work around that, this method
        will retry for a few seconds until Windows is done with the file."""
        try:
            exceptions.WindowsError
        except AttributeError:
            # On non-Windows platforms WindowsError does not exist; install
            # a stand-in so the `except` clause below is always resolvable.
            exceptions.WindowsError = FileSystem._WindowsError
        retry_timeout_sec = 3.0
        sleep_interval = 0.1
        while True:
            try:
                osremove(path)
                return True
            except exceptions.WindowsError, e:
                # Back off briefly and retry until the OS releases the file
                # or the overall timeout budget is exhausted.
                time.sleep(sleep_interval)
                retry_timeout_sec -= sleep_interval
                if retry_timeout_sec < 0:
                    raise e
    def rmtree(self, path):
        """Delete the directory rooted at path, whether empty or not."""
        shutil.rmtree(path, ignore_errors=True)
    def copytree(self, source, destination):
        # Recursively copy an entire directory tree.
        shutil.copytree(source, destination)
    def split(self, path):
        """Return (dirname, basename + '.' + ext)"""
        return os.path.split(path)
    def splitext(self, path):
        """Return (dirname + os.sep + basename, '.' + ext)"""
        return os.path.splitext(path)
|
bsd-3-clause
|
RubenKelevra/rethinkdb
|
test/regression/issue_852.py
|
36
|
3132
|
#!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
# Regression test for issue #852: shard a table across two servers, swap the
# shards' primary replicas, swap them back, and verify cluster health and an
# empty `current_issues` table after each reconfiguration.
from __future__ import print_function
import sys, os, time
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, rdb_workload_common, scenario_common, utils, vcoptparse
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))
numNodes = 2
r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()
print("Starting cluster of %d servers (%.2fs)" % (numNodes, time.time() - startTime))
with driver.Cluster(initial_servers=numNodes, output_folder='.', wait_until_ready=True, command_prefix=command_prefix, extra_options=serve_options) as cluster:
    server1 = cluster[0]
    server2 = cluster[1]
    conn = r.connect(host=server1.host, port=server1.driver_port)
    print("Creating db/table %s/%s (%.2fs)" % (dbName, tableName, time.time() - startTime))
    if dbName not in r.db_list().run(conn):
        r.db_create(dbName).run(conn)
    # Drop any leftover table so the test starts from a clean slate.
    if tableName in r.db(dbName).table_list().run(conn):
        r.db(dbName).table_drop(tableName).run(conn)
    r.db(dbName).table_create(tableName).run(conn)
    print("Inserting some data (%.2fs)" % (time.time() - startTime))
    rdb_workload_common.insert_many(host=server1.host, port=server1.driver_port, database=dbName, table=tableName, count=10000)
    cluster.check()
    print("Splitting into two shards (%.2fs)" % (time.time() - startTime))
    # Initial assignment: server1 primary on shard 0, server2 on shard 1.
    shards = [
        {'primary_replica':server1.name, 'replicas':[server1.name, server2.name]},
        {'primary_replica':server2.name, 'replicas':[server2.name, server1.name]}
    ]
    res = r.db(dbName).table(tableName).config().update({'shards':shards}).run(conn)
    assert res['errors'] == 0, 'Errors after splitting into two shards: %s' % repr(res)
    r.db(dbName).wait().run(conn)
    cluster.check()
    print("Changing the primary replica (%.2fs)" % (time.time() - startTime))
    # Swap the primaries of both shards.
    shards = [
        {'primary_replica':server2.name, 'replicas':[server2.name, server1.name]},
        {'primary_replica':server1.name, 'replicas':[server1.name, server2.name]}
    ]
    assert r.db(dbName).table(tableName).config().update({'shards':shards}).run(conn)['errors'] == 0
    r.db(dbName).wait().run(conn)
    cluster.check()
    print("Changing it back (%.2fs)" % (time.time() - startTime))
    # BUG FIX: this step previously re-applied the identical swapped
    # configuration from the step above, so nothing was actually "changed
    # back". Restore the original primary assignment so the swap is
    # exercised in both directions.
    shards = [
        {'primary_replica':server1.name, 'replicas':[server1.name, server2.name]},
        {'primary_replica':server2.name, 'replicas':[server2.name, server1.name]}
    ]
    assert r.db(dbName).table(tableName).config().update({'shards':shards}).run(conn)['errors'] == 0
    print("Waiting for it to take effect (%.2fs)" % (time.time() - startTime))
    r.db(dbName).wait().run(conn)
    cluster.check()
    # The cluster should report no outstanding issues once settled.
    assert len(list(r.db('rethinkdb').table('current_issues').run(conn))) == 0
    print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
|
agpl-3.0
|
AkademieOlympia/sympy
|
sympy/geometry/line3d.py
|
55
|
40385
|
"""Line-like geometrical entities.
Contains
========
LinearEntity3D
Line3D
Ray3D
Segment3D
"""
from __future__ import division, print_function
from sympy.core import Dummy, S, nan
from sympy.functions.elementary.trigonometric import acos
from sympy.simplify.simplify import simplify
from sympy.solvers.solveset import solveset, linsolve
from sympy.geometry.exceptions import GeometryError
from sympy.core.compatibility import is_sequence, range
from .entity import GeometryEntity
from .point import Point3D
from .util import _symbol
class LinearEntity3D(GeometryEntity):
"""An base class for all linear entities (line, ray and segment)
in a 3-dimensional Euclidean space.
Attributes
==========
p1
p2
direction_ratio
direction_cosine
points
Notes
=====
This is a base class and is not meant to be instantiated.
"""
def __new__(cls, p1, p2, **kwargs):
p1 = Point3D(p1)
p2 = Point3D(p2)
if p1 == p2:
# if it makes sense to return a Point, handle in subclass
raise ValueError(
"%s.__new__ requires two unique Points." % cls.__name__)
return GeometryEntity.__new__(cls, p1, p2, **kwargs)
@property
def p1(self):
"""The first defining point of a linear entity.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
>>> l = Line3D(p1, p2)
>>> l.p1
Point3D(0, 0, 0)
"""
return self.args[0]
@property
def p2(self):
"""The second defining point of a linear entity.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
>>> l = Line3D(p1, p2)
>>> l.p2
Point3D(5, 3, 1)
"""
return self.args[1]
@property
def direction_ratio(self):
"""The direction ratio of a given line in 3D.
See Also
========
sympy.geometry.line.Line.equation
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
>>> l = Line3D(p1, p2)
>>> l.direction_ratio
[5, 3, 1]
"""
p1, p2 = self.points
return p1.direction_ratio(p2)
@property
def direction_cosine(self):
"""The normalized direction ratio of a given line in 3D.
See Also
========
sympy.geometry.line.Line.equation
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
>>> l = Line3D(p1, p2)
>>> l.direction_cosine
[sqrt(35)/7, 3*sqrt(35)/35, sqrt(35)/35]
>>> sum(i**2 for i in _)
1
"""
p1, p2 = self.points
return p1.direction_cosine(p2)
@property
def length(self):
"""
The length of the line.
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(3, 5, 1)
>>> l1 = Line3D(p1, p2)
>>> l1.length
oo
"""
return S.Infinity
@property
def points(self):
"""The two points used to define this linear entity.
Returns
=======
points : tuple of Points
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 11, 1)
>>> l1 = Line3D(p1, p2)
>>> l1.points
(Point3D(0, 0, 0), Point3D(5, 11, 1))
"""
return (self.p1, self.p2)
@staticmethod
def are_concurrent(*lines):
"""Is a sequence of linear entities concurrent?
Two or more linear entities are concurrent if they all
intersect at a single point.
Parameters
==========
lines : a sequence of linear entities.
Returns
=======
True : if the set of linear entities are concurrent,
False : otherwise.
Notes
=====
Simply take the first two lines and find their intersection.
If there is no intersection, then the first two lines were
parallel and had no intersection so concurrency is impossible
amongst the whole set. Otherwise, check to see if the
intersection point of the first two lines is a member on
the rest of the lines. If so, the lines are concurrent.
See Also
========
sympy.geometry.util.intersection
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(3, 5, 2)
>>> p3, p4 = Point3D(-2, -2, -2), Point3D(0, 2, 1)
>>> l1, l2, l3 = Line3D(p1, p2), Line3D(p1, p3), Line3D(p1, p4)
>>> Line3D.are_concurrent(l1, l2, l3)
True
>>> l4 = Line3D(p2, p3)
>>> Line3D.are_concurrent(l2, l3, l4)
False
"""
# Concurrency requires intersection at a single point; One linear
# entity cannot be concurrent.
if len(lines) <= 1:
return False
try:
# Get the intersection (if parallel)
p = lines[0].intersection(lines[1])
if len(p) == 0:
return False
# Make sure the intersection is on every linear entity
for line in lines[2:]:
if p[0] not in line:
return False
return True
except AttributeError:
return False
    def is_parallel(l1, l2):
        """Are two linear entities parallel?
        Parameters
        ==========
        l1 : LinearEntity
        l2 : LinearEntity
        Returns
        =======
        True : if l1 and l2 are parallel,
        False : otherwise.
        Examples
        ========
        >>> from sympy import Point3D, Line3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(3, 4, 5)
        >>> p3, p4 = Point3D(2, 1, 1), Point3D(8, 9, 11)
        >>> l1, l2 = Line3D(p1, p2), Line3D(p3, p4)
        >>> Line3D.is_parallel(l1, l2)
        True
        >>> p5 = Point3D(6, 6, 6)
        >>> l3 = Line3D(p3, p5)
        >>> Line3D.is_parallel(l1, l3)
        False
        """
        if l1 == l2:
            return True
        a = l1.direction_cosine
        b = l2.direction_cosine
        # lines are parallel if the direction_cosines are the same or
        # differ by a constant
        rat = set()
        for i, j in zip(a, b):
            if i and j:
                # Both components nonzero: record their ratio. Parallel
                # (or anti-parallel) lines yield a single common ratio.
                rat.add(i/j)
                if len(rat) > 1:
                    # Two distinct ratios -> directions not proportional.
                    return False
            elif i or j:
                # Exactly one component vanishes -> cannot be proportional.
                return False
        return True
def is_perpendicular(l1, l2):
"""Are two linear entities perpendicular?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are perpendicular,
False : otherwise.
See Also
========
direction_ratio
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(-1, 2, 0)
>>> l1, l2 = Line3D(p1, p2), Line3D(p2, p3)
>>> l1.is_perpendicular(l2)
False
>>> p4 = Point3D(5, 3, 7)
>>> l3 = Line3D(p1, p4)
>>> l1.is_perpendicular(l3)
False
"""
a = sum([i*j for i, j in zip(l1.direction_ratio, l2.direction_ratio)])
if a == 0:
return True
else:
return False
def angle_between(l1, l2):
"""The angle formed between the two linear entities.
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
angle : angle in radians
Notes
=====
From the dot product of vectors v1 and v2 it is known that:
``dot(v1, v2) = |v1|*|v2|*cos(A)``
where A is the angle formed between the two vectors. We can
get the directional vectors of the two lines and readily
find the angle between the two using the above formula.
See Also
========
is_perpendicular
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(-1, 2, 0)
>>> l1, l2 = Line3D(p1, p2), Line3D(p2, p3)
>>> l1.angle_between(l2)
acos(-sqrt(2)/3)
"""
v1 = l1.p2 - l1.p1
v2 = l2.p2 - l2.p1
return acos(v1.dot(v2)/(abs(v1)*abs(v2)))
def parallel_line(self, p):
"""Create a new Line parallel to this linear entity which passes
through the point `p`.
Parameters
==========
p : Point3D
Returns
=======
line : Line3D
See Also
========
is_parallel
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(2, 3, 4), Point3D(-2, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> l2 = l1.parallel_line(p3)
>>> p3 in l2
True
>>> l1.is_parallel(l2)
True
"""
d = self.direction_ratio
return Line3D(p, direction_ratio=d)
def perpendicular_line(self, p):
"""Create a new Line perpendicular to this linear entity which passes
through the point `p`.
Parameters
==========
p : Point3D
Returns
=======
line : Line3D
See Also
========
is_perpendicular, perpendicular_segment
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(2, 3, 4), Point3D(-2, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> l2 = l1.perpendicular_line(p3)
>>> p3 in l2
True
>>> l1.is_perpendicular(l2)
True
"""
p = Point3D(p)
if p in self:
raise NotImplementedError("Given point should not be on the line")
t = Dummy()
a = self.arbitrary_point(t)
b = [i - j for i, j in zip(p.args, a.args)]
c = sum([i*j for i, j in zip(b, self.direction_ratio)])
d = list(solveset(c, t))
e = a.subs(t, d[0])
return Line3D(p, e)
def perpendicular_segment(self, p):
"""Create a perpendicular line segment from `p` to this line.
The enpoints of the segment are ``p`` and the closest point in
the line containing self. (If self is not a line, the point might
not be in self.)
Parameters
==========
p : Point3D
Returns
=======
segment : Segment3D
Notes
=====
Returns `p` itself if `p` is on this linear entity.
See Also
========
perpendicular_line
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> s1 = l1.perpendicular_segment(p3)
>>> l1.is_perpendicular(s1)
True
>>> p3 in s1
True
>>> l1.perpendicular_segment(Point3D(4, 0, 0))
Segment3D(Point3D(4/3, 4/3, 4/3), Point3D(4, 0, 0))
"""
p = Point3D(p)
if p in self:
raise NotImplementedError("Given point should not be on the line")
t = Dummy()
a = self.arbitrary_point(t)
b = [i - j for i, j in zip(p.args, a.args)]
c = sum([i*j for i, j in zip(b, self.direction_ratio)])
d = list(solveset(c, t))
e = a.subs(t, d[0])
return Segment3D(p, e)
def projection(self, o):
"""Project a point, line, ray, or segment onto this linear entity.
Parameters
==========
other : Point or LinearEntity (Line, Ray, Segment)
Returns
=======
projection : Point or LinearEntity (Line, Ray, Segment)
The return type matches the type of the parameter ``other``.
Raises
======
GeometryError
When method is unable to perform projection.
Notes
=====
A projection involves taking the two points that define
the linear entity and projecting those points onto a
Line and then reforming the linear entity using these
projections.
A point P is projected onto a line L by finding the point
on L that is closest to P. This point is the intersection
of L and the line perpendicular to L that passes through P.
See Also
========
sympy.geometry.point.Point3D, perpendicular_line
Examples
========
>>> from sympy import Point3D, Line3D, Segment3D, Rational
>>> p1, p2, p3 = Point3D(0, 0, 1), Point3D(1, 1, 2), Point3D(2, 0, 1)
>>> l1 = Line3D(p1, p2)
>>> l1.projection(p3)
Point3D(2/3, 2/3, 5/3)
>>> p4, p5 = Point3D(10, 0, 1), Point3D(12, 1, 3)
>>> s1 = Segment3D(p4, p5)
>>> l1.projection(s1)
[Segment3D(Point3D(10/3, 10/3, 13/3), Point3D(5, 5, 6))]
"""
tline = Line3D(self.p1, self.p2)
def _project(p):
"""Project a point onto the line representing self."""
if p in tline:
return p
l1 = tline.perpendicular_line(p)
return tline.intersection(l1)[0]
projected = None
if isinstance(o, Point3D):
return _project(o)
elif isinstance(o, LinearEntity3D):
n_p1 = _project(o.p1)
n_p2 = _project(o.p2)
if n_p1 == n_p2:
projected = n_p1
else:
projected = o.__class__(n_p1, n_p2)
# Didn't know how to project so raise an error
if projected is None:
n1 = self.__class__.__name__
n2 = o.__class__.__name__
raise GeometryError(
"Do not know how to project %s onto %s" % (n2, n1))
return self.intersection(projected)
def intersection(self, o):
"""The intersection with another geometrical entity.
Parameters
==========
o : Point or LinearEntity3D
Returns
=======
intersection : list of geometrical entities
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Line3D, Segment3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(7, 7, 7)
>>> l1 = Line3D(p1, p2)
>>> l1.intersection(p3)
[Point3D(7, 7, 7)]
>>> l1 = Line3D(Point3D(4,19,12), Point3D(5,25,17))
>>> l2 = Line3D(Point3D(-3, -15, -19), direction_ratio=[2,8,8])
>>> l1.intersection(l2)
[Point3D(1, 1, -3)]
>>> p6, p7 = Point3D(0, 5, 2), Point3D(2, 6, 3)
>>> s1 = Segment3D(p6, p7)
>>> l1.intersection(s1)
[]
"""
if isinstance(o, Point3D):
if o in self:
return [o]
else:
return []
elif isinstance(o, LinearEntity3D):
if self == o:
return [self]
elif self.is_parallel(o):
if isinstance(self, Line3D):
if o.p1 in self:
return [o]
return []
elif isinstance(self, Ray3D):
if isinstance(o, Ray3D):
# case 1, rays in the same direction
if self.xdirection == o.xdirection and \
self.ydirection == o.ydirection and \
self.zdirection == o.zdirection:
return [self] if (self.source in o) else [o]
# case 2, rays in the opposite directions
else:
if o.source in self:
if self.source == o.source:
return [self.source]
return [Segment3D(o.source, self.source)]
return []
elif isinstance(o, Segment3D):
if o.p1 in self:
if o.p2 in self:
return [o]
return [Segment3D(o.p1, self.source)]
elif o.p2 in self:
return [Segment3D(o.p2, self.source)]
return []
elif isinstance(self, Segment3D):
if isinstance(o, Segment3D):
# A reminder that the points of Segments are ordered
# in such a way that the following works. See
# Segment3D.__new__ for details on the ordering.
if self.p1 not in o:
if self.p2 not in o:
# Neither of the endpoints are in o so either
# o is contained in this segment or it isn't
if o in self:
return [o]
return []
else:
# p1 not in o but p2 is. Either there is a
# segment as an intersection, or they only
# intersect at an endpoint
if self.p2 == o.p1:
return [o.p1]
return [Segment3D(o.p1, self.p2)]
elif self.p2 not in o:
# p2 not in o but p1 is. Either there is a
# segment as an intersection, or they only
# intersect at an endpoint
if self.p1 == o.p2:
return [o.p2]
return [Segment3D(o.p2, self.p1)]
# Both points of self in o so the whole segment
# is in o
return [self]
else: # unrecognized LinearEntity
raise NotImplementedError
else:
# If the lines are not parallel then solve their arbitrary points
# to obtain the point of intersection
t = t1, t2 = Dummy(), Dummy()
a = self.arbitrary_point(t1)
b = o.arbitrary_point(t2)
dx = a.x - b.x
c = linsolve([dx, a.y - b.y], t).args[0]
d = linsolve([dx, a.z - b.z], t).args[0]
if len(c.free_symbols) == 1 and len(d.free_symbols) == 1:
return []
e = a.subs(t1, c[0])
if e in self and e in o:
return [e]
else:
return []
return o.intersection(self)
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the Line.
Parameters
==========
parameter : str, optional
The name of the parameter which will be used for the parametric
point. The default value is 't'. When this parameter is 0, the
first point used to define the line will be returned, and when
it is 1 the second point will be returned.
Returns
=======
point : Point3D
Raises
======
ValueError
When ``parameter`` already appears in the Line's definition.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(1, 0, 0), Point3D(5, 3, 1)
>>> l1 = Line3D(p1, p2)
>>> l1.arbitrary_point()
Point3D(4*t + 1, 3*t, t)
"""
t = _symbol(parameter)
if t.name in (f.name for f in self.free_symbols):
raise ValueError('Symbol %s already appears in object '
'and cannot be used as a parameter.' % t.name)
x = simplify(self.p1.x + t*(self.p2.x - self.p1.x))
y = simplify(self.p1.y + t*(self.p2.y - self.p1.y))
z = simplify(self.p1.z + t*(self.p2.z - self.p1.z))
return Point3D(x, y, z)
def is_similar(self, other):
"""
Return True if self and other are contained in the same line.
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(2, 2, 2)
>>> l1 = Line3D(p1, p2)
>>> l2 = Line3D(p1, p3)
>>> l1.is_similar(l2)
True
"""
if isinstance(other, Line3D):
if self.direction_cosine == other.direction_cosine and other.p1 in self:
return True
else:
return False
raise NotImplementedError()
def __contains__(self, other):
"""Return a definitive answer or else raise an error if it cannot
be determined that other is on the boundaries of self."""
result = self.contains(other)
if result is not None:
return result
else:
raise Undecidable(
"can't decide whether '%s' contains '%s'" % (self, other))
def contains(self, other):
"""Subclasses should implement this method and should return
True if other is on the boundaries of self;
False if not on the boundaries of self;
None if a determination cannot be made."""
raise NotImplementedError()
class Line3D(LinearEntity3D):
"""An infinite 3D line in space.
A line is declared with two distinct points or a point and direction_ratio
as defined using keyword `direction_ratio`.
Parameters
==========
p1 : Point3D
pt : Point3D
direction_ratio : list
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> import sympy
>>> from sympy import Point3D
>>> from sympy.abc import L
>>> from sympy.geometry import Line3D, Segment3D
>>> L = Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L
Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L.points
(Point3D(2, 3, 4), Point3D(3, 5, 1))
"""
def __new__(cls, p1, pt=None, direction_ratio=[], **kwargs):
if isinstance(p1, LinearEntity3D):
p1, pt = p1.args
else:
p1 = Point3D(p1)
if pt is not None and len(direction_ratio) == 0:
pt = Point3D(pt)
elif len(direction_ratio) == 3 and pt is None:
pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
p1.z + direction_ratio[2])
else:
raise ValueError('A 2nd Point or keyword "direction_ratio" must '
'be used.')
return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of line. Gives
values that will produce a line that is +/- 5 units long (where a
unit is the distance between the two points that define the line).
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list (plot interval)
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
>>> l1 = Line3D(p1, p2)
>>> l1.plot_interval()
[t, -5, 5]
"""
t = _symbol(parameter)
return [t, -5, 5]
def equation(self, x='x', y='y', z='z', k='k'):
"""The equation of the line in 3D
Parameters
==========
x : str, optional
The name to use for the x-axis, default value is 'x'.
y : str, optional
The name to use for the y-axis, default value is 'y'.
z : str, optional
The name to use for the x-axis, default value is 'z'.
Returns
=======
equation : tuple
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(1, 0, 0), Point3D(5, 3, 0)
>>> l1 = Line3D(p1, p2)
>>> l1.equation()
(x/4 - 1/4, y/3, zoo*z, k)
"""
x, y, z, k = _symbol(x), _symbol(y), _symbol(z), _symbol(k)
p1, p2 = self.points
a = p1.direction_ratio(p2)
return (((x - p1.x)/a[0]), ((y - p1.y)/a[1]),
((z - p1.z)/a[2]), k)
def contains(self, o):
"""Return True if o is on this Line, or False otherwise.
Examples
========
>>> from sympy import Line3D
>>> a = (0, 0, 0)
>>> b = (1, 1, 1)
>>> c = (2, 2, 2)
>>> l1 = Line3D(a, b)
>>> l2 = Line3D(b, a)
>>> l1 == l2
False
>>> l1 in l2
True
"""
if is_sequence(o):
o = Point3D(o)
if isinstance(o, Point3D):
sym = list(map(Dummy, 'xyz'))
eq = self.equation(*sym)
a = [eq[i].subs(sym[i], o.args[i]) for i in range(3)]
a = [i for i in a if i != nan]
if len(a) == 1:
return True
first = a.pop(0)
for i in a:
rv = first.equals(i)
if not rv:
return rv
return True
elif not isinstance(o, LinearEntity3D):
return False
elif isinstance(o, Line3D):
return all(i in self for i in o.points)
def distance(self, o):
"""
Finds the shortest distance between a line and a point.
Raises
======
NotImplementedError is raised if o is not an instance of Point3D
Examples
========
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 1)
>>> s = Line3D(p1, p2)
>>> s.distance(Point3D(-1, 1, 1))
2*sqrt(6)/3
>>> s.distance((-1, 1, 1))
2*sqrt(6)/3
"""
if not isinstance(o, Point3D):
if is_sequence(o):
o = Point3D(o)
if o in self:
return S.Zero
a = self.perpendicular_segment(o).length
return a
def equals(self, other):
"""Returns True if self and other are the same mathematical entities"""
if not isinstance(other, Line3D):
return False
return Point3D.are_collinear(self.p1, other.p1, self.p2, other.p2)
class Ray3D(LinearEntity3D):
"""
A Ray is a semi-line in the space with a source point and a direction.
Parameters
==========
p1 : Point3D
The source of the Ray
p2 : Point or a direction vector
direction_ratio: Determines the direction in which the Ray propagates.
Attributes
==========
source
xdirection
ydirection
zdirection
See Also
========
sympy.geometry.point.Point3D, Line3D
Examples
========
>>> import sympy
>>> from sympy import Point3D, pi
>>> from sympy.abc import r
>>> from sympy.geometry import Ray3D
>>> r = Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
>>> r
Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
>>> r.points
(Point3D(2, 3, 4), Point3D(3, 5, 0))
>>> r.source
Point3D(2, 3, 4)
>>> r.xdirection
oo
>>> r.ydirection
oo
>>> r.direction_ratio
[1, 2, -4]
"""
def __new__(cls, p1, pt=None, direction_ratio=[], **kwargs):
if isinstance(p1, LinearEntity3D):
p1, pt = p1.args
else:
p1 = Point3D(p1)
if pt is not None and len(direction_ratio) == 0:
pt = Point3D(pt)
elif len(direction_ratio) == 3 and pt is None:
pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
p1.z + direction_ratio[2])
else:
raise ValueError('A 2nd Point or keyword "direction_ratio" must'
'be used.')
return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
@property
def source(self):
"""The point from which the ray emanates.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Ray3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 1, 5)
>>> r1 = Ray3D(p1, p2)
>>> r1.source
Point3D(0, 0, 0)
"""
return self.p1
@property
def xdirection(self):
"""The x direction of the ray.
Positive infinity if the ray points in the positive x direction,
negative infinity if the ray points in the negative x direction,
or 0 if the ray is vertical.
See Also
========
ydirection
Examples
========
>>> from sympy import Point3D, Ray3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, -1, 0)
>>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
>>> r1.xdirection
oo
>>> r2.xdirection
0
"""
if self.p1.x < self.p2.x:
return S.Infinity
elif self.p1.x == self.p2.x:
return S.Zero
else:
return S.NegativeInfinity
@property
def ydirection(self):
"""The y direction of the ray.
Positive infinity if the ray points in the positive y direction,
negative infinity if the ray points in the negative y direction,
or 0 if the ray is horizontal.
See Also
========
xdirection
Examples
========
>>> from sympy import Point3D, Ray3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
>>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
>>> r1.ydirection
-oo
>>> r2.ydirection
0
"""
if self.p1.y < self.p2.y:
return S.Infinity
elif self.p1.y == self.p2.y:
return S.Zero
else:
return S.NegativeInfinity
    @property
    def zdirection(self):
        """The z direction of the ray.
        Positive infinity if the ray points in the positive z direction,
        negative infinity if the ray points in the negative z direction,
        or 0 if the ray is parallel to the xy-plane.
        See Also
        ========
        xdirection, ydirection
        Examples
        ========
        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.zdirection
        -oo
        >>> r2.zdirection
        0
        """
        if self.p1.z < self.p2.z:
            return S.Infinity
        elif self.p1.z == self.p2.z:
            return S.Zero
        else:
            return S.NegativeInfinity
def distance(self, o):
"""
Finds the shortest distance between the ray and a point.
Raises
======
NotImplementedError is raised if o is not a Point
Examples
========
>>> from sympy import Point3D, Ray3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 2)
>>> s = Ray3D(p1, p2)
>>> s.distance(Point3D(-1, -1, 2))
sqrt(6)
>>> s.distance((-1, -1, 2))
sqrt(6)
"""
if not isinstance(o, Point3D):
if is_sequence(o):
o = Point3D(o)
if o in self:
return S.Zero
s = self.perpendicular_segment(o)
if not isinstance(s, Point3D):
non_o = s.p1 if s.p1 == o else s.p2
if self.contains(non_o):
return Line3D(self).distance(o) # = s.length but simpler
# the following applies when neither of the above apply
return self.source.distance(o)
def plot_interval(self, parameter='t'):
    """The plot interval for the default geometric plot of the Ray. Gives
    values that will produce a ray that is 10 units long (where a unit is
    the distance between the two points that define the ray).

    Parameters
    ==========

    parameter : str, optional
        Default value is 't'.

    Returns
    =======

    plot_interval : list
        [parameter, lower_bound, upper_bound]

    Examples
    ========

    >>> from sympy import Point3D, Ray3D, pi
    >>> r = Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 1))
    >>> r.plot_interval()
    [t, 0, 10]

    """
    return [_symbol(parameter), 0, 10]
def contains(self, o):
    """Is other GeometryEntity contained in this Ray?

    Rays contain: other rays that are collinear and point the same way,
    segments whose both endpoints lie on the ray, and collinear points on
    the source's side of the ray.  Raises Undecidable when a symbolic
    comparison cannot be resolved to True/False.
    """
    if isinstance(o, Ray3D):
        return (Point3D.are_collinear(self.p1, self.p2, o.p1, o.p2) and
                self.xdirection == o.xdirection and
                self.ydirection == o.ydirection and
                self.zdirection == o.zdirection)
    elif isinstance(o, Segment3D):
        return o.p1 in self and o.p2 in self
    elif is_sequence(o):
        o = Point3D(o)
    if isinstance(o, Point3D):
        if Point3D.are_collinear(self.p1, self.p2, o):
            # A collinear point is on the ray iff it lies on the source's
            # side along the first nonzero direction component.
            if self.xdirection is S.Infinity:
                rv = o.x >= self.source.x
            elif self.xdirection is S.NegativeInfinity:
                rv = o.x <= self.source.x
            elif self.ydirection is S.Infinity:
                rv = o.y >= self.source.y
            elif self.ydirection is S.NegativeInfinity:
                rv = o.y <= self.source.y
            elif self.zdirection is S.Infinity:
                # BUG FIX: a ray pointing toward +z contains points with
                # z >= source.z; this branch previously tested `<=`,
                # mirroring the NegativeInfinity case by mistake.
                rv = o.z >= self.source.z
            else:
                rv = o.z <= self.source.z
            if rv == True or rv == False:
                return bool(rv)
            raise Undecidable(
                'Cannot determine if %s is in %s' % (o, self))
        else:
            # Points are not collinear, so the rays are not parallel
            # and hence it is impossible for self to contain o
            return False
    # No other known entity can be contained in a Ray
    return False
def equals(self, other):
    """Returns True if self and other are the same mathematical entities."""
    # Same source and the other's defining point on this ray <=> same ray.
    return (isinstance(other, Ray3D)
            and self.source == other.source
            and other.p2 in self)
class Segment3D(LinearEntity3D):
    """A undirected line segment in a 3D space.

    Parameters
    ==========

    p1 : Point3D
    p2 : Point3D

    Attributes
    ==========

    length : number or sympy expression
    midpoint : Point3D

    See Also
    ========

    sympy.geometry.point.Point3D, Line3D

    Examples
    ========

    >>> import sympy
    >>> from sympy import Point3D
    >>> from sympy.abc import s
    >>> from sympy.geometry import Segment3D
    >>> Segment3D((1, 0, 0), (1, 1, 1)) # tuples are interpreted as pts
    Segment3D(Point3D(1, 0, 0), Point3D(1, 1, 1))
    >>> s = Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7))
    >>> s
    Segment3D(Point3D(1, 1, 7), Point3D(4, 3, 9))
    >>> s.points
    (Point3D(1, 1, 7), Point3D(4, 3, 9))
    >>> s.length
    sqrt(17)
    >>> s.midpoint
    Point3D(5/2, 2, 8)

    """

    def __new__(cls, p1, p2, **kwargs):
        # Reorder the two points under the following ordering so that equal
        # segments compare equal regardless of construction order:
        #   if p1.x != p2.x then p1.x < p2.x
        #   if p1.x == p2.x then p1.y < p2.y
        # The z-coordinate will not come into picture while ordering
        p1 = Point3D(p1)
        p2 = Point3D(p2)
        if p1 == p2:
            # A degenerate segment collapses to a single point.
            return Point3D(p1)
        if (p1.x > p2.x) == True:
            p1, p2 = p2, p1
        elif (p1.x == p2.x) == True and (p1.y > p2.y) == True:
            p1, p2 = p2, p1
        return LinearEntity3D.__new__(cls, p1, p2, **kwargs)

    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of the Segment gives
        values that will produce the full segment in a plot.

        Parameters
        ==========

        parameter : str, optional
            Default value is 't'.

        Returns
        =======

        plot_interval : list
            [parameter, lower_bound, upper_bound]

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 0)
        >>> s1 = Segment3D(p1, p2)
        >>> s1.plot_interval()
        [t, 0, 1]

        """
        t = _symbol(parameter)
        return [t, 0, 1]

    @property
    def length(self):
        """The length of the line segment.

        See Also
        ========

        sympy.geometry.point.Point3D.distance

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
        >>> s1 = Segment3D(p1, p2)
        >>> s1.length
        sqrt(34)

        """
        return Point3D.distance(self.p1, self.p2)

    @property
    def midpoint(self):
        """The midpoint of the line segment.

        See Also
        ========

        sympy.geometry.point.Point3D.midpoint

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
        >>> s1 = Segment3D(p1, p2)
        >>> s1.midpoint
        Point3D(2, 3/2, 3/2)

        """
        return Point3D.midpoint(self.p1, self.p2)

    def distance(self, o):
        """
        Finds the shortest distance between a line segment and a point.

        Raises
        ======

        NotImplementedError is raised if o is not a Point3D

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 3), Point3D(1, 1, 4)
        >>> s = Segment3D(p1, p2)
        >>> s.distance(Point3D(10, 15, 12))
        sqrt(341)
        >>> s.distance((10, 15, 12))
        sqrt(341)
        """
        if is_sequence(o):
            o = Point3D(o)
        if isinstance(o, Point3D):
            seg_vector = self.p2 - self.p1
            pt_vector = o - self.p1
            # t is the projection parameter of o onto the segment's line;
            # t in (0, 1) means the perpendicular foot lies on the segment.
            t = seg_vector.dot(pt_vector)/self.length**2
            if t >= 1:
                distance = Point3D.distance(self.p2, o)
            elif t <= 0:
                distance = Point3D.distance(self.p1, o)
            else:
                # BUG FIX: the z component of the foot point previously used
                # t*seg_vector.y, giving a wrong distance whenever
                # seg_vector.y != seg_vector.z.
                distance = Point3D.distance(
                    self.p1 + Point3D(t*seg_vector.x, t*seg_vector.y,
                                      t*seg_vector.z), o)
            return distance
        raise NotImplementedError()

    def contains(self, other):
        """
        Is the other GeometryEntity contained within this Segment?

        Examples
        ========

        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 1, 1), Point3D(3, 4, 5)
        >>> s = Segment3D(p1, p2)
        >>> s2 = Segment3D(p2, p1)
        >>> s.contains(s2)
        True
        """
        if is_sequence(other):
            other = Point3D(other)
        if isinstance(other, Segment3D):
            return other.p1 in self and other.p2 in self
        elif isinstance(other, Point3D):
            if Point3D.are_collinear(self.p1, self.p2, other):
                # A collinear point is on the segment iff the triangle
                # inequality is tight: |p1-o| + |o-p2| == |p1-p2|.
                if other.distance(self.p1) + other.distance(self.p2) == self.length:
                    return True
                else:
                    return False
        return False
|
bsd-3-clause
|
windyuuy/opera
|
chromium/src/third_party/protobuf/python/google/protobuf/service.py
|
590
|
9131
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
    """Raised when a blocking RPC method call fails."""
    pass
class Service(object):
    """Abstract base interface for protocol-buffer-based RPC services.

    Concrete services -- server implementations as well as generated
    client stubs -- subclass this interface.  Its methods let callers
    invoke a service's methods without knowing the service's exact type
    at compile time, much like the Message interface does for messages.
    """

    def GetDescriptor():
        """Retrieves this service's descriptor.

        NOTE: declared without a ``self`` parameter in the upstream API;
        implementations provide it accordingly.
        """
        raise NotImplementedError

    def CallMethod(self, method_descriptor, rpc_controller,
                   request, done):
        """Calls the service method described by *method_descriptor*.

        When *done* is None the call blocks and the response message is
        returned directly; RpcException is raised on error.  Otherwise the
        call is asynchronous and *done* is invoked later with the response
        value (None if the RPC failed -- query the RpcController for
        details).

        Preconditions: *method_descriptor* belongs to this service's
        descriptor; *request* is an instance of the exact class returned
        by GetRequestClass(method) and must not be modified once the call
        has started; *rpc_controller* is of the type expected by the RPC
        implementation in use.
        """
        raise NotImplementedError

    def GetRequestClass(self, method_descriptor):
        """Returns the request message class for the given method.

        CallMethod() requires requests of a particular Message subclass;
        this accessor exposes that required type, e.g.::

          method = service.GetDescriptor().FindMethodByName("Foo")
          request = stub.GetRequestClass(method)()
          request.ParseFromString(input)
          service.CallMethod(method, request, callback)
        """
        raise NotImplementedError

    def GetResponseClass(self, method_descriptor):
        """Returns the response message class for the given method.

        Provided for callers that want to know the response type in
        advance; the RpcChannel normally constructs the response itself.
        """
        raise NotImplementedError
class RpcController(object):
    """Mediates a single RPC method call.

    A controller carries implementation-specific settings for one call and
    reports RPC-level errors.  The interface below is the least common
    denominator expected of every RPC implementation; particular
    implementations may offer more (e.g. deadline propagation).
    """

    # ---- Client-side methods ----

    def Reset(self):
        """Returns the controller to its initial state so it can be reused.

        Must not be called while an RPC is in progress.
        """
        raise NotImplementedError

    def Failed(self):
        """True if the finished call failed.

        Must only be called after the call has finished; when it returns
        True the contents of the response message are undefined.
        """
        raise NotImplementedError

    def ErrorText(self):
        """Human-readable error description, valid when Failed() is true."""
        raise NotImplementedError

    def StartCancel(self):
        """Asks the RPC system to cancel the in-flight call.

        Cancellation is best-effort and may happen immediately, later, or
        not at all; if it does happen, the "done" callback still fires and
        the controller reports the call as failed.
        """
        raise NotImplementedError

    # ---- Server-side methods ----

    def SetFailed(self, reason):
        """Marks the call failed; *reason* feeds the client's ErrorText().

        Machine-readable failure details belong in the response protocol
        buffer, not here.
        """
        raise NotImplementedError

    def IsCanceled(self):
        """True if the client canceled the RPC.

        The server may then skip building a reply, but must still invoke
        the final "done" callback.
        """
        raise NotImplementedError

    def NotifyOnCancel(self, callback):
        """Registers *callback* to run exactly once when the RPC ends.

        It fires on cancellation, on normal completion, or immediately if
        the RPC was already canceled.  May be called at most once per
        request.
        """
        raise NotImplementedError
class RpcChannel(object):
    """Abstract interface for an RPC channel.

    A channel is a communication line to a (possibly remote) service.
    Rather than using it directly, wrap it in a generated stub::

      channel = rpcImpl.Channel("remotehost.example.com:1234")
      controller = rpcImpl.Controller()
      service = MyService_Stub(channel)
      service.MyMethod(controller, request, callback)
    """

    def CallMethod(self, method_descriptor, rpc_controller,
                   request, response_class, done):
        """Invokes the remote method identified by *method_descriptor*.

        Signature mirrors Service.CallMethod() but is laxer in one way:
        *request* may be any message whose descriptor equals
        method.input_type rather than one exact class.
        """
        raise NotImplementedError
|
bsd-3-clause
|
kfitzgerald/titcoin
|
contrib/pyminer/pyminer.py
|
385
|
6434
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15          # seconds to back off after a failed/short RPC reply
MAX_NONCE = 1000000L    # initial nonce-scan size; retuned from the hash rate

settings = {}           # key=value pairs parsed from the config file
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    # Minimal JSON-RPC 1.1 client talking to bitcoind over one persistent
    # HTTP connection with Basic authentication.

    OBJID = 1   # counter feeding the JSON-RPC 'id' field

    def __init__(self, host, port, username, password):
        # Precompute the HTTP Basic auth header once.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30-second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Issue one JSON-RPC call.  Returns the 'result' value, the
        server's 'error' object on RPC error, or None on transport or
        decode failure."""
        # NOTE(review): += on the class attribute binds a per-instance
        # counter that shadows BitcoinRPC.OBJID from then on.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        # Convenience wrapper for the 'getblockcount' RPC.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # 'getwork' with no data fetches work; with data it submits a solution.
        return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
    """Byte-swap a 32-bit word (flip its endianness)."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    return uint32(swapped)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of a binary buffer in place order."""
    swapped = []
    idx = 0
    while idx < len(in_buf):
        (word,) = struct.unpack('@I', in_buf[idx:idx + 4])
        swapped.append(struct.pack('@I', bytereverse(word)))
        idx += 4
    return ''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in a buffer."""
    chunks = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
    return ''.join(reversed(chunks))
class Miner:
    # One mining worker: fetches work over JSON-RPC, scans a nonce range,
    # and submits any proof-of-work solution it finds.

    def __init__(self, id):
        self.id = id                 # worker index, used in log output
        self.max_nonce = MAX_NONCE   # nonces per scan; retuned each pass

    def work(self, datastr, targetstr):
        """Scan nonces over one work unit.  Returns (hashes_done,
        nonce_bin) where nonce_bin is the packed winning nonce or None."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value (reuse the pre-hashed state)
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # hex offsets 152:160 hold the header's 32-bit nonce field
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One getwork/scan/submit cycle; backs off on RPC failure."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Retune the scan size so one pass takes ~settings['scantime'] s,
        # clamped below the 32-bit nonce ceiling.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the RPC server and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner's loop forever."""
    worker = Miner(id)
    worker.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse a simple "key=value" config file; '#' lines are comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; coerce the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Spawn one worker process per configured thread, staggered by 1s.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mit
|
Xperia-Nicki/android_platform_sony_nicki
|
external/webkit/LayoutTests/http/tests/websocket/tests/hybi/protocol-test_wsh.py
|
108
|
1907
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
    """Adopt ?protocol=... from the resource query string, if present,
    as this connection's subprotocol."""
    parts = request.ws_resource.split('?', 1)
    if len(parts) < 2:
        return
    params = cgi.parse_qs(parts[1])
    if 'protocol' in params:
        request.ws_protocol = params['protocol'][0]
def web_socket_transfer_data(request):
    """Echo the negotiated subprotocol back to the client as one message."""
    protocol = request.ws_protocol
    msgutil.send_message(request, protocol)
|
apache-2.0
|
VillageAlliance/django-cms
|
cms/forms/utils.py
|
2
|
3495
|
# -*- coding: utf-8 -*-
from cms.models import Page
from cms.models.titlemodels import Title
from cms.utils import i18n
from collections import defaultdict
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete
from django.utils import translation
from django.utils.safestring import mark_safe
def update_site_and_page_choices(lang=None):
    """Rebuild and cache the (site_choices, page_choices) lists for ``lang``."""
    lang = lang or translation.get_language()
    site_cache_key = get_site_cache_key(lang)
    page_cache_key = get_page_cache_key(lang)

    # With moderation enabled only public (non-draft) titles are listed.
    if settings.CMS_MODERATOR:
        titles = Title.objects.filter(page__publisher_is_draft=False)
    else:
        titles = Title.objects.filter(page__publisher_is_draft=True)
    titles = titles.select_related('page', 'page__site')

    # site pk -> page pk -> language -> Title
    pages = defaultdict(lambda: defaultdict(dict))
    sites = {}
    for title in titles:
        page = title.page
        pages[page.site.pk][page.pk][title.language] = title
        sites[page.site.pk] = page.site.name

    site_choices = []
    page_choices = [('', '----')]
    # Prefer the requested language, then its configured fallbacks.
    language_order = [lang] + i18n.get_fallback_languages(lang)
    for site_pk, site_name in sites.items():
        site_choices.append((site_pk, site_name))

        site_page_choices = []
        for candidates in pages[site_pk].values():
            title = None
            for code in language_order:
                title = candidates.get(code)
                if title:
                    break
            if not title:
                continue
            # Indent by tree level so the <select> shows the hierarchy.
            indent = u"    " * title.page.level
            page_title = mark_safe(u"%s%s" % (indent, title.title))
            site_page_choices.append((title.page.pk, page_title))
        page_choices.append((site_name, site_page_choices))

    # We set it to 1 day here because we actively invalidate this cache.
    cache.set(site_cache_key, site_choices, 86400)
    cache.set(page_cache_key, page_choices, 86400)
    return site_choices, page_choices
def get_site_choices(lang=None):
    """Return the cached site choices for ``lang``, rebuilding on a miss."""
    lang = lang or translation.get_language()
    choices = cache.get(get_site_cache_key(lang))
    if choices is None:
        choices, _page_choices = update_site_and_page_choices(lang)
    return choices
def get_page_choices(lang=None):
    """Return the cached page choices for ``lang``, rebuilding on a miss."""
    lang = lang or translation.get_language()
    choices = cache.get(get_page_cache_key(lang))
    if choices is None:
        _site_choices, choices = update_site_and_page_choices(lang)
    return choices
def _get_key(prefix, lang):
return "%s-%s" % (prefix, lang)
def get_site_cache_key(lang):
    """Cache key under which the site choices for ``lang`` are stored."""
    return _get_key(settings.CMS_SITE_CHOICES_CACHE_KEY, lang)
def get_page_cache_key(lang):
    """Cache key under which the page choices for ``lang`` are stored."""
    return _get_key(settings.CMS_PAGE_CHOICES_CACHE_KEY, lang)
def _clean_many(prefix):
    """Delete the cached entries under ``prefix`` for every configured language."""
    keys = [_get_key(prefix, entry[0]) for entry in settings.LANGUAGES]
    cache.delete_many(keys)
def clean_site_choices_cache(sender, **kwargs):
    """Signal handler: invalidate the cached site choices for all languages."""
    _clean_many(settings.CMS_SITE_CHOICES_CACHE_KEY)
def clean_page_choices_cache(sender, **kwargs):
    """Signal handler: invalidate the cached page choices for all languages."""
    _clean_many(settings.CMS_PAGE_CHOICES_CACHE_KEY)
# Keep the caches fresh: any Page or Site save/delete invalidates the
# corresponding cached choice lists for every configured language.
post_save.connect(clean_page_choices_cache, sender=Page)
post_save.connect(clean_site_choices_cache, sender=Site)
post_delete.connect(clean_page_choices_cache, sender=Page)
post_delete.connect(clean_site_choices_cache, sender=Site)
|
bsd-3-clause
|
pauloschilling/sentry
|
src/sentry/web/forms/invite_organization_member.py
|
22
|
1583
|
from __future__ import absolute_import
from django import forms
from django.db import transaction, IntegrityError
from sentry.models import (
AuditLogEntry, AuditLogEntryEvent, OrganizationMember,
OrganizationMemberType
)
class InviteOrganizationMemberForm(forms.ModelForm):
    # ModelForm that invites a user to an organization by email address.
    # save() returns a (member, created) pair, get_or_create style.

    class Meta:
        fields = ('email',)
        model = OrganizationMember

    def save(self, actor, organization, ip_address):
        """Create (or fetch) the OrganizationMember, write an audit log
        entry and send the invite email.

        Returns (member, created); created is False when a member with
        this email already exists in the organization.
        """
        om = super(InviteOrganizationMemberForm, self).save(commit=False)
        om.organization = organization
        om.type = OrganizationMemberType.MEMBER

        # Fast path: a user with this email already belongs to the org.
        try:
            existing = OrganizationMember.objects.filter(
                organization=organization,
                user__email__iexact=om.email,
            )[0]
        except IndexError:
            pass
        else:
            return existing, False

        # Savepoint guards against a concurrent insert of the same email
        # (presumably a unique constraint on organization+email — the
        # retry fetch below assumes exactly one winning row).
        sid = transaction.savepoint(using='default')
        try:
            om.save()
        except IntegrityError:
            # Lost the race: roll back and return the existing member.
            transaction.savepoint_rollback(sid, using='default')
            return OrganizationMember.objects.get(
                email__iexact=om.email,
                organization=organization,
            ), False
        transaction.savepoint_commit(sid, using='default')

        AuditLogEntry.objects.create(
            organization=organization,
            actor=actor,
            ip_address=ip_address,
            target_object=om.id,
            event=AuditLogEntryEvent.MEMBER_INVITE,
            data=om.get_audit_log_data(),
        )

        om.send_invite_email()

        return om, True
|
bsd-3-clause
|
thatchristoph/namebench
|
namebench.py
|
171
|
2390
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""namebench: DNS service benchmarking tool."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os
import platform
import sys
# Check before we start importing internal dependencies
# (NOTE: lexicographic comparison on sys.version; adequate for the
# '2.4'/'3.0' prefixes tested here)
if sys.version < '2.4':
    your_version = sys.version.split(' ')[0]
    print '* Your Python version (%s) is too old! Please upgrade to 2.6+!' % your_version
    sys.exit(1)
elif sys.version >= '3.0':
    print '* namebench is currently incompatible with Python 3.0 - trying anyways'
from libnamebench import cli
from libnamebench import config
if __name__ == '__main__':
    options = config.GetMergedConfiguration()
    use_tk = False
    # With no CLI arguments, prefer the GUI when a display is available.
    if len(sys.argv) == 1:
        if os.getenv('DISPLAY', None):
            use_tk = True
        # Macs get a special Cocoa binary
        if os.getenv('I_LOVE_TK', None):
            use_tk = True
        elif platform.mac_ver()[0]:
            use_tk = False
        elif platform.system() == 'Windows':
            use_tk = True

    if use_tk:
        try:
            # Workaround for unicode path errors.
            # See http://code.google.com/p/namebench/issues/detail?id=41
            if hasattr(sys, 'winver') and hasattr(sys, 'frozen'):
                os.environ['TCL_LIBRARY'] = os.path.join(os.path.dirname(sys.executable), 'tcl', 'tcl8.5')
                os.environ['TK_LIBRARY'] = os.path.join(os.path.dirname(sys.executable), 'tcl', 'tk8.5')
            import Tkinter
        except ImportError:
            # Fall back to the CLI when tkinter is unavailable.
            if len(sys.argv) == 1:
                print "- The python-tk (tkinter) library is missing, using the command-line interface.\n"
            use_tk = False

    if use_tk:
        print 'Starting graphical interface for namebench (use -x to force command-line usage)'
        from libnamebench import tk
        interface = tk.NameBenchGui
    else:
        interface = cli.NameBenchCli

    namebench = interface(options)
    namebench.Execute()
|
apache-2.0
|
dmore70/nexus7-kernel
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
5411
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		# Repaint the visible window: `start`/`end` arrive as offsets relative
		# to ts_start and are rebased to absolute timestamps for the tracer.
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		# EVT_PAINT handler: compute the currently visible time window from the
		# scroll position and redraw only that range.
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		# Map a y pixel coordinate to a row (rectangle) index; return -1 when
		# the point falls above the first row, past the last row, or inside the
		# spacing gap between two rows.
		# NOTE(review): `/` here assumes Python 2 integer division; under
		# Python 3 `rect` would be a float -- confirm target interpreter.
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		# Replace the summary text widget shown below the drawing area,
		# destroying the previous one to avoid stacking stale labels.
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		# Left-click handler: translate the click into (row index, absolute
		# timestamp in us) and forward it to the tracer; ignore clicks that
		# land outside any row.
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		# Apply the (already updated) zoom factor: re-derive the virtual width,
		# then reset the scrollbars so the time position `x` (us) stays anchored
		# at the viewport's left edge.
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		# Double the zoom factor, keeping the current left-edge time anchored.
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		# Halve the zoom factor, keeping the current left-edge time anchored.
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
gpl-2.0
|
p0psicles/SickRage
|
lib/sqlalchemy/sql/expression.py
|
78
|
5624
|
# sql/expression.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the public namespace for SQL expression constructs.
Prior to version 0.9, this module contained all of "elements", "dml",
"default_comparator" and "selectable". The module was broken up
and most "factory" functions were moved to be grouped with their associated
class.
"""
# Names re-exported as the public SQL expression API of this namespace module.
__all__ = [
    'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
    'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
    'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
    'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
    'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
    'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
    'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
    'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
    'table', 'text',
    'tuple_', 'type_coerce', 'union', 'union_all', 'update']
from .visitors import Visitable
from .functions import func, modifier, FunctionElement
from ..util.langhelpers import public_factory
from .elements import ClauseElement, ColumnElement,\
BindParameter, UnaryExpression, BooleanClauseList, \
Label, Cast, Case, ColumnClause, TextClause, Over, Null, \
True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \
Grouping, not_, \
collate, literal_column, between,\
literal, outparam, type_coerce, ClauseList
from .elements import SavepointClause, RollbackToSavepointClause, \
ReleaseSavepointClause
from .base import ColumnCollection, Generative, Executable, \
PARSE_AUTOCOMMIT
from .selectable import Alias, Join, Select, Selectable, TableClause, \
CompoundSelect, CTE, FromClause, FromGrouping, SelectBase, \
alias, GenerativeSelect, \
subquery, HasPrefixes, Exists, ScalarSelect, TextAsFrom
from .dml import Insert, Update, Delete, UpdateBase, ValuesBase
# factory functions - these pull class-bound constructors and classmethods
# from SQL elements and selectables into public functions. This allows
# the functions to be available in the sqlalchemy.sql.* namespace and
# to be auto-cross-documenting from the function to the class itself.
# boolean conjunctions
and_ = public_factory(BooleanClauseList.and_, ".expression.and_")
or_ = public_factory(BooleanClauseList.or_, ".expression.or_")
# core column-level elements
bindparam = public_factory(BindParameter, ".expression.bindparam")
select = public_factory(Select, ".expression.select")
text = public_factory(TextClause._create_text, ".expression.text")
table = public_factory(TableClause, ".expression.table")
column = public_factory(ColumnClause, ".expression.column")
over = public_factory(Over, ".expression.over")
label = public_factory(Label, ".expression.label")
case = public_factory(Case, ".expression.case")
cast = public_factory(Cast, ".expression.cast")
extract = public_factory(Extract, ".expression.extract")
tuple_ = public_factory(Tuple, ".expression.tuple_")
# compound (set) operations on SELECTs
except_ = public_factory(CompoundSelect._create_except, ".expression.except_")
except_all = public_factory(CompoundSelect._create_except_all, ".expression.except_all")
intersect = public_factory(CompoundSelect._create_intersect, ".expression.intersect")
intersect_all = public_factory(CompoundSelect._create_intersect_all, ".expression.intersect_all")
union = public_factory(CompoundSelect._create_union, ".expression.union")
union_all = public_factory(CompoundSelect._create_union_all, ".expression.union_all")
exists = public_factory(Exists, ".expression.exists")
# ordering / unary modifiers
nullsfirst = public_factory(UnaryExpression._create_nullsfirst, ".expression.nullsfirst")
nullslast = public_factory(UnaryExpression._create_nullslast, ".expression.nullslast")
asc = public_factory(UnaryExpression._create_asc, ".expression.asc")
desc = public_factory(UnaryExpression._create_desc, ".expression.desc")
distinct = public_factory(UnaryExpression._create_distinct, ".expression.distinct")
# constant singletons
true = public_factory(True_._singleton, ".expression.true")
false = public_factory(False_._singleton, ".expression.false")
null = public_factory(Null._singleton, ".expression.null")
# FROM-clause constructs and DML
join = public_factory(Join._create_join, ".expression.join")
outerjoin = public_factory(Join._create_outerjoin, ".expression.outerjoin")
insert = public_factory(Insert, ".expression.insert")
update = public_factory(Update, ".expression.update")
delete = public_factory(Delete, ".expression.delete")
# internal functions still being called from tests and the ORM,
# these might be better off in some other namespace
from .base import _from_objects
from .elements import _literal_as_text, _clause_element_as_expr,\
_is_column, _labeled, _only_column_elements, _string_or_unprintable, \
_truncated_label, _clone, _cloned_difference, _cloned_intersection,\
_column_as_key, _literal_as_binds, _select_iterables, \
_corresponding_column_or_error
from .selectable import _interpret_as_from
# old names for compatibility
# (pre-0.9 underscored class names; kept so external code that imported the
# private spellings keeps working)
_Executable = Executable
_BindParamClause = BindParameter
_Label = Label
_SelectBase = SelectBase
_BinaryExpression = BinaryExpression
_Cast = Cast
_Null = Null
_False = False_
_True = True_
_TextClause = TextClause
_UnaryExpression = UnaryExpression
_Case = Case
_Tuple = Tuple
_Over = Over
_Generative = Generative
_TypeClause = TypeClause
_Extract = Extract
_Exists = Exists
_Grouping = Grouping
_FromGrouping = FromGrouping
_ScalarSelect = ScalarSelect
|
gpl-3.0
|
gitromand/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/user_unittest.py
|
124
|
7300
|
# Copyright (C) 2010 Research in Motion Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user import User
class UserTest(unittest.TestCase):
    """Tests for User's interactive prompting and confirmation helpers.

    Each test injects a mock raw_input so no real terminal interaction occurs.
    """
    example_user_response = "example user response"
    def test_prompt_repeat(self):
        # The prompt succeeds on the last allowed attempt.
        self.repeatsRemaining = 2
        def mock_raw_input(message):
            self.repeatsRemaining -= 1
            if not self.repeatsRemaining:
                return UserTest.example_user_response
            return None
        self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response)
    def test_prompt_when_exceeded_repeats(self):
        # The prompt returns None once every attempt is exhausted.
        self.repeatsRemaining = 2
        def mock_raw_input(message):
            self.repeatsRemaining -= 1
            return None
        self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None)
    def test_prompt_with_multiple_lists(self):
        # Exercises numbered selection over two sub-lists, including ranges
        # ("1-3"), comma lists, whitespace tolerance, "all", and bad input.
        def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
            def mock_raw_input(message):
                return inputs.pop(0)
            output_capture = OutputCapture()
            actual_result = output_capture.assert_outputs(
                self,
                User.prompt_with_multiple_lists,
                args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]],
                kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
                expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n")
            self.assertEqual(actual_result, expected_result)
            # Every queued input must have been consumed.
            self.assertEqual(len(inputs), 0)
        run_prompt_test(["1"], "foo")
        run_prompt_test(["badinput", "2"], "bar")
        run_prompt_test(["3"], "foobar")
        run_prompt_test(["4"], "barbaz")
        run_prompt_test(["5"], "foobaz")
        run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
        run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
        run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True)
        run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
        run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
        run_prompt_test([" "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
        run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
    def test_prompt_with_list(self):
        # Same selection semantics as above, over a single flat list.
        def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
            def mock_raw_input(message):
                return inputs.pop(0)
            output_capture = OutputCapture()
            actual_result = output_capture.assert_outputs(
                self,
                User.prompt_with_list,
                args=["title", ["foo", "bar"]],
                kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
                expected_stdout="title\n 1. foo\n 2. bar\n")
            self.assertEqual(actual_result, expected_result)
            self.assertEqual(len(inputs), 0)
        run_prompt_test(["1"], "foo")
        run_prompt_test(["badinput", "2"], "bar")
        run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True)
    def test_confirm(self):
        # Tuples are ((expected prompt text, expected result), (default, typed answer)).
        test_cases = (
            (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')),
            (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')),
            (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')),
            (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')),
            (("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')),
            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')),
            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')),
            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')),
        )
        for test_case in test_cases:
            expected, inputs = test_case
            # Late binding of `expected`/`inputs` is safe: the mock is invoked
            # synchronously within this loop iteration.
            def mock_raw_input(message):
                self.assertEqual(expected[0], message)
                return inputs[1]
            result = User().confirm(default=inputs[0],
                                    raw_input=mock_raw_input)
            self.assertEqual(expected[1], result)
    def test_warn_if_application_is_xcode(self):
        # Only exact, case-sensitive "Xcode" app names should trigger the warning.
        output = OutputCapture()
        user = User()
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["TextMate"])
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Applications/TextMate.app"])
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["XCode"])  # case sensitive matching
        xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n"
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["Xcode"], expected_stdout=xcode_warning)
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Developer/Applications/Xcode.app"], expected_stdout=xcode_warning)
|
bsd-3-clause
|
tomasreimers/tensorflow-emscripten
|
tensorflow/python/saved_model/tag_constants.py
|
70
|
1120
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common tags used for graphs in SavedModel.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.all_util import remove_undocumented
# Tag for the `serving` graph.
SERVING = "serve"
# Tag for the `training` graph.
TRAINING = "train"
# Only these names survive remove_undocumented() below; everything else in
# this module is stripped from the public namespace.
_allowed_symbols = [
    "SERVING",
    "TRAINING"
]
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
IEMLdev/ieml-api
|
ieml/dictionary/script/script.py
|
2
|
23759
|
import itertools
import numpy as np
from ieml.exceptions import InvalidScriptCharacter, InvalidScript, IncompatiblesScriptsLayers, TooManySingularSequences
from ieml.commons import TreeStructure, DecoratedComponent
from ieml.constants import MAX_LAYER, MAX_SINGULAR_SEQUENCES, MAX_SIZE_HEADER, LAYER_MARKS, PRIMITIVES, \
remarkable_multiplication_lookup_table, REMARKABLE_ADDITION, character_value, AUXILIARY_CLASS, VERB_CLASS, \
NOUN_CLASS
from itertools import chain
class Script(TreeStructure, DecoratedComponent):
    """ A parser is defined by a character (PRIMITIVES, REMARKABLE_ADDITION OR REMARKABLE_MULTIPLICATION)
    or a list of parser children. All the element in the children list must be an AdditiveScript or
    a MultiplicativeScript."""
    def __init__(self, children=None, character=None, *args, **kwargs):
        # Base initialiser: attributes are declared here and filled in by the
        # AdditiveScript / MultiplicativeScript / NullScript subclasses.
        super().__init__(*args, **kwargs)
        if children:
            self.children = children
        else:
            self.children = []
        if character:
            self.character = character
        else:
            self.character = None
        # Layer of this parser
        self.layer = None
        # If the script is a paradigm
        # NOTE(review): subclasses assign `self.paradigm`, not `is_paradigm`
        # -- confirm which attribute callers actually rely on.
        self.is_paradigm = None
        # If the script is the empty script
        self.empty = None
        # The number of singular sequence (1 for singular sequences)
        self.cardinal = None
        # The singular sequences ordered list
        self._singular_sequences = None
        self._singular_sequences_set = None
        # The contained paradigms (tables)
        self._tables = None
        self._cells = None
        self._tables_script = None
        self._headers = None
        # The canonical string to compare same layer and cardinal parser (__lt__)
        self.canonical = None
        # class of the parser, one of the following : VERB (1), AUXILIARY (0), and NOUN (2)
        self.script_class = None
        self.grammatical_class = None
    def __new__(cls, *args, **kwargs):
        """
        Need this to pickle scripts, the pickler use __hash__ method before unpickling the
        object attribute. Then need to pass the _str.
        """
        instance = super(Script, cls).__new__(cls)
        if 'str' in kwargs:
            instance._str = kwargs['str']
        return instance
    def __getnewargs_ex__(self):
        # Pickle support: reconstruct via __new__ from the string form (the
        # IEML string uniquely identifies the script; see __hash__).
        return ((), {
            'str': str(self)
        })
    def __add__(self, other):
        # `a + b` builds an additive paradigm; only Script operands are valid.
        if not isinstance(other, Script):
            raise InvalidScript()
        return AdditiveScript(children=[self, other])
    def __eq__(self, other):
        # Two scripts are equal iff their IEML strings are equal.
        if isinstance(other, Script):
            return self._str == other._str
        else:
            return super().__eq__(other)
    def __hash__(self):
        """Since the IEML string for a script is its definition, it can be used as a hash"""
        return self._str.__hash__()
    def __lt__(self, other):
        """Total order: by layer, then cardinal, then canonical bytes, then children."""
        if not isinstance(self, Script) or not isinstance(other, Script):
            return NotImplemented
        if self == other:
            return False
        if self.layer != other.layer:
            # order by layer
            return self.layer < other.layer
        else:
            # Cardinal of 1 the minimum, null_script is minimum
            if isinstance(self, NullScript):
                return True
            if isinstance(other, NullScript):
                return False
            if self.cardinal != other.cardinal:
                # then by number of singular sequence
                return self.cardinal < other.cardinal
            else:
                # Compare the canonical form
                if self.canonical != other.canonical:
                    return self.canonical < other.canonical
                elif self.layer != 0:
                    # layer != 0 => children is set, no children are for layer 0 (MultiplicativeScript)
                    if isinstance(self, other.__class__):
                        # if they are the same class
                        # Compare the children in alphabetical order
                        iterator = iter(other.children)
                        for s in self.children:
                            try:
                                o = iterator.__next__()
                                if o != s:
                                    return s < o
                            except StopIteration:
                                return False
                        # self have fewer elements, all equals to the first of other's children, self is lower.
                        return True
                    else:
                        # not an instance, one is multiplicative, other one is additive
                        # They have the same number of singular sequence so the multiplicative is fewer :
                        # each variable of the addition have less singular sequence than the multiplication parser
                        return isinstance(self, MultiplicativeScript)
                else:
                    # Layer 0
                    # Compare the value of the character or the sum
                    if isinstance(self, AdditiveScript):
                        # The character value is the sum of all character of the addition
                        self_char_value = sum((character_value[c.character] for c in self.children))
                    else:
                        self_char_value = character_value[self.character]
                    if isinstance(other, AdditiveScript):
                        # The character value is the sum of all character of the addition
                        other_char_value = sum((character_value[c.character] for c in other.children))
                    else:
                        other_char_value = character_value[other.character]
                    return self_char_value < other_char_value
    # def __getitem__(self, index):
    #     return self.children[index]
    def __contains__(self, item):
        # `item in self`: item's singular sequences must all be singular
        # sequences of self, and layers must match.
        if not isinstance(item, Script):
            return False
        if item.layer != self.layer:
            return False
        return item.singular_sequences_set.issubset(self.singular_sequences_set)
    def __len__(self):
        # Length is the number of singular sequences, not the child count.
        return self.cardinal
    def _build_tables(self):
        # Populate the cells/tables/headers caches; singular scripts are a
        # trivial 1x1x1 table of themselves.
        if self.cardinal == 1:
            self._cells = (np.array([[[self]]]),)
            self._tables_script = (self,)
            self._headers = ()
        else:
            _cells, _tables_script, _headers = self._compute_cells()
            self._cells, self._tables_script, self._headers = tuple(_cells), tuple(_tables_script), tuple(_headers)
    @property
    def cells(self):
        # Lazily built table cells (tuple of 3-d numpy object arrays).
        if self._cells is None:
            self._build_tables()
        return self._cells
    @property
    def headers(self):
        # Lazily built per-table header scripts.
        if self._headers is None:
            self._build_tables()
        return self._headers
    @property
    def tables_script(self):
        # Lazily built script describing each table.
        if self._tables_script is None:
            self._build_tables()
        return self._tables_script
    @property
    def singular_sequences(self):
        # Lazily computed, ordered list of singular sequences.
        if self._singular_sequences is None:
            self._singular_sequences = self._compute_singular_sequences()
        return self._singular_sequences
    @property
    def singular_sequences_set(self):
        # Set view of singular_sequences, cached for fast __contains__.
        if self._singular_sequences_set is None:
            self._singular_sequences_set = set(self.singular_sequences)
        return self._singular_sequences_set
    @property
    def is_singular(self):
        return self.cardinal == 1
    def iter_structure(self):
        # Scripts are leaves in the USL structure traversal.
        return []
    def iter_structure_path(self, flexion=False):
        return []
    def iter_structure_path_by_script_ss(self, flexion=False):
        # Local import to avoid a circular dependency with ieml.usl.decoration.
        from ieml.usl.decoration.path import UslPath
        yield (UslPath(), self)
    def _compute_cells(self):
        # Subclass hook: build (cells, tables_script, headers) for paradigms.
        pass
    def _compute_singular_sequences(self):
        # Subclass hook: enumerate the singular sequences.
        pass
    def check(self):
        pass
class AdditiveScript(Script):
    """ Represent an addition of same layer scripts."""
    def __init__(self, children=None, character=None):
        _character = None
        _children = []
        if children:
            _children = children
        if character in REMARKABLE_ADDITION:
            # A remarkable addition character expands to its fixed child set.
            _character = character
            _children = REMARKABLE_ADDITION_SCRIPT[_character]
        elif character is not None:
            raise InvalidScriptCharacter(character)
        else:
            if len(_children) == 0:
                raise InvalidScript()
            # check all children are the same layer
            l = _children[0].layer
            for c in _children:
                if c.layer != l:
                    raise IncompatiblesScriptsLayers(_children[0], c)
        if _children:
            to_remove = []
            to_add = []
            # remove the sub addition
            for c in _children:
                if isinstance(c, AdditiveScript):
                    to_remove.append(c)
                    to_add.extend(c.children)
            _children.extend(to_add)
            # Remove duplicate children
            _children = list(set(c for c in _children if c not in to_remove))
            # make a character with the children if possible
            if l == 0:
                _char_set = set(map(lambda e: str(e)[0], _children))
                for key, value in REMARKABLE_ADDITION.items():
                    if _char_set == value:
                        _character = key
                        break
        super().__init__(children=_children, character=_character)
        if self.character: # remarkable addition
            self.layer = 0
            self.empty = False
            self.paradigm = True
            self.cardinal = len(REMARKABLE_ADDITION[self.character])
        else:
            self.layer = self.children[0].layer
            self.empty = all((e.empty for e in self.children))
            self.paradigm = len(self.children) > 1 or any(child.paradigm for child in self.children)
            # Cardinal of an addition is the sum of its terms' cardinals.
            self.cardinal = sum((e.cardinal for e in self.children))
        if self.cardinal > MAX_SINGULAR_SEQUENCES:
            raise TooManySingularSequences(self.cardinal)
        self.script_class = max(c.script_class for c in self)
        self.grammatical_class = self.script_class
        self.__order()
        self._do_precompute_str()
    def _do_precompute_str(self):
        # Remarkable additions render as their single character; otherwise the
        # children are joined with '+'.
        self._str = \
            (self.character + LAYER_MARKS[0]) if self.character is not None \
            else '+'.join([str(child) for child in self.children])
    def __order(self):
        # Ordering of the children
        self.children.sort()
        if self.layer == 0:
            # Layer-0 canonical form: OR of the children's character bits.
            value = 0b0
            for child in self:
                value |= character_value[child.character]
            self.canonical = bytes([value])
        else:
            self.canonical = b''.join([child.canonical for child in self])
    def _compute_singular_sequences(self):
        # Generating the singular sequence
        if not self.paradigm:
            return [self]
        else:
            # additive proposition has always children set
            s = [sequence for child in self.children for sequence in child.singular_sequences]
            s.sort()
            return s
    def _compute_cells(self):
        # we generate one table per children, unless one children is a singular sequence.
        # if so, we generate one column instead
        # we generate one set of headers per element in product
        if any(not c.paradigm for c in self.children):
            # layer 0 -> column paradigm (like I: F: M: O:)
            return [np.array([[[s]] for s in self.singular_sequences])], [self], [[[self]]]
        # cells =
        # # if the child are multiple tables, merge thems
        # if
        return [t for c in self.children for t in c.cells], [t for c in self.children for t in c.tables_script], \
            list(chain(*(c.headers for c in self.children)))
class MultiplicativeScript(Script):
    """ Represent a multiplication of three scripts of the same layer."""
    def __init__(self, substance=None, attribute=None, mode=None, children=None, character=None):
        if not (substance or children or character):
            raise InvalidScript()
        # Build children
        if children is None:
            children = [substance, attribute, mode]
        # Keep the leading non-None positional children only.
        _children = []
        for child in children:
            if child is not None:
                _children.append(child)
            else:
                break
        # Replace all the corresponding children to character
        _character = None
        if character is not None:
            if character == 'E':
                raise InvalidScript()
            _character = character
            if _character in PRIMITIVES:
                _children = []
                layer = 0
            elif _character in REMARKABLE_MULTIPLICATION_SCRIPT:
                _children = REMARKABLE_MULTIPLICATION_SCRIPT[_character]
                layer = 1
            else:
                raise InvalidScriptCharacter(character)
        else:
            layer = _children[0].layer
            # Unwrap single-child additions into their sole element.
            for i, c in enumerate(_children):
                elem = c
                if isinstance(c, AdditiveScript) and len(c.children) == 1:
                    elem = c.children[0]
                _children[i] = elem
        # Replace the empty values
        for i, c in enumerate(_children):
            if c.empty:
                _children[i] = NullScript(layer=c.layer)
        # Fill the children to get a size of 3
        if _character not in PRIMITIVES:
            for i in range(len(_children), 3):
                _children.append(NullScript(layer=layer))
        # Add the character to children corresponding to specific combination
        _str_children = self._render_children(_children, _character)
        if _str_children in remarkable_multiplication_lookup_table:
            _character = remarkable_multiplication_lookup_table[_str_children]
        super().__init__(children=_children, character=_character)
        # Compute the attributes of this script
        if self.character:
            self.layer = 0 if self.character in PRIMITIVES else 1
            self.paradigm = False
            self.cardinal = 1
            self.empty = self.character == 'E'
        else:
            self.layer = _children[0].layer + 1
            self.empty = all((e.empty for e in self.children))
            self.paradigm = any((e.paradigm for e in self.children))
            # Cardinal of a multiplication is the product of its children's.
            self.cardinal = 1
            for e in self.children:
                self.cardinal = self.cardinal * e.cardinal
        if self.layer == 0:
            self.script_class = VERB_CLASS if self.character in REMARKABLE_ADDITION['O'] else NOUN_CLASS
        else:
            # Grammatical class is inherited from the substance.
            self.script_class = self.children[0].script_class
        self.grammatical_class = self.script_class
        if self.layer != 0:
            # check number of children
            if not len(self.children) == 3:
                raise InvalidScript("Invalid number of children provided for multiplicative script, expected 3")
            # check every child of the same layer
            if not self.children[0].layer == self.children[1].layer == self.children[2].layer:
                raise InvalidScript("Inconsistent layers in children")
            # check layer
            if not self.layer == self.children[0].layer + 1:
                raise InvalidScript("")
        if self.cardinal > MAX_SINGULAR_SEQUENCES:
            raise TooManySingularSequences(self.cardinal)
        self.__order()
        self._do_precompute_str()
    def _render_children(self, children=None, character=None):
        # Render substance/attribute/mode, dropping trailing empties (the
        # substance, at i == 2 in the reversed walk, is always rendered).
        if character:
            return character
        else:
            empty = True
            result = ''
            for i, c in enumerate(reversed(children)):
                if not c.empty:
                    empty = False
                if not empty or not c.empty or i == 2:
                    result = str(c) + result
            return result
    def _do_precompute_str(self):
        self._str = self._render_children(self.children, self.character) + LAYER_MARKS[self.layer]
    def __order(self):
        if self.layer == 0:
            self.canonical = bytes([character_value[self.character]])
        else:
            self.canonical = b''.join([child.canonical for child in self])
    def _compute_singular_sequences(self):
        # Generate the singular sequence
        if not self.paradigm:
            return [self]
        else:
            # Cartesian product of the non-empty children's singular sequences,
            # each tagged with its position so it lands in the right slot.
            children_sequences = []
            for i in range(0, 3):
                if not self.children[i].empty:
                    children_sequences.append([(i, c) for c in self.children[i].singular_sequences])
            s = []
            for triplet in itertools.product(*children_sequences):
                children = self.children[:]
                for tpl in triplet:
                    children[tpl[0]] = tpl[1]
                sequence = MultiplicativeScript(children=children)
                s.append(sequence)
            s.sort()
            return s
    def _compute_cells(self):
        # check how many plurals child
        plurals_child = [(c, i) for i, c in enumerate(self.children) if c.cardinal != 1]
        if len(plurals_child) == 1:
            # only one plural child, we recurse
            v = plurals_child[0]
            # translate the child ss as ours
            map_seq = {s.children[v[1]]: s for s in self.singular_sequences}
            def resolve_ss(s):
                return map_seq[s]
            def map_script(s):
                return MultiplicativeScript(children=[
                    self.children[i] if i != v[1] else s for i in range(3)
                ])
            return [np.vectorize(resolve_ss)(c) for c in v[0].cells], [map_script(c) for c in v[0].tables_script], \
                [[[map_script(h) for h in k] for k in c] for c in v[0].headers]
        # more than one plural var, we build a multidimensional array
        # Check the table dimension
        if any(c.cardinal > MAX_SIZE_HEADER for c, _ in plurals_child):
            raise ValueError("The table defined by the script %s produce a table with more than %d headers."%
                             (str(self), MAX_SIZE_HEADER))
        # 1st dim the rows
        # 2nd dim the columns
        # 3rd dim the tabs
        result = np.zeros(shape=[plurals_child[i][0].cardinal if i < len(plurals_child)
                                 else 1 for i in range(3)], dtype=object)
        # Position each singular sequence by its index along every plural axis.
        seq_index = [{s:i for i, s in enumerate(v[0].singular_sequences)} for v in plurals_child]
        for s in self.singular_sequences:
            res = [None, None, None]
            for i, v in enumerate(plurals_child):
                res[i] = seq_index[i][s.children[v[1]]]
            result[res[0], res[1], res[2]] = s
        if len(plurals_child) == 3:
            # Three plural axes: one table (tab) per mode singular sequence.
            tables_script = [MultiplicativeScript(children=[self.children[0], self.children[1], ss])
                             for ss in self.children[2].singular_sequences]
            header = [[[MultiplicativeScript(children=[ss_dim, self.children[1], ss]) \
                        for ss_dim in self.children[0].singular_sequences],
                       [MultiplicativeScript(children=[self.children[0], ss_dim, ss]) \
                        for ss_dim in self.children[1].singular_sequences]]
                      for ss in self.children[2].singular_sequences]
        else:
            tables_script = [self]
            header = [[[MultiplicativeScript(children=[c if ii != i else ss for ii, c in enumerate(self.children)])
                        for ss in self.children[i].singular_sequences] for _, i in plurals_child]]
        return [result], tables_script, header
class NullScript(Script):
    """The empty script ('E') at a given layer: always singular, empty, auxiliary class."""
    def __init__(self, layer):
        super().__init__(children=[])
        self.layer = layer
        self.paradigm = False
        self.empty = True
        self.cardinal = 1
        self.character = 'E'
        self._do_precompute_str()
        # Canonical form repeats E's value once per leaf slot (3^layer of them).
        self.canonical = bytes([character_value[self.character]] * pow(3, self.layer))
        self.script_class = AUXILIARY_CLASS
        self.grammatical_class = self.script_class
    def __iter__(self):
        # A null script of layer n iterates as three null scripts of layer n-1.
        if self.layer == 0:
            return [self].__iter__()
        return ([NULL_SCRIPTS[self.layer - 1]] * 3).__iter__()
    def _do_precompute_str(self):
        # 'E' followed by one layer mark per level up to this layer.
        result = self.character
        for l in range(0, self.layer + 1):
            result = result + LAYER_MARKS[l]
        self._str = result
    def _compute_singular_sequences(self):
        return [self]
# One null script per layer; NULL_SCRIPTS[k] is the empty script at layer k.
NULL_SCRIPTS = [NullScript(level) for level in range(0, MAX_LAYER)]

# Building the remarkable multiplication to parser.
# Each two-character value gives the (substance, attribute) characters of the
# multiplication; the mode is always the empty script at layer 0. Insertion
# order matches the original literal table.
_REMARKABLE_MULTIPLICATION_CHARACTERS = {
    "wo": "UU", "wa": "UA", "y": "US", "o": "UB", "e": "UT",
    "wu": "AU", "we": "AA", "u": "AS", "a": "AB", "i": "AT",
    "j": "SU", "g": "SA", "s": "SS", "b": "SB", "t": "ST",
    "h": "BU", "c": "BA", "k": "BS", "m": "BB", "n": "BT",
    "p": "TU", "x": "TA", "d": "TS", "f": "TB", "l": "TT",
}
REMARKABLE_MULTIPLICATION_SCRIPT = {
    key: [MultiplicativeScript(character=substance),
          MultiplicativeScript(character=attribute),
          NullScript(layer=0)]
    for key, (substance, attribute) in _REMARKABLE_MULTIPLICATION_CHARACTERS.items()
}

# Building the remarkable addition to parser: each character of the
# REMARKABLE_ADDITION entry becomes a script, 'E' mapping to the null script.
REMARKABLE_ADDITION_SCRIPT = {
    key: [MultiplicativeScript(character=c) if c != 'E' else NullScript(layer=0)
          for c in REMARKABLE_ADDITION[key]]
    for key in REMARKABLE_ADDITION
}
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.