| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
ValentinaPeona/tebreak | lib/make_refGene_fa.py | 1 | 1381 | #!/usr/bin/env python
import sys
from gzip import open
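# NOTE: this import shadows the builtin open() with gzip.open(), so the
# refGene table below is read as a gzip stream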
from pysam import Fastafile
from collections import defaultdict as dd
def usage():
return "usage: %s <reference genome fasta> <refGenes.txt.gz>" % sys.argv[0]
if len(sys.argv) == 3:
fa = Fastafile(sys.argv[1])
assert sys.argv[2].endswith('.gz'), "refGenes.txt must be gzipped"
genes = dd(list)
with open(sys.argv[2], 'r') as ref:
for line in ref:
(bin,
name,
chrom,
strand,
txStart,
txEnd,
cdsStart,
cdsEnd,
exonCount,
exonStarts,
exonEnds,
score,
name2,
cdsStartStat,
cdsEndStat,
exonFrames) = line.strip().split()
exonStarts = map(int, exonStarts.split(',')[:-1])
exonEnds = map(int, exonEnds.split(',')[:-1])
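# UCSC refGene exon coordinate lists are comma-terminated (e.g. "100,200,"),
# so the trailing empty field is dropped before converting to ints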
assert len(exonEnds) == len(exonStarts) == int(exonCount)
seq = ''
for start, end in zip(exonStarts, exonEnds):
if chrom in fa.references:
seq += fa.fetch(chrom, start, end)
if seq:
genes[name2].append(seq)
for name in genes:
for i, tx in enumerate(genes[name]):
print ">%s.%d\n%s" % (name, i, tx)
else:
sys.exit(usage())
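# Example invocation (hypothetical file names); the reference FASTA should be
# indexed with samtools faidx so pysam.Fastafile can fetch exon sequences:
#   python make_refGene_fa.py hg19.fa refGene.txt.gz > refGene_transcripts.fa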
| mit |
reyrodrigues/EU-SMS | temba/ivr/migrations/0006_auto_20150203_0558.py | 2 | 2396 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from temba.msgs.models import Msg, HANDLED, IVR
from temba.orgs.models import Org
from temba.flows.models import FlowStep, ActionSet, SayAction
from django.conf import settings
class Migration(migrations.Migration):
def reverse(apps, schema_editor):
IVRAction = apps.get_model("ivr", "IVRAction")
# remove the messages that the forward migration attached to each step
for org in Org.objects.all():
channel = org.get_call_channel()
print "Processing %s" % org
if channel:
for ivr in IVRAction.objects.filter(org=org):
step = FlowStep.objects.get(pk=ivr.step.pk)
if step.rule_value:
print "[%s] %s" % (ivr.call.contact_urn, step.rule_value)
step.messages.all().delete()
print step.messages.all().count()
def create_messages_for_ivr_actions(apps, schema_editor):
from django.contrib.auth.models import User
IVRAction = apps.get_model("ivr", "IVRAction")
# create a one-to-one mapping for any ivr actions as ivr messages
for org in Org.objects.all():
channel = org.get_call_channel()
# print "Processing %s" % org
if channel:
for ivr in IVRAction.objects.filter(org=org):
step = FlowStep.objects.get(pk=ivr.step.pk)
if step.rule_value:
urn = ivr.call.contact_urn
msg_dict = {}
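# rule values that start with 'http' point at call recordings, so they
# are carried over as the new message's recording_url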
if step.rule_value[0:4] == 'http':
msg_dict['recording_url'] = step.rule_value
user = User.objects.get(pk=ivr.call.created_by_id)
msg = Msg.create_incoming(channel, (urn.scheme, urn.path), step.rule_value,
user=user, topup=ivr.topup, status=HANDLED,
msg_type=IVR, date=ivr.created_on, org=org, **msg_dict)
step.add_message(msg)
dependencies = [
('ivr', '0005_auto_20150129_1759'),
('msgs', '0003_auto_20150129_0515'),
('orgs', '0012_auto_20151026_1152'),
]
operations = [
migrations.RunPython(create_messages_for_ivr_actions, reverse)
]
| agpl-3.0 |
RobertABT/heightmap | build/scipy/scipy/weave/tests/test_scxx_dict.py | 5 | 8775 | """ Test refcounting and behavior of SCXX.
"""
from __future__ import absolute_import, print_function
import sys
from numpy.testing import (TestCase, dec, assert_, assert_raises,
run_module_suite)
from scipy.weave import inline_tools
class TestDictConstruct(TestCase):
#------------------------------------------------------------------------
# Check that construction from basic types is allowed and yields correct
# reference counts
#------------------------------------------------------------------------
@dec.slow
def test_empty(self):
# strange int value used to try and make sure refcount is 2.
code = """
py::dict val;
return_val = val;
"""
res = inline_tools.inline(code)
assert_(sys.getrefcount(res) == 2)
assert_(res == {})
class TestDictHasKey(TestCase):
@dec.slow
def test_obj(self):
class Foo:
pass
key = Foo()
a = {}
a[key] = 12345
code = """
return_val = a.has_key(key);
"""
res = inline_tools.inline(code,['a','key'])
assert_(res)
@dec.slow
def test_int(self):
a = {}
a[1234] = 12345
code = """
return_val = a.has_key(1234);
"""
res = inline_tools.inline(code,['a'])
assert_(res)
@dec.slow
def test_double(self):
a = {}
a[1234.] = 12345
code = """
return_val = a.has_key(1234.);
"""
res = inline_tools.inline(code,['a'])
assert_(res)
@dec.slow
def test_complex(self):
a = {}
a[1+1j] = 12345
key = 1+1j
code = """
return_val = a.has_key(key);
"""
res = inline_tools.inline(code,['a','key'])
assert_(res)
@dec.slow
def test_string(self):
a = {}
a["b"] = 12345
code = """
return_val = a.has_key("b");
"""
res = inline_tools.inline(code,['a'])
assert_(res)
@dec.slow
def test_std_string(self):
a = {}
a["b"] = 12345
key_name = "b"
code = """
return_val = a.has_key(key_name);
"""
res = inline_tools.inline(code,['a','key_name'])
assert_(res)
@dec.slow
def test_string_fail(self):
a = {}
a["b"] = 12345
code = """
return_val = a.has_key("c");
"""
res = inline_tools.inline(code,['a'])
assert_(not res)
class TestDictGetItemOp(TestCase):
def generic_get(self,code,args=['a']):
a = {}
a['b'] = 12345
res = inline_tools.inline(code,args)
assert_(res == a['b'])
@dec.slow
def test_char(self):
self.generic_get('return_val = a["b"];')
@dec.knownfailureif(True)
@dec.slow
def test_char_fail(self):
# We can't throw a KeyError for dict access on the RHS of
# = but not on the LHS. Not sure how to deal with this.
assert_raises(KeyError, self.generic_get, 'return_val = a["c"];')
@dec.slow
def test_string(self):
self.generic_get('return_val = a[std::string("b")];')
@dec.slow
def test_obj(self):
code = """
py::object name = "b";
return_val = a[name];
"""
self.generic_get(code,['a'])
@dec.knownfailureif(True)
@dec.slow
def test_obj_fail(self):
# We can't throw a KeyError for dict access on the RHS of
# = but not on the LHS. Not sure how to deal with this.
code = """
py::object name = "c";
return_val = a[name];
"""
assert_raises(KeyError, self.generic_get, code, ['a'])
class TestDictSetOperator(TestCase):
def generic_new(self,key,val):
# Test that the value is set correctly and that reference counts
# on the dict, key, and val are handled correctly.
a = {}
# call once to handle mysterious addition of one ref count
# on first call to inline.
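# (sys.getrefcount reports one extra reference for its own argument; the
# tests therefore compare before/after snapshots rather than absolute counts)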
inline_tools.inline("a[key] = val;",['a','key','val'])
assert_(a[key] == val)
before = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)
inline_tools.inline("a[key] = val;",['a','key','val'])
assert_(a[key] == val)
after = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)
assert_(before == after)
def generic_overwrite(self,key,val):
a = {}
overwritten = 1
a[key] = overwritten # put an item in the dict to be overwritten
# call once to handle mysterious addition of one ref count
# on first call to inline.
before_overwritten = sys.getrefcount(overwritten)
inline_tools.inline("a[key] = val;",['a','key','val'])
assert_(a[key] == val)
before = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)
inline_tools.inline("a[key] = val;",['a','key','val'])
assert_(a[key] == val)
after = sys.getrefcount(a), sys.getrefcount(key), sys.getrefcount(val)
after_overwritten = sys.getrefcount(overwritten)
assert_(before == after)
assert_(before_overwritten == after_overwritten)
@dec.slow
def test_new_int_int(self):
key,val = 1234,12345
self.generic_new(key,val)
@dec.slow
def test_new_double_int(self):
key,val = 1234.,12345
self.generic_new(key,val)
@dec.slow
def test_new_std_string_int(self):
key,val = "hello",12345
self.generic_new(key,val)
@dec.slow
def test_new_complex_int(self):
key,val = 1+1j,12345
self.generic_new(key,val)
@dec.slow
def test_new_obj_int(self):
class Foo:
pass
key,val = Foo(),12345
self.generic_new(key,val)
@dec.slow
def test_overwrite_int_int(self):
key,val = 1234,12345
self.generic_overwrite(key,val)
@dec.slow
def test_overwrite_double_int(self):
key,val = 1234.,12345
self.generic_overwrite(key,val)
@dec.slow
def test_overwrite_std_string_int(self):
key,val = "hello",12345
self.generic_overwrite(key,val)
@dec.slow
def test_overwrite_complex_int(self):
key,val = 1+1j,12345
self.generic_overwrite(key,val)
@dec.slow
def test_overwrite_obj_int(self):
class Foo:
pass
key,val = Foo(),12345
self.generic_overwrite(key,val)
class TestDictDel(TestCase):
def generic(self,key):
# Test that deletion works correctly and that reference counts on the
# dict and key are handled correctly. After deletion, the key's refcount
# should be one less than before.
a = {}
a[key] = 1
inline_tools.inline("a.del(key);",['a','key'])
assert_(key not in a)
a[key] = 1
before = sys.getrefcount(a), sys.getrefcount(key)
inline_tools.inline("a.del(key);",['a','key'])
assert_(key not in a)
after = sys.getrefcount(a), sys.getrefcount(key)
assert_(before[0] == after[0])
assert_(before[1] == after[1] + 1)
@dec.slow
def test_int(self):
key = 1234
self.generic(key)
@dec.slow
def test_double(self):
key = 1234.
self.generic(key)
@dec.slow
def test_std_string(self):
key = "hello"
self.generic(key)
@dec.slow
def test_complex(self):
key = 1+1j
self.generic(key)
@dec.slow
def test_obj(self):
class Foo:
pass
key = Foo()
self.generic(key)
class TestDictOthers(TestCase):
@dec.slow
def test_clear(self):
a = {}
a["hello"] = 1
inline_tools.inline("a.clear();",['a'])
assert_(not a)
@dec.slow
def test_items(self):
a = {}
a["hello"] = 1
items = inline_tools.inline("return_val = a.items();",['a'])
assert_(items == a.items())
@dec.slow
def test_values(self):
a = {}
a["hello"] = 1
values = inline_tools.inline("return_val = a.values();",['a'])
assert_(values == a.values())
@dec.slow
def test_keys(self):
a = {}
a["hello"] = 1
keys = inline_tools.inline("return_val = a.keys();",['a'])
assert_(keys == a.keys())
@dec.slow
def test_update(self):
a,b = {},{}
a["hello"] = 1
b["hello"] = 2
inline_tools.inline("a.update(b);",['a','b'])
assert_(a == b)
if __name__ == "__main__":
run_module_suite()
| mit |
thedrow/django | tests/timezones/tests.py | 165 | 57662 | from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import connection, connections
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
requires_pytz = skipIf(pytz is None, "this test requires pytz")
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# time zones, which don't have Daylight Saving Time, so we can represent
# them easily with FixedOffset and use them directly as tzinfo in the
# constructors. settings.TIME_ZONE is forced to EAT. Most tests use a
# variant of datetime.datetime(2011, 9, 1, 13, 20, 30), which translates
# to 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
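# A minimal sketch of what a fixed-offset tzinfo yields (offset in minutes):
#   >>> timezone.get_fixed_timezone(180).utcoffset(None)
#   datetime.timedelta(0, 10800)  # i.e. +03:00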
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@requires_pytz
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall on the same day in EAT, but on different days
# (and in different months and years) in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_accepts_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_returns_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt])
self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class ForcedTimeZoneDatabaseTests(TransactionTestCase):
"""
Test the TIME_ZONE database configuration parameter.
Since this involves reading and writing to the same database through two
connections, this is a TransactionTestCase.
"""
available_apps = ['timezones']
@classmethod
def setUpClass(cls):
# @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The
# outermost takes precedence. Handle skipping manually instead.
if connection.features.supports_timezones:
raise SkipTest("Database has feature(s) supports_timezones")
if not connection.features.test_db_allows_multiple_connections:
raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections")
super(ForcedTimeZoneDatabaseTests, cls).setUpClass()
connections.databases['tz'] = connections.databases['default'].copy()
connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
@classmethod
def tearDownClass(cls):
connections['tz'].close()
del connections['tz']
del connections.databases['tz']
super(ForcedTimeZoneDatabaseTests, cls).tearDownClass()
def test_read_datetime(self):
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
Event.objects.create(dt=fake_dt)
event = Event.objects.using('tz').get()
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, dt)
def test_write_datetime(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.using('tz').create(dt=dt)
event = Event.objects.get()
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, fake_dt)
@skipUnlessDBFeature('supports_timezones')
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class UnsupportedTimeZoneDatabaseTests(TestCase):
def test_time_zone_parameter_not_supported_if_database_supports_timezone(self):
connections.databases['tz'] = connections.databases['default'].copy()
connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
tz_conn = connections['tz']
try:
with self.assertRaises(ImproperlyConfigured):
tz_conn.cursor()
finally:
connections['tz'].close() # in case the test fails
del connections['tz']
del connections.databases['tz']
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(SimpleTestCase):
# Backend-specific notes:
# - JSON supports only milliseconds, microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
# but when it loads this representation, it subtracts the offset and
# returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
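# For example, a datetime with microsecond=405060 round-trips through the
# JSON serializer as ".405" and comes back with microsecond=405000 (see the
# microsecond tests below).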
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
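# e.g. t('eat', 'utc') == '2011-09-01T13:20:30+03:00|2011-09-01T10:20:30+00:00'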
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@requires_pytz
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@requires_pytz
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@requires_pytz
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.template.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
context = Context()
self.assertEqual(tpl.render(context), "")
request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@requires_pytz
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@requires_pytz
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
# Datetime input formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@requires_pytz
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_pytz
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
@classmethod
def setUpTestData(cls):
# password = "secret"
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
is_superuser=True, username='super', first_name='Super', last_name='User',
email='super@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
)
def setUp(self):
self.client.login(username='super', password='secret')
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
| bsd-3-clause |
micbou/ycmd | ycmd/tests/javascript/debug_info_test.py | 4 | 2326 | # Copyright (C) 2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from mock import patch
from hamcrest import ( any_of, assert_that, contains, has_entries, has_entry,
instance_of, none )
from ycmd.tests.javascript import IsolatedYcmd, SharedYcmd
from ycmd.tests.test_utils import BuildRequest
@SharedYcmd
def DebugInfo_TypeScriptCompleter_test( app ):
request_data = BuildRequest( filetype = 'javascript' )
assert_that(
app.post_json( '/debug_info', request_data ).json,
has_entry( 'completer', has_entries( {
'name': 'TypeScript',
'servers': contains( has_entries( {
'name': 'TSServer',
'is_running': True,
'executable': instance_of( str ),
'pid': instance_of( int ),
'address': None,
'port': None,
'logfiles': contains( instance_of( str ) ),
'extras': contains( has_entries( {
'key': 'version',
'value': any_of( None, instance_of( str ) )
} ) )
} ) )
} ) )
)
@patch( 'ycmd.completers.javascript.hook.'
'ShouldEnableTypeScriptCompleter', return_value = False )
@patch( 'ycmd.completers.javascript.hook.'
'ShouldEnableTernCompleter', return_value = False )
@IsolatedYcmd
def DebugInfo_NoCompleter_test( app, *args ):
request_data = BuildRequest( filetype = 'javascript' )
assert_that(
app.post_json( '/debug_info', request_data ).json,
has_entry( 'completer', none() )
)
| gpl-3.0 |
suiyuan2009/tensorflow | tensorflow/examples/image_retraining/retrain_test.py | 31 | 5680 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-bad-import-order,unused-import
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
from tensorflow.examples.image_retraining import label_image
from tensorflow.examples.image_retraining import retrain
from tensorflow.python.framework import test_util
class ImageRetrainingTest(test_util.TensorFlowTestCase):
def dummyImageLists(self):
return {'label_one': {'dir': 'somedir', 'training': ['image_one.jpg',
'image_two.jpg'],
'testing': ['image_three.jpg', 'image_four.jpg'],
'validation': ['image_five.jpg', 'image_six.jpg']},
'label_two': {'dir': 'otherdir', 'training': ['image_one.jpg',
'image_two.jpg'],
'testing': ['image_three.jpg', 'image_four.jpg'],
'validation': ['image_five.jpg', 'image_six.jpg']}}
def testGetImagePath(self):
image_lists = self.dummyImageLists()
self.assertEqual('image_dir/somedir/image_one.jpg', retrain.get_image_path(
image_lists, 'label_one', 0, 'image_dir', 'training'))
self.assertEqual('image_dir/otherdir/image_four.jpg',
retrain.get_image_path(image_lists, 'label_two', 1,
'image_dir', 'testing'))
def testGetBottleneckPath(self):
image_lists = self.dummyImageLists()
self.assertEqual('bottleneck_dir/somedir/image_five.jpg_imagenet_v3.txt',
retrain.get_bottleneck_path(
image_lists, 'label_one', 0, 'bottleneck_dir',
'validation', 'imagenet_v3'))
def testShouldDistortImage(self):
self.assertEqual(False, retrain.should_distort_images(False, 0, 0, 0))
self.assertEqual(True, retrain.should_distort_images(True, 0, 0, 0))
self.assertEqual(True, retrain.should_distort_images(False, 10, 0, 0))
self.assertEqual(True, retrain.should_distort_images(False, 0, 1, 0))
self.assertEqual(True, retrain.should_distort_images(False, 0, 0, 50))
def testAddInputDistortions(self):
with tf.Graph().as_default():
with tf.Session() as sess:
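# Illustrative reading of the positional args (per retrain.add_input_distortions;
# verify against the current signature): flip_left_right, random_crop,
# random_scale, random_brightness, input_width, input_height, input_depth,
# input_mean, input_std.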
retrain.add_input_distortions(True, 10, 10, 10, 299, 299, 3, 128, 128)
self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortJPGInput:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortResult:0'))
@tf.test.mock.patch.object(retrain, 'FLAGS', learning_rate=0.01)
def testAddFinalTrainingOps(self, flags_mock):
with tf.Graph().as_default():
with tf.Session() as sess:
bottleneck = tf.placeholder(
tf.float32, [1, 1024],
name='bottleneck')
retrain.add_final_training_ops(5, 'final', bottleneck, 1024)
self.assertIsNotNone(sess.graph.get_tensor_by_name('final:0'))
def testAddEvaluationStep(self):
with tf.Graph().as_default():
final = tf.placeholder(tf.float32, [1], name='final')
gt = tf.placeholder(tf.float32, [1], name='gt')
self.assertIsNotNone(retrain.add_evaluation_step(final, gt))
def testLabelImage(self):
image_filename = ('../label_image/data/grace_hopper.jpg')
# Load some default data
label_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/labels.txt')
labels = label_image.load_labels(label_path)
self.assertEqual(len(labels), 3)
image_path = os.path.join(tf.resource_loader.get_data_files_path(),
image_filename)
image = label_image.load_image(image_path)
self.assertEqual(len(image), 61306)
# Create trivial graph; note that the two nodes don't meet
with tf.Graph().as_default():
jpeg = tf.constant(image)
# Input node that doesn't lead anywhere.
tf.image.decode_jpeg(jpeg, name='DecodeJpeg')
# Output node, that always outputs a constant.
tf.constant([[10, 30, 5]], name='final')
# As label_image outputs via print, we assume that
# if it returns, everything is OK.
result = label_image.run_graph(image, labels, jpeg, 'final:0', 3)
self.assertEqual(result, 0)
def testAddJpegDecoding(self):
with tf.Graph().as_default():
jpeg_data, mul_image = retrain.add_jpeg_decoding(10, 10, 3, 0, 255)
self.assertIsNotNone(jpeg_data)
self.assertIsNotNone(mul_image)
def testCreateModelInfo(self):
did_raise_value_error = False
try:
retrain.create_model_info('no_such_model_name')
except ValueError:
did_raise_value_error = True
self.assertTrue(did_raise_value_error)
model_info = retrain.create_model_info('inception_v3')
self.assertIsNotNone(model_info)
self.assertEqual(299, model_info['input_width'])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
MehdiSfr/tensor-flow | tensorflow/tensorboard/float_wrapper.py | 27 | 1776 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module providing a function for serializing JSON values with Infinity.
Python provides no way to override how json.dumps serializes
Infinity/-Infinity/NaN; if allow_nan is true, it encodes them as
Infinity/-Infinity/NaN, in violation of the JSON spec and in violation of what
JSON.parse accepts. If it's false, it throws a ValueError. Neither subclassing
JSONEncoder nor passing a function in the |default| keyword argument overrides
this.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
def WrapSpecialFloats(obj):
"""Replaces all instances of Infinity/-Infinity/NaN with strings."""
if obj == float('inf'):
return 'Infinity'
elif obj == float('-inf'):
return '-Infinity'
elif isinstance(obj, float) and math.isnan(obj):
return 'NaN'
elif isinstance(obj, list) or isinstance(obj, tuple):
return list(map(WrapSpecialFloats, obj))
elif isinstance(obj, dict):
return {
WrapSpecialFloats(k): WrapSpecialFloats(v)
for k, v in obj.items()
}
else:
return obj
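# Illustrative usage (not part of the module): wrap a value before serializing,
# e.g. json.dumps(WrapSpecialFloats({'loss': float('nan')})) yields
# '{"loss": "NaN"}', which JSON.parse can consume.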
| apache-2.0 |
evidation-health/bokeh | websocket_worker.py | 44 | 1122 | #!/usr/bin/env python
import argparse
from bokeh.server.websocket import make_app
import logging
def build_parser():
parser = argparse.ArgumentParser(description="start bokeh websocket")
parser.add_argument("--url-prefix",
help="url prefix",
type=str,
default=None
)
parser.add_argument("--zmqaddr",
help="zmq url",
action='append'
)
parser.add_argument("--ws-port",
help="port for websocket worker",
default=5007,
type=int
)
return parser
def run_args(args):
# don't know how to do default args with append and argparse
if args.zmqaddr is None:
args.zmqaddr = ["tcp://127.0.0.1:5007"]
app = make_app(args.url_prefix, args.zmqaddr, args.ws_port)
try:
app.start()
finally:
app.stop()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = build_parser()
args = parser.parse_args()
run_args(args)
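# Example invocation (illustrative; values shown are the defaults above):
# python websocket_worker.py --ws-port 5007 --zmqaddr tcp://127.0.0.1:5007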
| bsd-3-clause |
bsalimi/myria | jsonQueries/multiwayJoin_shumo/plan_fragment.py | 2 | 7206 | #!/usr/bin/env python
import json
def pretty_json(obj):
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ':'))
def twitter_small_relation_key():
relation_key = {
"userName" : "chushumo",
"programName" : "multiway_join",
"relationName" : "twitter_small"
}
return relation_key
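# The two scan fragments below implement a 2x2 hypercube shuffle for the
# self-join R JOIN S ON R.followee = S.follower. The four join workers form a
# 2x2 grid: R is hashed on follower and each tuple is sent to every cell in one
# row of the grid (cellPartition [[0,1],[2,3]]), while S is hashed on followee
# and sent to every cell in one column ([[0,2],[1,3]]). A row and a column
# intersect in exactly one cell, so each (R tuple, S tuple) pair meets at
# exactly one worker.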
def scan_R_then_shuffle():
scan = {
"opType" : "TableScan",
"opId" : "Scan(R)",
"relationKey" : twitter_small_relation_key()
}
hyper_shuffle = {
"opType" : "HyperShuffleProducer",
"opId" : "HyperShuffle(R)",
"argChild" : "Scan(R)",
"argOperatorId" : "hash(follower)",
"fieldIndexes" : [0],
"hyperCubeDimensions" : [2,2],
"cellPartition": [ [0,1],[2,3] ]
}
fragment = {
"operators" : [scan, hyper_shuffle]
}
return fragment
def scan_S_then_shuffle():
scan = {
"opType" : "TableScan",
"opId" : "Scan(S)",
"relationKey" : twitter_small_relation_key()
}
hyper_shuffle = {
"opType" : "HyperShuffleProducer",
"opId" : "HyperShuffle(S)",
"argChild" : "Scan(S)",
"argOperatorId" : "hash(followee)",
"fieldIndexes" : [1],
"hyperCubeDimensions" : [2,2],
"cellPartition": [ [0,2],[1,3] ]
}
fragment = {
"operators" : [scan, hyper_shuffle]
}
return fragment
def receive_then_join():
gatherR = {
"opType" : "HyperShuffleConsumer",
"opId" : "GatherR",
"argOperatorId" : "hash(follower)",
"arg_schema" : {
"columnTypes" : ["LONG_TYPE", "LONG_TYPE"],
"columnNames" : ["follower", "followee"]
}
}
gatherS = {
"opType" : "HyperShuffleConsumer",
"opId" : "GatherS",
"argOperatorId" : "hash(followee)",
"arg_schema" : {
"columnTypes" : ["LONG_TYPE", "LONG_TYPE"],
"columnNames" : ["follower", "followee"]
}
}
join = {
"opType" : "SymmetricHashJoin",
"opId" : "Join",
"argChild1" : "GatherR",
"argChild2" : "GatherS",
"argColumns1" : [1],
"argColumns2" : [0],
"argSelect1" : [0],
"argSelect2" : [1]
}
collect = {
"argChild": "Join",
"argOperatorId": "collect",
"opId": "SendResult",
"opType": "CollectProducer"
}
fragment = {
"operators": [gatherR, gatherS, join, collect]
}
return fragment
def collect_result():
gather = {
"argOperatorId": "collect",
"arg_schema": {
"columnNames": [
"follower",
"followee"
],
"columnTypes": [
"LONG_TYPE",
"LONG_TYPE"
]
},
"opId": "CollectResult",
"opType": "CollectConsumer"
}
insert = {
"argChild": "CollectResult",
"argOverwriteTable": True,
"opId": "Insert",
"opType": "DbInsert",
"relationKey": {
"programName": "multiway_join",
"relationName": "twitter_small_join_twitter_small",
"userName": "chushumo"
}
}
fragment = {
"operators": [ gather, insert],
"overrideWorkers" : [ 1 ]
}
return fragment
def two_dimension_multiway_join():
fragments = [scan_R_then_shuffle(), scan_S_then_shuffle(), receive_then_join(), collect_result()]
whole_plan = {
"fragments":fragments,
"logicalRa" : "two dimension multiway join",
"rawQuery" : "two dimension multiway join"
}
return whole_plan
def scan_R_then_partition():
scan = {
"opType" : "TableScan",
"opId" : "Scan(R)",
"relationKey" : twitter_small_relation_key()
}
shuffle = {
"opType" : "ShuffleProducer",
"opId" : "Shuffle(R)",
"argChild" : "Scan(R)",
"argOperatorId" : "hash(followee)",
"argPf" :
{
"type" : "SingleFieldHash",
"index" : 1
}
}
fragment = {
"operators" : [scan, shuffle]
}
return fragment
def scan_S_then_partition():
scan = {
"opType" : "TableScan",
"opId" : "Scan(S)",
"relationKey" : twitter_small_relation_key()
}
shuffle = {
"opType" : "ShuffleProducer",
"opId" : "Shuffle(S)",
"argChild" : "Scan(S)",
"argOperatorId" : "hash(follower)",
"argPf" :
{
"type" : "SingleFieldHash",
"index" : 0
}
}
fragment = {
"operators" : [scan, shuffle]
}
return fragment
def receive_partition_then_join():
gatherR = {
"opType" : "ShuffleConsumer",
"opId" : "GatherR",
"argOperatorId" : "hash(followee)",
"arg_schema" : {
"columnTypes" : ["LONG_TYPE", "LONG_TYPE"],
"columnNames" : ["follower", "followee"]
}
}
gatherS = {
"opType" : "ShuffleConsumer",
"opId" : "GatherS",
"argOperatorId" : "hash(follower)",
"arg_schema" : {
"columnTypes" : ["LONG_TYPE", "LONG_TYPE"],
"columnNames" : ["follower", "followee"]
}
}
join = {
"opType" : "SymmetricHashJoin",
"opId" : "Join",
"argChild1" : "GatherR",
"argChild2" : "GatherS",
"argColumns1" : [1],
"argColumns2" : [0],
"argSelect1" : [0],
"argSelect2" : [1]
}
collect = {
"argChild": "Join",
"argOperatorId": "collect",
"opId": "SendResult",
"opType": "CollectProducer"
}
fragment = {
"operators": [gatherR, gatherS, join, collect]
}
return fragment
def collect_partition_join_result():
gather = {
"argOperatorId": "collect",
"arg_schema": {
"columnNames": [
"follower",
"followee"
],
"columnTypes": [
"LONG_TYPE",
"LONG_TYPE"
]
},
"opId": "CollectResult",
"opType": "CollectConsumer"
}
insert = {
"argChild": "CollectResult",
"argOverwriteTable": True,
"opId": "Insert",
"opType": "DbInsert",
"relationKey": {
"programName": "multiway_join",
"relationName": "twitter_small_partition_join_twitter_small",
"userName": "chushumo"
}
}
fragment = {
"operators": [gather, insert],
"overrideWorkers" : [ 1 ]
}
return fragment
# as a baseline to validate
def partition_join():
fragments = [scan_R_then_partition(), scan_S_then_partition(), receive_partition_then_join(), collect_partition_join_result()]
whole_plan = {
"fragments":fragments,
"logicalRa" : "partition join",
"rawQuery" : "parittion join"
}
return whole_plan
#print pretty_json(two_dimension_multiway_join())
print pretty_json(partition_join())
| bsd-3-clause |
askeing/servo | tests/wpt/web-platform-tests/webdriver/tests/actions/special_keys.py | 9 | 1725 | # META: timeout=long
import pytest
import time
from tests.actions.support.keys import ALL_EVENTS, Keys
from tests.actions.support.refine import filter_dict, get_keys, get_events
@pytest.mark.parametrize("name,expected", ALL_EVENTS.items())
def test_webdriver_special_key_sends_keydown(session,
key_reporter,
key_chain,
name,
expected):
if name.startswith("F"):
# Prevent default behavior for F1, etc., but only after keydown
# bubbles up to body. (Otherwise activated browser menus/functions
# may interfere with subsequent tests.)
session.execute_script("""
document.body.addEventListener("keydown",
function(e) { e.preventDefault() });
""")
key_chain.key_down(getattr(Keys, name)).perform()
# only interested in keydown
first_event = get_events(session)[0]
# make a copy so we can throw out irrelevant keys and compare to events
expected = dict(expected)
del expected["value"]
# check and remove keys that aren't in expected
assert first_event["type"] == "keydown"
assert first_event["repeat"] == False
first_event = filter_dict(first_event, expected)
if first_event["code"] == None:
del first_event["code"]
del expected["code"]
assert first_event == expected
# only printable characters should be recorded in input field
entered_keys = get_keys(key_reporter)
if len(expected["key"]) == 1:
assert entered_keys == expected["key"]
else:
assert len(entered_keys) == 0
| mpl-2.0 |
euronautic/cosmos | code/graph-algorithms/transitive_closure_graph/transitive_closure_graph.py | 5 | 1457 | # Python program to print transitive closure of a graph
# Part of Cosmos by OpenGenus Foundation
from collections import defaultdict
# This class represents a directed graph using adjacency list representation
class Graph:
def __init__(self,vertices):
# No. of vertices
self.V = vertices
# default dictionary to store graph
self.graph = defaultdict(list)
# To store transitive closure
self.tc = [[0 for j in range(self.V)] for i in range(self.V)]
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
# A recursive DFS traversal function that finds
# all reachable vertices for s
def DFSUtil(self,s,v):
# Mark reachability from s to v as true.
self.tc[s][v] = 1
# Find all the vertices reachable through v
for i in self.graph[v]:
if self.tc[s][i]==0:
self.DFSUtil(s,i)
# The function to find transitive closure. It uses
# recursive DFSUtil()
def transitiveClosure(self):
# Call the recursive function to print DFS
# traversal starting from all vertices one by one
for i in range(self.V):
self.DFSUtil(i, i)
print (self.tc)
# Create a graph
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
print ("Transitive closure matrix is")
g.transitiveClosure()
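# Expected output for the graph above (each row is a reachability vector;
# vertex 3 only reaches itself, every other vertex reaches the whole graph):
# [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 1]]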
| gpl-3.0 |
limingzhou/aliyun-cli | aliyuncli/response.py | 11 | 8222 | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
# -*- coding:utf-8 -*-
import sys
import json
import text
from table import MultiTable
import jmespath
class Response(object):
def __init__(self,args):
self.args = args
def __call__(self, command,response, stream=None):
if stream is None:
stream = sys.stdout
has_filter, filter_value = _has_filter_param(self.args)
if has_filter:
expression = jmespath.compile(filter_value)
response = expression.search(response)
try:
self._format_response(command, response, stream)
except IOError as e:
pass
finally:
self._flush_stream(stream)
def _flush_stream(self, stream):
try:
stream.flush()
except IOError:
pass
class JSONResponse(Response):
def _format_response(self, command, response, stream=None):
if stream is None:
stream = sys.stdout
if response:
json.dump(response,stream, indent=4)
stream.write('\n')
class TextResponse(Response):
def __call__(self, command, response, stream=None):
if stream is None:
stream = sys.stdout
try:
self._format_response(response, stream)
finally:
# flush is needed to avoid the "close failed in file object
# destructor" in python2.x (see http://bugs.python.org/issue11380).
self._flush_stream(stream)
def _format_response(self, response, stream):
has_filter, filter_value = _has_filter_param(self.args)
if has_filter:
expression = jmespath.compile(filter_value)
response = expression.search(response)
text.format_text(response, stream)
class TableResponse(Response):
def __init__(self, args, table=None):
super(TableResponse, self).__init__(args)
self.table = MultiTable(initial_section=False,
column_separator='|')
def _format_response(self, command, response, stream):
if self._build_table(command, response):
try:
self.table.render(stream)
except IOError:
# If they're piping stdout to another process which exits before
# we're done writing all of our output, we'll get an error about a
# closed pipe which we can safely ignore.
pass
def _build_table(self, title, current, indent_level=0):
if not current:
return False
if title is not None:
self.table.new_section(title, indent_level=indent_level)
if isinstance(current, list):
if isinstance(current[0], dict):
self._build_sub_table_from_list(current, indent_level, title)
else:
for item in current:
if self._scalar_type(item):
self.table.add_row([item])
elif all(self._scalar_type(el) for el in item):
self.table.add_row(item)
else:
self._build_table(title=None, current=item)
if isinstance(current, dict):
# Render a single row section with keys as header
# and the row as the values, unless the value
# is a list.
self._build_sub_table_from_dict(current, indent_level)
return True
def _build_sub_table_from_dict(self, current, indent_level):
# Render a single row section with keys as header
# and the row as the values, unless the value
# is a list.
headers, more = self._group_scalar_keys(current)
if len(headers) == 1:
# Special casing if a dict has a single scalar key/value pair.
self.table.add_row([headers[0], current[headers[0]]])
elif headers:
self.table.add_row_header(headers)
self.table.add_row([current[k] for k in headers])
for remaining in more:
self._build_table(remaining, current[remaining],indent_level=indent_level + 1)
def _build_sub_table_from_list(self, current, indent_level, title):
headers, more = self._group_scalar_keys_from_list(current)
self.table.add_row_header(headers)
first = True
for element in current:
if not first and more:
self.table.new_section(title,
indent_level=indent_level)
self.table.add_row_header(headers)
first = False
# Use .get() to account for the fact that sometimes an element
# may not have all the keys from the header.
self.table.add_row([element.get(header, '') for header in headers])
for remaining in more:
# Some of the non scalar attributes may not necessarily
# be in every single element of the list, so we need to
# check this condition before recursing.
if remaining in element:
self._build_table(remaining, element[remaining],
indent_level=indent_level + 1)
def _scalar_type(self, element):
return not isinstance(element, (list, dict))
def _group_scalar_keys_from_list(self, list_of_dicts):
# We want to make sure we catch all the keys in the list of dicts.
# Most of the time each list element has the same keys, but sometimes
# a list element will have keys not defined in other elements.
headers = set()
more = set()
for item in list_of_dicts:
current_headers, current_more = self._group_scalar_keys(item)
headers.update(current_headers)
more.update(current_more)
headers = list(sorted(headers))
more = list(sorted(more))
return headers, more
def _group_scalar_keys(self, current):
# Given a dict, separate the keys into those whose values are
# scalar, and those whose values aren't. Return two lists,
# one is the scalar value keys, the second is the remaining keys.
more = []
headers = []
for element in current:
if self._scalar_type(current[element]):
headers.append(element)
else:
more.append(element)
headers.sort()
more.sort()
return headers, more
def _has_filter_param(args):
has = False
param = None
if isinstance(args,dict):
value = args.get('filter')
if isinstance(value,list) and len(value)>0:
param=value[0]
param = param.strip()
if len(param) >0:
has=True
return [has,param]
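# Illustrative example (hypothetical CLI input): with
# `--filter Instances.Instance[*].InstanceId` the parsed args contain
# {'filter': ['Instances.Instance[*].InstanceId']}, so this returns
# [True, 'Instances.Instance[*].InstanceId'] and the caller projects just the
# instance IDs out of the JSON response via jmespath.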
def get_response(output_type, parsed_args):
if output_type is None:
output_type = 'JSON'
output_type = output_type.lower()
if output_type == 'json':
return JSONResponse(parsed_args)
elif output_type == 'text':
return TextResponse(parsed_args)
elif output_type == 'table':
return TableResponse(parsed_args)
raise ValueError("Unknown output type: %s" % output_type)
def display_response(command, response,output,parsed_globals=None):
if output is None:
output = 'JSON'
formatter = get_response(output, parsed_globals)
formatter(command, response)
| apache-2.0 |
krzychb/rtd-test-bed | examples/protocols/asio/tcp_echo_server/asio_tcp_server_test.py | 1 | 2110 | import re
import os
import sys
import socket
try:
import IDF
except ImportError:
# this is a test case written with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set the environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` into sys.path before importing the FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_asio_tcp_server(env, extra_data):
"""
steps: |
1. join AP
2. Start server
3. Test connects to server and sends a test message
4. Test evaluates received test message from server
5. Test evaluates received test message on server stdout
"""
test_msg = b"echo message from client to server"
dut1 = env.get_dut("tcp_echo_server", "examples/protocols/asio/tcp_echo_server")
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "asio_tcp_echo_server.bin")
bin_size = os.path.getsize(binary_file)
IDF.log_performance("asio_tcp_echo_server_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("asio_tcp_echo_server_size", bin_size // 1024)
# 1. start test
dut1.start_app()
# 2. get the server IP address
data = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
# 3. create tcp client and connect to server
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cli.settimeout(30)
cli.connect((data[0], 2222))
cli.send(test_msg)
data = cli.recv(1024)
# 4. check the message received back from the server
if data == test_msg:
print("PASS: Received correct message")
else:
print("Failure!")
raise ValueError('Wrong data received from asio tcp server: {} (expected: {})'.format(data, test_msg))
# 5. check the client message appears also on server terminal
dut1.expect(test_msg.decode())
if __name__ == '__main__':
test_examples_protocol_asio_tcp_server()
| apache-2.0 |
bob-white/UnityIronPythonConsole | Assets/IronPythonConsole/Plugins/Lib/unittest/case.py | 41 | 42078 | """Test case implementation"""
import collections
import sys
import functools
import difflib
import pprint
import re
import warnings
from . import result
from .util import (
strclass, safe_repr, unorderable_list_difference,
_count_diff_all_purpose, _count_diff_hashable
)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestResult.skip() or one of the skipping decorators
instead of raising this directly.
"""
pass
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
pass
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = False
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80*8
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._resultForDoCleanups = None
try:
testMethod = getattr(self, methodName)
except AttributeError:
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, self.assertDictEqual)
self.addTypeEqualityFunc(list, self.assertListEqual)
self.addTypeEqualityFunc(tuple, self.assertTupleEqual)
self.addTypeEqualityFunc(set, self.assertSetEqual)
self.addTypeEqualityFunc(frozenset, self.assertSetEqual)
self.addTypeEqualityFunc(unicode, self.assertMultiLineEqual)
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
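# Illustrative example (hypothetical names): a subclass can register
#   self.addTypeEqualityFunc(Point, self.assertPointsEqual)
# in its __init__ or setUp, after which assertEqual(p1, p2) dispatches to
# assertPointsEqual whenever both arguments are exactly of type Point
# (see _getAssertEqualityFunc below for the exact-type dispatch).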
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
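# Illustrative example (hypothetical path): self.addCleanup(os.remove, '/tmp/scratch')
# registered in setUp runs after tearDown (and even when setUp itself fails);
# cleanups run in reverse order of registration (LIFO), as implemented in
# doCleanups below via pop(-1).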
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
self.setUp()
except SkipTest as e:
self._addSkip(result, str(e))
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
else:
try:
testMethod()
except KeyboardInterrupt:
raise
except self.failureException:
result.addFailure(self, sys.exc_info())
except _ExpectedFailure as e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, sys.exc_info())
except SkipTest as e:
self._addSkip(result, str(e))
except:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
success = False
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
result = self._resultForDoCleanups
ok = True
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
try:
function(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
ok = False
result.addError(self, sys.exc_info())
return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self)
if callableObj is None:
return context
with context:
callableObj(*args, **kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
difference between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
difference between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# Synonyms for assertion methods
# The plurals are undocumented. Keep them that way to discourage use.
# Do not add more. Do not remove.
# Going through a deprecation cycle on these would annoy many people.
assertEquals = assertEqual
assertNotEquals = assertNotEqual
assertAlmostEquals = assertAlmostEqual
assertNotAlmostEquals = assertNotAlmostEqual
assert_ = assertTrue
# These fail* assertion method names are pending deprecation and will
# be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = _deprecate(assertEqual)
failIfEqual = _deprecate(assertNotEqual)
failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
failUnless = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.iteritems():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
Counter(iter(expected_seq)))
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(actual_seq), list(expected_seq)
with warnings.catch_warnings():
if sys.py3kwarning:
# Silence Py3k warning raised during the sorting
for _msg in ["(code|dict|type) inequality comparisons",
"builtin_function_or_method order comparisons",
"comparing unequal types"]:
warnings.filterwarnings("ignore", _msg, DeprecationWarning)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, basestring,
'First argument is not a string')
self.assertIsInstance(second, basestring,
'Second argument is not a string')
if first != second:
firstlines = first.splitlines(True)
secondlines = second.splitlines(True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, expected_regexp)
if callable_obj is None:
return context
with context:
callable_obj(*args, **kwargs)
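    # Editor-added illustration (not part of the stdlib source): both call
    # styles of assertRaisesRegexp are supported.
    #
    #     with self.assertRaisesRegexp(ValueError, 'invalid literal'):
    #         int('XYZ')
    #
    #     self.assertRaisesRegexp(ValueError, 'invalid literal', int, 'XYZ')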
def assertRegexpMatches(self, text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
raise self.failureException(msg)
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = msg or "Regexp matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regexp.pattern,
text)
raise self.failureException(msg)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
| mpl-2.0 |
tmxdyf/CouchPotatoServer | libs/pyutil/platformutil.py | 106 | 3607 | # Thanks to Daenyth for help porting this to Arch Linux.
import os, platform, re, subprocess
_distributor_id_cmdline_re = re.compile("(?:Distributor ID:)\s*(.*)", re.I)
_release_cmdline_re = re.compile("(?:Release:)\s*(.*)", re.I)
_distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
_release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
global _distname,_version
_distname = None
_version = None
def get_linux_distro():
""" Tries to determine the name of the Linux OS distribution name.
First, try to parse a file named "/etc/lsb-release". If it exists, and
contains the "DISTRIB_ID=" line and the "DISTRIB_RELEASE=" line, then return
the strings parsed from that file.
If that doesn't work, then invoke platform.dist().
If that doesn't work, then try to execute "lsb_release", as standardized in
2001:
http://refspecs.freestandards.org/LSB_1.0.0/gLSB/lsbrelease.html
The current version of the standard is here:
http://refspecs.freestandards.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/lsbrelease.html
    It returns the distributor id and release that lsb_release emitted, as strings.
Returns a tuple (distname,version). Distname is what LSB calls a
"distributor id", e.g. "Ubuntu". Version is what LSB calls a "release",
e.g. "8.04".
A version of this has been submitted to python as a patch for the standard
library module "platform":
http://bugs.python.org/issue3937
"""
global _distname,_version
if _distname and _version:
return (_distname, _version)
try:
etclsbrel = open("/etc/lsb-release", "rU")
for line in etclsbrel:
m = _distributor_id_file_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
m = _release_file_re.search(line)
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
(_distname, _version) = platform.dist()[:2]
if _distname and _version:
return (_distname, _version)
try:
p = subprocess.Popen(["lsb_release", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rc = p.wait()
if rc == 0:
for line in p.stdout.readlines():
m = _distributor_id_cmdline_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
m = _release_cmdline_re.search(p.stdout.read())
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
if os.path.exists("/etc/arch-release"):
return ("Arch_Linux", "")
return (_distname,_version)
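# Editor-added illustration; the return values below are examples and depend
# entirely on the host system:
#
#     >>> get_linux_distro()
#     ('Ubuntu', '8.04')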
def get_platform():
# Our version of platform.platform(), telling us both less and more than the
# Python Standard Library's version does.
# We omit details such as the Linux kernel version number, but we add a
# more detailed and correct rendition of the Linux distribution and
# distribution-version.
if "linux" in platform.system().lower():
return platform.system()+"-"+"_".join(get_linux_distro())+"-"+platform.machine()+"-"+"_".join([x for x in platform.architecture() if x])
else:
return platform.platform()
| gpl-3.0 |
ytaben/cyphesis | rulesets/deeds/world/tasks/Combat.py | 3 | 6055 | #This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2005-2006 Al Riddoch (See the file COPYING for details).
from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D
from random import *
import server
import sys  # used by the error-handling path in tick_operation
class Combat(server.Task):
"""A very simple combat system example."""
def attack_operation(self, op):
""" The attack op is FROM the the character that initiated combat which
we term the attacker, TO the character that is attacker which we
term the defender. We store the IDs of both. """
# Check if the attacked characters stamina is too low for combat
if self.character.stamina < 0.1:
# print "Aborting defender stamina low"
self.irrelevant()
return
assert(op.from_ != op.to)
if op.to != self.character.id:
self.oponent = op.to
# print "Attack operation is not to this character"
# We have initiative
else:
self.oponent = op.from_
# print "Attack operation is to this character"
self.surprise = True
# We do not have initiative
        # Attach this task to the attacker. It's already implicitly attached
        # to the defender who owns this task.
a=server.world.get_object(self.oponent)
# Check if the attacking characters stamina is too low for combat
if not a or a.stamina < 0.1:
self.irrelevant()
return
# a.set_task(self.cppthing)
self.square_range = 25
def tick_operation(self, op):
""" This method is called repeatedly, each time a combat turn occurs.
In this example the interval is fixed, but it can be varied.
        self.oponent holds the ID of the character this combatant is
        fighting; each tick runs one combat turn against that opponent. """
# if self.count() < 2:
# print "Someone has dropped out"
# self.irrelevant()
# return
assert(self.character.id == op.to)
if self.character.stamina <= 0:
# print "I am exhausted"
self.irrelevant()
return
attacker = self.character
if not attacker:
sys.stderr.write("Attacker owning combat task destroyed, but task still running")
self.irrelevant()
return
if attacker.stamina <= 0:
# print "Attacker exhausted"
self.irrelevant()
return
defender = server.world.get_object(self.oponent)
if not defender:
# print "No defender"
self.irrelevant()
return
if hasattr(self, 'surprise') and self.surprise:
# print 'Surprised!'
self.surprise = False
return self.next_tick(0.75 + uniform(0,0.25))
if square_distance(self.character.location, defender.location) > self.square_range:
return self.next_tick(1.75 + uniform(0,0.25))
a=self.character.id
d=self.oponent
# A very simple formula is used to determine the damage done
damage = (attacker.statistics.attack / defender.statistics.defence) / uniform(2,10)
# Damage is counted against stamina, to ensure combat is non lethal,
# and make recovery easier.
stamina=defender.stamina-damage
if stamina<0: stamina=0
set_arg=Entity(self.oponent, stamina=stamina)
        # We send 3 operations to indicate what is going on. The imaginary ops
# provide emotes for the actions. The sight(attack) operation
# indicates that a singleshot animation of attacking should be
# triggered on the attacker.
attacker.send_world(Operation("imaginary", Entity(description="hits for massive damage."), to=attacker))
attacker.send_world(Operation("sight", Operation("attack", to=d, from_=a)))
defender.send_world(Operation("imaginary", Entity(description="defends skillfully."), to=defender))
# If the defenders stamina has reached zero, combat is over, and emotes
# are sent to indicate this.
if stamina <= 0:
set_arg.status = defender.status - 0.1
defender.send_world(Operation("imaginary", Entity(description="has been defeated"), to=defender))
defender.send_world(Operation("sight", Operation("collapse", from_=d)))
attacker.send_world(Operation("imaginary", Entity(description="is victorious"), to=attacker))
self.irrelevant()
res=Oplist()
# This set op makes the change to defenders stamina, and a small health
# change if they have been defeated
res.append(Operation("set", set_arg, to=defender))
# Turn the attacker to face the defender. This has to go through
# the mind2body interface, so it does not interrupt what the
# the character is doing.
faceop=self.face(defender)
if faceop:
faceop=attacker.mind2body(faceop)
if faceop:
res.append(faceop)
# Don't return the following tick op if this task is now complete
if self.obsolete():
return res
# Schedule a new tick op
res.append(self.next_tick(1.75 + uniform(0,0.25)))
return res
def face(self, other):
""" Turn to face that another character, ensuring that
we are facing the character we are hitting """
vector = distance_to(self.character.location, other.location)
vector.z = 0
if vector.square_mag() < 0.1:
return
vector = vector.unit_vector()
newloc = Location(self.character.location.parent)
newloc.orientation = Quaternion(Vector3D(1,0,0), vector)
return Operation("move", Entity(self.character.id, location=newloc))
| gpl-2.0 |
sargas/scipy | scipy/weave/examples/binary_search.py | 3 | 6766 | # Offers example of inline C for binary search algorithm.
# Borrowed from Kalle Svensson in the Python Cookbook.
# The results are nearly in the "not worth it" category.
#
# C:\home\ej\wrk\scipy\compiler\examples>python binary_search.py
# Binary search for 3000 items in 100000 length list of integers:
# speed in python: 0.139999985695
# speed in c: 0.0900000333786
# speed up: 1.41
# search(a,3450) 3450 3450
# search(a,-1) -1 -1
# search(a,10001) 10001 10001
#
# Note -- really need to differentiate between conversion errors and
# run time errors. This would reduce useless compiles and provide a
# more intelligent control of things.
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
#from compiler import inline_tools
import scipy.weave.inline_tools as inline_tools
from bisect import bisect_left as bisect
import types
def c_int_search(seq,t,chk=1):
# do partial type checking in Python.
# checking that list items are ints should happen in py_to_scalar<int>
#if chk:
# assert(type(t) is int)
# assert(type(seq) is list)
code = """
#line 33 "binary_search.py"
if (!PyList_Check(py_seq))
py::fail(PyExc_TypeError, "seq must be a list");
if (!PyInt_Check(py_t))
py::fail(PyExc_TypeError, "t must be an integer");
int val, m, min = 0;
int max = seq.len()- 1;
for(;;)
{
if (max < min )
{
return_val = -1;
break;
}
m = (min + max) / 2;
val = py_to_int(PyList_GET_ITEM(py_seq,m),"val");
if (val < t)
min = m + 1;
else if (val > t)
max = m - 1;
else
{
return_val = m;
break;
}
}
"""
#return inline_tools.inline(code,['seq','t'],compiler='msvc')
return inline_tools.inline(code,['seq','t'],verbose = 2)
def c_int_search_scxx(seq,t,chk=1):
# do partial type checking in Python.
# checking that list items are ints should happen in py_to_scalar<int>
if chk:
assert(type(t) is int)
assert(type(seq) is list)
code = """
#line 67 "binary_search.py"
int val, m, min = 0;
int max = seq.len()- 1;
for(;;)
{
if (max < min )
{
return_val = -1;
break;
}
m = (min + max) / 2;
val = seq[m];
if (val < t)
min = m + 1;
else if (val > t)
max = m - 1;
else
{
return_val = m;
break;
}
}
"""
#return inline_tools.inline(code,['seq','t'],compiler='msvc')
return inline_tools.inline(code,['seq','t'],verbose = 2)
try:
from numpy import *
def c_array_int_search(seq,t):
code = """
#line 62 "binary_search.py"
int val, m, min = 0;
int max = Nseq[0] - 1;
PyObject *py_val;
for(;;)
{
if (max < min )
{
return_val = -1;
break;
}
m = (min + max) / 2;
val = seq[m];
if (val < t)
min = m + 1;
else if (val > t)
max = m - 1;
else
{
return_val = m;
break;
}
}
"""
#return inline_tools.inline(code,['seq','t'],compiler='msvc')
return inline_tools.inline(code,['seq','t'],verbose = 2,
extra_compile_args=['-O2','-G6'])
except:
pass
def py_int_search(seq, t):
min = 0; max = len(seq) - 1
while 1:
if max < min:
return -1
m = (min + max) / 2
if seq[m] < t:
min = m + 1
elif seq[m] > t:
max = m - 1
else:
return m
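# Editor-added sanity checks (not in the original example script):
#     py_int_search([1, 3, 5, 7], 5)   # -> 2  (index of 5)
#     py_int_search([1, 3, 5, 7], 4)   # -> -1 (value absent)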
import time
def search_compare(a,n):
print('Binary search for %d items in %d length list of integers:'%(n,m))
t1 = time.time()
for i in range(n):
py_int_search(a,i)
t2 = time.time()
py = (t2-t1)
print(' speed in python:', (t2 - t1))
# bisect
t1 = time.time()
for i in range(n):
bisect(a,i)
t2 = time.time()
bi = (t2-t1) +1e-20 # protect against div by zero
print(' speed of bisect:', bi)
print(' speed up: %3.2f' % (py/bi))
# get it in cache
c_int_search(a,i)
t1 = time.time()
for i in range(n):
c_int_search(a,i,chk=1)
t2 = time.time()
sp = (t2-t1)+1e-20 # protect against div by zero
print(' speed in c:',sp)
print(' speed up: %3.2f' % (py/sp))
# get it in cache
c_int_search(a,i)
t1 = time.time()
for i in range(n):
c_int_search(a,i,chk=0)
t2 = time.time()
sp = (t2-t1)+1e-20 # protect against div by zero
print(' speed in c(no asserts):',sp)
print(' speed up: %3.2f' % (py/sp))
# get it in cache
c_int_search_scxx(a,i)
t1 = time.time()
for i in range(n):
c_int_search_scxx(a,i,chk=1)
t2 = time.time()
sp = (t2-t1)+1e-20 # protect against div by zero
print(' speed for scxx:',sp)
print(' speed up: %3.2f' % (py/sp))
# get it in cache
c_int_search_scxx(a,i)
t1 = time.time()
for i in range(n):
c_int_search_scxx(a,i,chk=0)
t2 = time.time()
sp = (t2-t1)+1e-20 # protect against div by zero
print(' speed for scxx(no asserts):',sp)
print(' speed up: %3.2f' % (py/sp))
# get it in cache
a = array(a)
try:
a = array(a)
c_array_int_search(a,i)
t1 = time.time()
for i in range(n):
c_array_int_search(a,i)
t2 = time.time()
sp = (t2-t1)+1e-20 # protect against div by zero
print(' speed in c(numpy arrays):',sp)
print(' speed up: %3.2f' % (py/sp))
except:
pass
if __name__ == "__main__":
# note bisect returns index+1 compared to other algorithms
m= 100000
a = range(m)
n = 50000
search_compare(a,n)
print('search(a,3450)', c_int_search(a,3450), py_int_search(a,3450), bisect(a,3450))
print('search(a,-1)', c_int_search(a,-1), py_int_search(a,-1), bisect(a,-1))
print('search(a,10001)', c_int_search(a,10001), py_int_search(a,10001),bisect(a,10001))
| bsd-3-clause |
dvliman/jaikuengine | .google_appengine/lib/django-1.5/django/conf/urls/__init__.py | 116 | 2538 | from django.core.urlresolvers import (RegexURLPattern,
RegexURLResolver, LocaleRegexURLResolver)
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils import six
__all__ = ['handler403', 'handler404', 'handler500', 'include', 'patterns', 'url']
handler403 = 'django.views.defaults.permission_denied'
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
if isinstance(arg, tuple):
# callable returning a namespace hint
if namespace:
raise ImproperlyConfigured('Cannot override the namespace for a dynamic module that provides a namespace')
urlconf_module, app_name, namespace = arg
else:
# No namespace hint - use manually provided namespace
urlconf_module = arg
if isinstance(urlconf_module, six.string_types):
urlconf_module = import_module(urlconf_module)
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
# Make sure we can iterate through the patterns (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
# Test if the LocaleRegexURLResolver is used within the include;
# this should throw an error since this is not allowed!
if isinstance(url_pattern, LocaleRegexURLResolver):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.')
return (urlconf_module, app_name, namespace)
def patterns(prefix, *args):
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
elif isinstance(t, RegexURLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list
def url(regex, view, kwargs=None, name=None, prefix=''):
if isinstance(view, (list,tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, six.string_types):
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return RegexURLPattern(regex, view, kwargs, name)
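# Editor-added illustrative URLconf; the view paths and names here are
# hypothetical:
#
#     urlpatterns = patterns('',
#         url(r'^articles/(\d{4})/$', 'news.views.year_archive', name='year-archive'),
#         url(r'^comments/', include('django.contrib.comments.urls')),
#     )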
| apache-2.0 |
asadziach/tensorflow | tensorflow/contrib/slim/python/slim/nets/inception.py | 165 | 1697 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Brings inception_v1, inception_v2 and inception_v3 under one namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_arg_scope
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_base
from tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2
from tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2_arg_scope
from tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2_base
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_arg_scope
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
# pylint: enable=unused-import
| apache-2.0 |
maartenq/ansible | lib/ansible/constants.py | 16 | 7696 | # Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ast import literal_eval
from jinja2 import Template
from string import ascii_letters, digits
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean, BOOLEANS_TRUE
from ansible.module_utils.six import string_types
from ansible.config.manager import ConfigManager, ensure_type, get_ini_config_value
def _warning(msg):
''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write '''
try:
from __main__ import display
display.warning(msg)
except Exception:
import sys
sys.stderr.write(' [WARNING] %s\n' % (msg))
def _deprecated(msg, version='2.8'):
''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write '''
try:
from __main__ import display
display.deprecated(msg, version=version)
except Exception:
import sys
sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
def mk_boolean(value):
''' moved to module_utils'''
_deprecated('ansible.constants.mk_boolean() is deprecated. Use ansible.module_utils.parsing.convert_bool.boolean() instead')
return boolean(value, strict=False)
def get_config(parser, section, key, env_var, default_value, value_type=None, expand_relative_paths=False):
    ''' kept for backwards compatibility, but deprecated '''
    _deprecated('ansible.constants.get_config() is deprecated. There is a new config API, see porting docs.')
value = None
# small reconstruction of the old code env/ini/default
value = os.environ.get(env_var, None)
if value is None:
try:
value = get_ini_config_value(parser, {'key': key, 'section': section})
except Exception:
pass
if value is None:
value = default_value
value = ensure_type(value, value_type)
return value
def set_constant(name, value, export=vars()):
''' sets constants and returns resolved options dict '''
export[name] = value
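# Editor-added illustration of the call pattern used by the loop at the bottom
# of this module (the setting name is just an example):
#     set_constant('DEFAULT_DEBUG', False)   # constants.DEFAULT_DEBUG is now False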
# CONSTANTS ### yes, actual ones
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun', 'enable', 'machinectl']
BECOME_ERROR_STRINGS = {
'sudo': 'Sorry, try again.',
'su': 'Authentication failure',
'pbrun': '',
'pfexec': '',
'doas': 'Permission denied',
'dzdo': '',
'ksu': 'Password incorrect',
'pmrun': 'You are not permitted to run this command',
'enable': '',
'machinectl': '',
} # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
'sudo': 'sorry, a password is required to run sudo',
'su': '',
'pbrun': '',
'pfexec': '',
'doas': 'Authorization required',
'dzdo': '',
'ksu': 'No password given',
'pmrun': '',
'enable': '',
'machinectl': '',
} # FIXME: deal with i18n
BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
BOOL_TRUE = BOOLEANS_TRUE
CONTROLER_LANG = os.getenv('LANG', 'en_US.UTF-8')
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
# FIXME: expand to other plugins, but never doc fragments
CONFIGURABLE_PLUGINS = ('cache', 'callback', 'connection', 'inventory', 'lookup', 'shell', 'cliconf', 'httpapi')
# NOTE: always update the docs/docsite/Makefile to match
DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy', 'vars')
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES") # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
MODULE_REQUIRE_ARGS = ('command', 'win_command', 'shell', 'win_shell', 'raw', 'script')
MODULE_NO_JSON = ('command', 'win_command', 'shell', 'win_shell', 'raw')
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python')
TREE_DIR = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
# FIXME: remove once play_context mangling is removed
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
COMMON_CONNECTION_VARS = frozenset(('ansible_connection', 'ansible_host', 'ansible_user', 'ansible_shell_executable',
'ansible_port', 'ansible_pipelining', 'ansible_password', 'ansible_timeout',
'ansible_shell_type', 'ansible_module_compression', 'ansible_private_key_file'))
MAGIC_VARIABLE_MAPPING = dict(
# base
connection=('ansible_connection', ),
module_compression=('ansible_module_compression', ),
shell=('ansible_shell_type', ),
executable=('ansible_shell_executable', ),
# connection common
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
password=('ansible_ssh_pass', 'ansible_password'),
port=('ansible_ssh_port', 'ansible_port'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
# networking modules
network_os=('ansible_network_os', ),
connection_user=('ansible_connection_user',),
# ssh TODO: remove
ssh_executable=('ansible_ssh_executable', ),
ssh_common_args=('ansible_ssh_common_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
# docker TODO: remove
docker_extra_args=('ansible_docker_extra_args', ),
# become
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
# deprecated
sudo=('ansible_sudo', ),
sudo_user=('ansible_sudo_user', ),
sudo_pass=('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe=('ansible_sudo_exe', ),
sudo_flags=('ansible_sudo_flags', ),
su=('ansible_su', ),
su_user=('ansible_su_user', ),
su_pass=('ansible_su_password', 'ansible_su_pass'),
su_exe=('ansible_su_exe', ),
su_flags=('ansible_su_flags', ),
)
# POPULATE SETTINGS FROM CONFIG ###
config = ConfigManager()
# Generate constants from config
for setting in config.data.get_settings():
value = setting.value
if setting.origin == 'default' and \
isinstance(setting.value, string_types) and \
(setting.value.startswith('{{') and setting.value.endswith('}}')):
try:
t = Template(setting.value)
value = t.render(vars())
try:
value = literal_eval(value)
except ValueError:
pass # not a python data structure
except Exception:
pass # not templatable
value = ensure_type(value, setting.type)
set_constant(setting.name, value)
for warn in config.WARNINGS:
_warning(warn)
| gpl-3.0 |
azumimuo/family-xbmc-addon | script.module.youtube.dl/lib/youtube_dl/extractor/hitrecord.py | 50 | 2269 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
float_or_none,
int_or_none,
try_get,
)
class HitRecordIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hitrecord\.org/records/(?P<id>\d+)'
_TEST = {
'url': 'https://hitrecord.org/records/2954362',
'md5': 'fe1cdc2023bce0bbb95c39c57426aa71',
'info_dict': {
'id': '2954362',
'ext': 'mp4',
'title': 'A Very Different World (HITRECORD x ACLU)',
'description': 'md5:e62defaffab5075a5277736bead95a3d',
'duration': 139.327,
'timestamp': 1471557582,
'upload_date': '20160818',
'uploader': 'Zuzi.C12',
'uploader_id': '362811',
'view_count': int,
'like_count': int,
'comment_count': int,
'tags': list,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'https://hitrecord.org/api/web/records/%s' % video_id, video_id)
title = video['title']
video_url = video['source_url']['mp4_url']
tags = None
tags_list = try_get(video, lambda x: x['tags'], list)
if tags_list:
tags = [
t['text']
for t in tags_list
if isinstance(t, dict) and t.get('text') and
isinstance(t['text'], compat_str)]
return {
'id': video_id,
'url': video_url,
'title': title,
'description': clean_html(video.get('body')),
'duration': float_or_none(video.get('duration'), 1000),
'timestamp': int_or_none(video.get('created_at_i')),
'uploader': try_get(
video, lambda x: x['user']['username'], compat_str),
'uploader_id': try_get(
video, lambda x: compat_str(x['user']['id'])),
'view_count': int_or_none(video.get('total_views_count')),
'like_count': int_or_none(video.get('hearts_count')),
'comment_count': int_or_none(video.get('comments_count')),
'tags': tags,
}
| gpl-2.0 |
CounterpartyXCP/counterparty-cli | counterpartycli/clientapi.py | 1 | 10729 | import sys
import logging
import binascii
from urllib.parse import quote_plus as urlencode
from counterpartylib.lib import config, script
from counterpartycli import util
from counterpartycli import wallet
from counterpartycli import messages
from counterpartycli.messages import get_pubkeys
logger = logging.getLogger()
DEFAULT_REQUESTS_TIMEOUT = 5 # seconds
class ConfigurationError(Exception):
pass
def initialize(testnet=False, testcoin=False, regtest=False, customnet="",
counterparty_rpc_connect=None, counterparty_rpc_port=None,
counterparty_rpc_user=None, counterparty_rpc_password=None,
counterparty_rpc_ssl=False, counterparty_rpc_ssl_verify=False,
wallet_name=None, wallet_connect=None, wallet_port=None,
wallet_user=None, wallet_password=None,
wallet_ssl=False, wallet_ssl_verify=False,
requests_timeout=DEFAULT_REQUESTS_TIMEOUT):
def handle_exception(exc_type, exc_value, exc_traceback):
logger.error("Unhandled Exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
# testnet
config.TESTNET = testnet or False
config.REGTEST = regtest or False
if len(customnet) > 0:
config.CUSTOMNET = True
config.REGTEST = True
else:
config.CUSTOMNET = False
# testcoin
config.TESTCOIN = testcoin or False
##############
# THINGS WE CONNECT TO
# Server host (Bitcoin Core)
config.COUNTERPARTY_RPC_CONNECT = counterparty_rpc_connect or 'localhost'
# Server RPC port (Bitcoin Core)
if counterparty_rpc_port:
config.COUNTERPARTY_RPC_PORT = counterparty_rpc_port
else:
if config.TESTNET:
config.COUNTERPARTY_RPC_PORT = config.DEFAULT_RPC_PORT_TESTNET
elif config.CUSTOMNET:
config.COUNTERPARTY_RPC_PORT = config.DEFAULT_RPC_PORT_REGTEST
elif config.REGTEST:
config.COUNTERPARTY_RPC_PORT = config.DEFAULT_RPC_PORT_REGTEST
else:
config.COUNTERPARTY_RPC_PORT = config.DEFAULT_RPC_PORT
try:
config.COUNTERPARTY_RPC_PORT = int(config.COUNTERPARTY_RPC_PORT)
if not (int(config.COUNTERPARTY_RPC_PORT) > 1 and int(config.COUNTERPARTY_RPC_PORT) < 65535):
raise ConfigurationError('invalid RPC port number')
except:
        raise ConfigurationError("Please specify a valid port number for the counterparty-rpc-port configuration parameter")
# Server RPC user (Bitcoin Core)
config.COUNTERPARTY_RPC_USER = counterparty_rpc_user or 'rpc'
# Server RPC password (Bitcoin Core)
if counterparty_rpc_password:
config.COUNTERPARTY_RPC_PASSWORD = counterparty_rpc_password
else:
config.COUNTERPARTY_RPC_PASSWORD = None
# Server RPC SSL
config.COUNTERPARTY_RPC_SSL = counterparty_rpc_ssl or False # Default to off.
# Server RPC SSL Verify
config.COUNTERPARTY_RPC_SSL_VERIFY = counterparty_rpc_ssl_verify or False # Default to off (support self‐signed certificates)
# Construct server URL.
config.COUNTERPARTY_RPC = config.COUNTERPARTY_RPC_CONNECT + ':' + str(config.COUNTERPARTY_RPC_PORT)
if config.COUNTERPARTY_RPC_PASSWORD:
config.COUNTERPARTY_RPC = urlencode(config.COUNTERPARTY_RPC_USER) + ':' + urlencode(config.COUNTERPARTY_RPC_PASSWORD) + '@' + config.COUNTERPARTY_RPC
if config.COUNTERPARTY_RPC_SSL:
config.COUNTERPARTY_RPC = 'https://' + config.COUNTERPARTY_RPC
else:
config.COUNTERPARTY_RPC = 'http://' + config.COUNTERPARTY_RPC
config.COUNTERPARTY_RPC += '/rpc/'
# BTC Wallet name
config.WALLET_NAME = wallet_name or 'bitcoincore'
# BTC Wallet host
config.WALLET_CONNECT = wallet_connect or 'localhost'
# BTC Wallet port
if wallet_port:
config.WALLET_PORT = wallet_port
else:
if config.TESTNET:
config.WALLET_PORT = config.DEFAULT_BACKEND_PORT_TESTNET
elif config.CUSTOMNET:
config.WALLET_PORT = config.DEFAULT_BACKEND_PORT_REGTEST
elif config.REGTEST:
config.WALLET_PORT = config.DEFAULT_BACKEND_PORT_REGTEST
else:
config.WALLET_PORT = config.DEFAULT_BACKEND_PORT
try:
config.WALLET_PORT = int(config.WALLET_PORT)
if not (int(config.WALLET_PORT) > 1 and int(config.WALLET_PORT) < 65535):
raise ConfigurationError('invalid wallet API port number')
except:
        raise ConfigurationError("Please specify a valid port number for the wallet-port configuration parameter")
# BTC Wallet user
config.WALLET_USER = wallet_user or 'bitcoinrpc'
# BTC Wallet password
if wallet_password:
config.WALLET_PASSWORD = wallet_password
else:
raise ConfigurationError('wallet RPC password not set. (Use configuration file or --wallet-password=PASSWORD)')
# BTC Wallet SSL
config.WALLET_SSL = wallet_ssl or False # Default to off.
# BTC Wallet SSL Verify
config.WALLET_SSL_VERIFY = wallet_ssl_verify or False # Default to off (support self‐signed certificates)
# Construct BTC wallet URL.
config.WALLET_URL = urlencode(config.WALLET_USER) + ':' + urlencode(config.WALLET_PASSWORD) + '@' + config.WALLET_CONNECT + ':' + str(config.WALLET_PORT)
if config.WALLET_SSL:
config.WALLET_URL = 'https://' + config.WALLET_URL
else:
config.WALLET_URL = 'http://' + config.WALLET_URL
config.REQUESTS_TIMEOUT = requests_timeout
# Encoding
if config.TESTCOIN:
config.PREFIX = b'XX' # 2 bytes (possibly accidentally created)
else:
config.PREFIX = b'CNTRPRTY' # 8 bytes
# (more) Testnet
if config.TESTNET:
config.MAGIC_BYTES = config.MAGIC_BYTES_TESTNET
if config.TESTCOIN:
config.ADDRESSVERSION = config.ADDRESSVERSION_TESTNET
config.P2SH_ADDRESSVERSION = config.P2SH_ADDRESSVERSION_TESTNET
config.BLOCK_FIRST = config.BLOCK_FIRST_TESTNET_TESTCOIN
config.BURN_START = config.BURN_START_TESTNET_TESTCOIN
config.BURN_END = config.BURN_END_TESTNET_TESTCOIN
config.UNSPENDABLE = config.UNSPENDABLE_TESTNET
else:
config.ADDRESSVERSION = config.ADDRESSVERSION_TESTNET
config.P2SH_ADDRESSVERSION = config.P2SH_ADDRESSVERSION_TESTNET
config.BLOCK_FIRST = config.BLOCK_FIRST_TESTNET
config.BURN_START = config.BURN_START_TESTNET
config.BURN_END = config.BURN_END_TESTNET
config.UNSPENDABLE = config.UNSPENDABLE_TESTNET
elif config.CUSTOMNET:
custom_args = customnet.split('|')
if len(custom_args) == 3:
config.MAGIC_BYTES = config.MAGIC_BYTES_REGTEST
config.ADDRESSVERSION = binascii.unhexlify(custom_args[1])
config.P2SH_ADDRESSVERSION = binascii.unhexlify(custom_args[2])
config.BLOCK_FIRST = config.BLOCK_FIRST_REGTEST
config.BURN_START = config.BURN_START_REGTEST
config.BURN_END = config.BURN_END_REGTEST
config.UNSPENDABLE = custom_args[0]
else:
            raise ConfigurationError("Custom net parameter must look like UNSPENDABLE_ADDRESS|ADDRESSVERSION|P2SH_ADDRESSVERSION (version bytes in HH format)")
elif config.REGTEST:
config.MAGIC_BYTES = config.MAGIC_BYTES_REGTEST
if config.TESTCOIN:
config.ADDRESSVERSION = config.ADDRESSVERSION_REGTEST
config.P2SH_ADDRESSVERSION = config.P2SH_ADDRESSVERSION_REGTEST
config.BLOCK_FIRST = config.BLOCK_FIRST_REGTEST_TESTCOIN
config.BURN_START = config.BURN_START_REGTEST_TESTCOIN
config.BURN_END = config.BURN_END_REGTEST_TESTCOIN
config.UNSPENDABLE = config.UNSPENDABLE_REGTEST
else:
config.ADDRESSVERSION = config.ADDRESSVERSION_REGTEST
config.P2SH_ADDRESSVERSION = config.P2SH_ADDRESSVERSION_REGTEST
config.BLOCK_FIRST = config.BLOCK_FIRST_REGTEST
config.BURN_START = config.BURN_START_REGTEST
config.BURN_END = config.BURN_END_REGTEST
config.UNSPENDABLE = config.UNSPENDABLE_REGTEST
else:
config.MAGIC_BYTES = config.MAGIC_BYTES_MAINNET
if config.TESTCOIN:
config.ADDRESSVERSION = config.ADDRESSVERSION_MAINNET
config.P2SH_ADDRESSVERSION = config.P2SH_ADDRESSVERSION_MAINNET
config.BLOCK_FIRST = config.BLOCK_FIRST_MAINNET_TESTCOIN
config.BURN_START = config.BURN_START_MAINNET_TESTCOIN
config.BURN_END = config.BURN_END_MAINNET_TESTCOIN
config.UNSPENDABLE = config.UNSPENDABLE_MAINNET
else:
config.ADDRESSVERSION = config.ADDRESSVERSION_MAINNET
config.P2SH_ADDRESSVERSION = config.P2SH_ADDRESSVERSION_MAINNET
config.BLOCK_FIRST = config.BLOCK_FIRST_MAINNET
config.BURN_START = config.BURN_START_MAINNET
config.BURN_END = config.BURN_END_MAINNET
config.UNSPENDABLE = config.UNSPENDABLE_MAINNET
WALLET_METHODS = [
'get_wallet_addresses', 'get_btc_balances', 'sign_raw_transaction',
'get_pubkey', 'is_valid', 'is_mine', 'get_btc_balance', 'send_raw_transaction',
'wallet', 'asset', 'balances', 'pending', 'is_locked', 'unlock', 'wallet_last_block',
'sweep'
]
def call(method, args, pubkey_resolver=None):
"""
Unified function to call Wallet and Server API methods
Should be used by applications like `counterparty-gui`
:Example:
import counterpartycli.clientapi
clientapi.initialize(...)
unsigned_hex = clientapi.call('create_send', {...})
signed_hex = clientapi.call('sign_raw_transaction', unsigned_hex)
tx_hash = clientapi.call('send_raw_transaction', signed_hex)
"""
if method in WALLET_METHODS:
func = getattr(wallet, method)
return func(**args)
else:
if method.startswith('create_'):
# Get provided pubkeys from params.
pubkeys = []
for address_name in ['source', 'destination']:
if address_name in args:
address = args[address_name]
if script.is_multisig(address) or address_name != 'destination': # We don’t need the pubkey for a mono‐sig destination.
pubkeys += get_pubkeys(address, pubkey_resolver=pubkey_resolver)
args['pubkey'] = pubkeys
result = util.api(method, args)
if method.startswith('create_'):
messages.check_transaction(method, args, result)
return result
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit |
lsqtongxin/django | tests/check_framework/test_templates.py | 288 | 1403 | from copy import deepcopy
from django.core.checks.templates import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckTemplateSettingsAppDirsTest(SimpleTestCase):
TEMPLATES_APP_DIRS_AND_LOADERS = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'loaders': ['django.template.loaders.filesystem.Loader'],
},
},
]
@property
def func(self):
from django.core.checks.templates import check_setting_app_dirs_loaders
return check_setting_app_dirs_loaders
@override_settings(TEMPLATES=TEMPLATES_APP_DIRS_AND_LOADERS)
def test_app_dirs_and_loaders(self):
"""
Error if template loaders are specified and APP_DIRS is True.
"""
self.assertEqual(self.func(None), [E001])
def test_app_dirs_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['APP_DIRS']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(self.func(None), [])
def test_loaders_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['OPTIONS']['loaders']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(self.func(None), [])
| bsd-3-clause |
GaZ3ll3/numpy | numpy/linalg/lapack_lite/fortran.py | 132 | 3476 | from __future__ import division, absolute_import, print_function
import re
import itertools
def isBlank(line):
return not line
def isLabel(line):
return line[0].isdigit()
def isComment(line):
return line[0] != ' '
def isContinuation(line):
return line[5] != ' '
COMMENT, STATEMENT, CONTINUATION = 0, 1, 2
def lineType(line):
"""Return the type of a line of Fortan code."""
if isBlank(line):
return COMMENT
elif isLabel(line):
return STATEMENT
elif isComment(line):
return COMMENT
elif isContinuation(line):
return CONTINUATION
else:
return STATEMENT
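# Editor-added classification examples (fixed-form Fortran column rules):
#     lineType('C comment text')     # -> COMMENT (non-blank in column 1)
#     lineType('10    CONTINUE')     # -> STATEMENT (statement label)
#     lineType('     $    + B(I)')   # -> CONTINUATION (non-blank column 6)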
class LineIterator(object):
"""LineIterator(iterable)
Return rstrip()'d lines from iterable, while keeping a count of the
line number in the .lineno attribute.
"""
def __init__(self, iterable):
object.__init__(self)
self.iterable = iter(iterable)
self.lineno = 0
def __iter__(self):
return self
def __next__(self):
self.lineno += 1
line = next(self.iterable)
line = line.rstrip()
return line
next = __next__
class PushbackIterator(object):
"""PushbackIterator(iterable)
Return an iterator for which items can be pushed back into.
Call the .pushback(item) method to have item returned as the next
value of .next().
"""
def __init__(self, iterable):
object.__init__(self)
self.iterable = iter(iterable)
self.buffer = []
def __iter__(self):
return self
def __next__(self):
if self.buffer:
return self.buffer.pop()
else:
return next(self.iterable)
def pushback(self, item):
self.buffer.append(item)
next = __next__
def fortranSourceLines(fo):
"""Return an iterator over statement lines of a Fortran source file.
Comment and blank lines are stripped out, and continuation lines are
merged.
"""
numberingiter = LineIterator(fo)
# add an extra '' at the end
with_extra = itertools.chain(numberingiter, [''])
pushbackiter = PushbackIterator(with_extra)
for line in pushbackiter:
t = lineType(line)
if t == COMMENT:
continue
elif t == STATEMENT:
lines = [line]
# this is where we need the extra '', so we don't finish reading
# the iterator when we don't want to handle that
for next_line in pushbackiter:
t = lineType(next_line)
if t == CONTINUATION:
lines.append(next_line[6:])
else:
pushbackiter.pushback(next_line)
break
yield numberingiter.lineno, ''.join(lines)
else:
raise ValueError("jammed: continuation line not expected: %s:%d" %
(fo.name, numberingiter.lineno))
def getDependencies(filename):
"""For a Fortran source file, return a list of routines declared as EXTERNAL
in it.
"""
fo = open(filename)
external_pat = re.compile(r'^\s*EXTERNAL\s', re.I)
routines = []
for lineno, line in fortranSourceLines(fo):
m = external_pat.match(line)
if m:
names = line = line[m.end():].strip().split(',')
names = [n.strip().lower() for n in names]
names = [n for n in names if n]
routines.extend(names)
fo.close()
return routines
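# Editor-added usage sketch; the file name and result are hypothetical:
#     getDependencies('zgesv.f')   # e.g. ['xerbla', 'zgetrf', 'zgetrs']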
| bsd-3-clause |
yd0str/infernal-twin | build/pip/pip/_vendor/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
| gpl-3.0 |
Endika/OpenUpgrade | openerp/workflow/service.py | 378 | 4972 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from helpers import Session
from helpers import Record
from openerp.workflow.instance import WorkflowInstance
# import instance
class WorkflowService(object):
CACHE = {}
@classmethod
def clear_cache(cls, dbname):
cls.CACHE[dbname] = {}
@classmethod
def new(cls, cr, uid, model_name, record_id):
return cls(Session(cr, uid), Record(model_name, record_id))
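    # Editor-added sketch of the expected call pattern; the model name and
    # signal are hypothetical:
    #     WorkflowService.new(cr, uid, 'sale.order', order_id).validate('order_confirm')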
def __init__(self, session, record):
assert isinstance(session, Session)
assert isinstance(record, Record)
self.session = session
self.record = record
self.cr = self.session.cr
def write(self):
self.cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s',
(self.record.id or None, self.record.model or None, 'active')
)
for (instance_id,) in self.cr.fetchall():
WorkflowInstance(self.session, self.record, {'id': instance_id}).update()
def trigger(self):
self.cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (self.record.id, self.record.model))
res = self.cr.fetchall()
for (instance_id,) in res:
self.cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (self.session.uid, instance_id,))
current_uid, current_model_name, current_record_id = self.cr.fetchone()
current_session = Session(self.session.cr, current_uid)
current_record = Record(current_model_name, current_record_id)
WorkflowInstance(current_session, current_record, {'id': instance_id}).update()
def delete(self):
WorkflowInstance(self.session, self.record, {}).delete()
def create(self):
WorkflowService.CACHE.setdefault(self.cr.dbname, {})
wkf_ids = WorkflowService.CACHE[self.cr.dbname].get(self.record.model, None)
if not wkf_ids:
self.cr.execute('select id from wkf where osv=%s and on_create=True', (self.record.model,))
wkf_ids = self.cr.fetchall()
WorkflowService.CACHE[self.cr.dbname][self.record.model] = wkf_ids
for (wkf_id, ) in wkf_ids:
WorkflowInstance.create(self.session, self.record, wkf_id)
def validate(self, signal):
result = False
        # ids of all active workflow instances for a corresponding resource (id, model_name)
self.cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (self.record.id, self.record.model, 'active'))
# TODO: Refactor the workflow instance object
for (instance_id,) in self.cr.fetchall():
wi = WorkflowInstance(self.session, self.record, {'id': instance_id})
res2 = wi.validate(signal)
result = result or res2
return result
def redirect(self, new_rid):
# get ids of wkf instances for the old resource (res_id)
# CHECKME: shouldn't we get only active instances?
self.cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))
for old_inst_id, workflow_id in self.cr.fetchall():
# first active instance for new resource (new_rid), using same wkf
self.cr.execute(
'SELECT id '\
'FROM wkf_instance '\
'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
(new_rid, self.record.model, workflow_id, 'active'))
new_id = self.cr.fetchone()
if new_id:
# select all workitems which "wait" for the old instance
self.cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
for (item_id,) in self.cr.fetchall():
# redirect all those workitems to the wkf instance of the new resource
self.cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
| agpl-3.0 |
textbook/flask-forecaster | tests/integration/test_home_page.py | 1 | 1720 | import re
from flask import url_for
import pytest
from selenium.webdriver.common.by import By
from tests.helpers import slow
from tests.integration.integration_helpers import (
enter_api_token,
go_to_home_page,
wait_for_element,
wait_for_title,
)
@pytest.mark.usefixtures('live_server')
@slow
class TestHomePage:
def test_home_page_available(self, selenium):
go_to_home_page(selenium)
title = 'Tracker Forecaster'
wait_for_title(selenium, title)
assert selenium.title == 'Tracker Forecaster'
assert 'Enter Tracker API token:' in selenium.page_source
selenium.close()
def test_token_entry_rejected(self, selenium):
go_to_home_page(selenium)
enter_api_token(selenium, 'dummy API token')
err_msg = 'API token must be 32 alphanumeric characters'
assert err_msg in selenium.page_source
selenium.close()
def test_token_entry_accepted(self, selenium, config):
go_to_home_page(selenium)
enter_api_token(selenium, config.get('VALID_TOKEN'))
assert 'Projects' in selenium.page_source
selenium.close()
def test_project_links(self, selenium, config):
go_to_home_page(selenium)
enter_api_token(selenium, config.get('VALID_TOKEN'))
link = wait_for_element(selenium, By.CSS_SELECTOR, '.project-entry a')
project_id = self._get_project_id_from_link(link)
link.click()
assert selenium.current_url == url_for(
'project',
project_id=project_id,
_external=True,
)
def _get_project_id_from_link(self, link):
return int(re.search(r'\d+$', link.get_attribute('href')).group(0))
| isc |
dariobottazzi/kafka-python | test/test_codec.py | 3 | 2762 | import struct
import unittest2
from kafka.codec import (
has_snappy, gzip_encode, gzip_decode,
snappy_encode, snappy_decode
)
from kafka.protocol import (
create_gzip_message, create_message, create_snappy_message, KafkaProtocol
)
from testutil import *
class TestCodec(unittest2.TestCase):
def test_gzip(self):
for i in xrange(1000):
s1 = random_string(100)
s2 = gzip_decode(gzip_encode(s1))
self.assertEquals(s1, s2)
@unittest2.skipUnless(has_snappy(), "Snappy not available")
def test_snappy(self):
for i in xrange(1000):
s1 = random_string(100)
s2 = snappy_decode(snappy_encode(s1))
self.assertEquals(s1, s2)
@unittest2.skipUnless(has_snappy(), "Snappy not available")
def test_snappy_detect_xerial(self):
import kafka as kafka1
_detect_xerial_stream = kafka1.codec._detect_xerial_stream
header = b'\x82SNAPPY\x00\x00\x00\x00\x01\x00\x00\x00\x01Some extra bytes'
false_header = b'\x01SNAPPY\x00\x00\x00\x01\x00\x00\x00\x01'
random_snappy = snappy_encode('SNAPPY' * 50)
short_data = b'\x01\x02\x03\x04'
self.assertTrue(_detect_xerial_stream(header))
self.assertFalse(_detect_xerial_stream(b''))
self.assertFalse(_detect_xerial_stream(b'\x00'))
self.assertFalse(_detect_xerial_stream(false_header))
self.assertFalse(_detect_xerial_stream(random_snappy))
self.assertFalse(_detect_xerial_stream(short_data))
@unittest2.skipUnless(has_snappy(), "Snappy not available")
def test_snappy_decode_xerial(self):
header = b'\x82SNAPPY\x00\x00\x00\x00\x01\x00\x00\x00\x01'
random_snappy = snappy_encode('SNAPPY' * 50)
block_len = len(random_snappy)
random_snappy2 = snappy_encode('XERIAL' * 50)
block_len2 = len(random_snappy2)
to_test = header \
+ struct.pack('!i', block_len) + random_snappy \
            + struct.pack('!i', block_len2) + random_snappy2
self.assertEquals(snappy_decode(to_test), ('SNAPPY' * 50) + ('XERIAL' * 50))
@unittest2.skipUnless(has_snappy(), "Snappy not available")
def test_snappy_encode_xerial(self):
to_ensure = b'\x82SNAPPY\x00\x00\x00\x00\x01\x00\x00\x00\x01' + \
'\x00\x00\x00\x18' + \
'\xac\x02\x14SNAPPY\xfe\x06\x00\xfe\x06\x00\xfe\x06\x00\xfe\x06\x00\x96\x06\x00' + \
'\x00\x00\x00\x18' + \
'\xac\x02\x14XERIAL\xfe\x06\x00\xfe\x06\x00\xfe\x06\x00\xfe\x06\x00\x96\x06\x00'
to_test = ('SNAPPY' * 50) + ('XERIAL' * 50)
compressed = snappy_encode(to_test, xerial_compatible=True, xerial_blocksize=300)
self.assertEquals(compressed, to_ensure)
| apache-2.0 |
maestrano/odoo | addons/marketing_campaign/report/__init__.py | 441 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import campaign_analysis
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GetSomeBlocks/ServerStatus | resources/lib/twisted/twisted/conch/ssh/common.py | 56 | 2643 | # -*- test-case-name: twisted.conch.test.test_ssh -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Common functions for the SSH classes.
Maintainer: Paul Swartz
"""
import struct, warnings
try:
from Crypto import Util
except ImportError:
warnings.warn("PyCrypto not installed, but continuing anyways!",
RuntimeWarning)
from twisted.python import randbytes
def NS(t):
"""
net string
"""
return struct.pack('!L',len(t)) + t
def getNS(s, count=1):
"""
get net string
"""
ns = []
c = 0
for i in range(count):
l, = struct.unpack('!L',s[c:c+4])
ns.append(s[c+4:4+l+c])
c += 4 + l
return tuple(ns) + (s[c:],)
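# Editor-added doctest-style illustration:
#     >>> NS('ssh-rsa')
#     '\x00\x00\x00\x07ssh-rsa'
#     >>> getNS(NS('a') + NS('bc') + 'rest', count=2)
#     ('a', 'bc', 'rest')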
def MP(number):
if number==0: return '\000'*4
assert number>0
bn = Util.number.long_to_bytes(number)
if ord(bn[0])&128:
bn = '\000' + bn
return struct.pack('>L',len(bn)) + bn
def getMP(data, count=1):
"""
Get multiple precision integer out of the string. A multiple precision
integer is stored as a 4-byte length followed by length bytes of the
integer. If count is specified, get count integers out of the string.
The return value is a tuple of count integers followed by the rest of
the data.
"""
mp = []
c = 0
for i in range(count):
length, = struct.unpack('>L',data[c:c+4])
mp.append(Util.number.bytes_to_long(data[c+4:c+4+length]))
c += 4 + length
return tuple(mp) + (data[c:],)
def _MPpow(x, y, z):
"""return the MP version of (x**y)%z
"""
return MP(pow(x,y,z))
def ffs(c, s):
"""
first from second
goes through the first list, looking for items in the second, returns the first one
"""
for i in c:
if i in s: return i
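# Editor-added note: e.g. ffs('abcd', 'cd') returns 'c' -- the first item of
# the first argument that also appears in the second.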
getMP_py = getMP
MP_py = MP
_MPpow_py = _MPpow
pyPow = pow
def _fastgetMP(data, count=1):
mp = []
c = 0
for i in range(count):
length = struct.unpack('!L', data[c:c+4])[0]
mp.append(long(gmpy.mpz(data[c + 4:c + 4 + length][::-1] + '\x00', 256)))
c += length + 4
return tuple(mp) + (data[c:],)
def _fastMP(i):
i2 = gmpy.mpz(i).binary()[::-1]
return struct.pack('!L', len(i2)) + i2
def _fastMPpow(x, y, z=None):
r = pyPow(gmpy.mpz(x),y,z).binary()[::-1]
return struct.pack('!L', len(r)) + r
def _fastpow(x, y, z=None):
return pyPow(gmpy.mpz(x), y, z)
def install():
global getMP, MP, _MPpow
getMP = _fastgetMP
MP = _fastMP
_MPpow = _fastMPpow
__builtins__['pow'] = _fastpow # evil evil
try:
import gmpy
install()
except ImportError:
pass
| mit |
kevhill/luigi | test/dynamic_import_test.py | 19 | 1462 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import LuigiTestCase
import luigi
import luigi.interface
import tempfile
import re
_testing_glob_var = None
class CmdlineTest(LuigiTestCase):
def test_dynamic_loading(self):
with tempfile.NamedTemporaryFile(dir='test/', prefix="_foo_module", suffix='.py') as temp_module_file:
temp_module_file.file.write(b'''
import luigi
class FooTask(luigi.Task):
x = luigi.IntParameter()
def run(self):
luigi._testing_glob_var = self.x
''')
temp_module_file.file.flush()
temp_module_path = temp_module_file.name
temp_module_name = re.search(r'/(_foo_module.*).py', temp_module_path).group(1)
luigi.interface.run(['--module', temp_module_name, 'FooTask', '--x', '123', '--local-scheduler', '--no-lock'])
self.assertEqual(luigi._testing_glob_var, 123)
| apache-2.0 |
stadelmanma/OpenPNM | test/unit/Phases/models/SurfaceTensionTest.py | 2 | 1862 | import OpenPNM
import scipy as sp
class SurfaceTensionTest:
def setup_class(self):
self.net = OpenPNM.Network.Cubic(shape=[3, 3, 3])
self.phase = OpenPNM.Phases.GenericPhase(network=self.net)
self.phase['pore.temperature'] = 298.0 # K
self.phase['pore.molecular_weight'] = 0.018 # kg/mol
self.phase['pore.critical_temperature'] = 647.15 # K
self.phase['pore.critical_pressure'] = 3771000.0 # Pa
self.phase['pore.salinity'] = 0 # g/kg
self.phase['pore.molar_density'] = 55.5 # mol/m3
def test_water(self):
f = OpenPNM.Phases.models.surface_tension.water
self.phase.models.add(propname='pore.surface_tension',
model=f)
assert sp.allclose(self.phase['pore.surface_tension'], 0.07199533)
def test_eotvos(self):
f = OpenPNM.Phases.models.surface_tension.eotvos
self.phase.models.add(propname='pore.surface_tension',
model=f,
k=0.000014)
assert sp.allclose(self.phase['pore.surface_tension'], 0.07112169)
def test_guggenheim_katayama(self):
f = OpenPNM.Phases.models.surface_tension.guggenheim_katayama
self.phase.models.add(propname='pore.surface_tension',
model=f,
K2=0.0000014,
n=0.1)
assert sp.allclose(self.phase['pore.surface_tension'], 0.27582571)
def test_brock_bird_scaling(self):
f = OpenPNM.Phases.models.surface_tension.brock_bird_scaling
self.phase.models.add(propname='pore.surface_tension',
model=f,
sigma_o=0.0608,
To=363)
assert sp.allclose(self.phase['pore.surface_tension'], 0.07820761)
| mit |
Houzz/luigi | examples/wordcount_hadoop.py | 23 | 2729 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
import luigi.contrib.hadoop
import luigi.contrib.hdfs
# To make this run, you probably want to edit /etc/luigi/client.cfg and add something like:
#
# [hadoop]
# jar: /usr/lib/hadoop-xyz/hadoop-streaming-xyz-123.jar
class InputText(luigi.ExternalTask):
"""
This task is a :py:class:`luigi.task.ExternalTask` which means it doesn't generate the
:py:meth:`~.InputText.output` target on its own instead relying on the execution something outside of Luigi
to produce it.
"""
date = luigi.DateParameter()
def output(self):
"""
Returns the target output for this task.
In this case, it expects a file to be present in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget(self.date.strftime('/tmp/text/%Y-%m-%d.txt'))
class WordCount(luigi.contrib.hadoop.JobTask):
"""
This task runs a :py:class:`luigi.contrib.hadoop.JobTask`
over the target data returned by :py:meth:`~/.InputText.output` and
writes the result into its :py:meth:`~.WordCount.output` target.
This class uses :py:meth:`luigi.contrib.hadoop.JobTask.run`.
"""
date_interval = luigi.DateIntervalParameter()
def requires(self):
"""
This task's dependencies:
* :py:class:`~.InputText`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return [InputText(date) for date in self.date_interval.dates()]
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget('/tmp/text-count/%s' % self.date_interval)
def mapper(self, line):
for word in line.strip().split():
yield word, 1
def reducer(self, key, values):
yield key, sum(values)
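# Illustrative invocation (hypothetical date interval; assumes the HDFS
# inputs from InputText exist and the [hadoop] jar is configured as noted
# above):
#
#   python wordcount_hadoop.py WordCount --date-interval 2012-06 --local-scheduler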
if __name__ == '__main__':
luigi.run()
| apache-2.0 |
partofthething/home-assistant | homeassistant/components/tankerkoenig/sensor.py | 10 | 4399 | """Tankerkoenig sensor integration."""
import logging
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CURRENCY_EURO,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DOMAIN, NAME
_LOGGER = logging.getLogger(__name__)
ATTR_BRAND = "brand"
ATTR_CITY = "city"
ATTR_FUEL_TYPE = "fuel_type"
ATTR_HOUSE_NUMBER = "house_number"
ATTR_IS_OPEN = "is_open"
ATTR_POSTCODE = "postcode"
ATTR_STATION_NAME = "station_name"
ATTR_STREET = "street"
ATTRIBUTION = "Data provided by https://creativecommons.tankerkoenig.de"
ICON = "mdi:gas-station"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the tankerkoenig sensors."""
if discovery_info is None:
return
tankerkoenig = hass.data[DOMAIN]
async def async_update_data():
"""Fetch data from API endpoint."""
try:
return await tankerkoenig.fetch_data()
except LookupError as err:
raise UpdateFailed("Failed to fetch data") from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=NAME,
update_method=async_update_data,
update_interval=tankerkoenig.update_interval,
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
stations = discovery_info.values()
entities = []
for station in stations:
for fuel in tankerkoenig.fuel_types:
if fuel not in station:
_LOGGER.warning(
"Station %s does not offer %s fuel", station["id"], fuel
)
continue
sensor = FuelPriceSensor(
fuel,
station,
coordinator,
f"{NAME}_{station['name']}_{fuel}",
tankerkoenig.show_on_map,
)
entities.append(sensor)
_LOGGER.debug("Added sensors %s", entities)
async_add_entities(entities)
class FuelPriceSensor(CoordinatorEntity):
"""Contains prices for fuel in a given station."""
def __init__(self, fuel_type, station, coordinator, name, show_on_map):
"""Initialize the sensor."""
super().__init__(coordinator)
self._station = station
self._station_id = station["id"]
self._fuel_type = fuel_type
self._name = name
self._latitude = station["lat"]
self._longitude = station["lng"]
self._city = station["place"]
self._house_number = station["houseNumber"]
self._postcode = station["postCode"]
self._street = station["street"]
self._price = station[fuel_type]
self._show_on_map = show_on_map
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
@property
def unit_of_measurement(self):
"""Return unit of measurement."""
return CURRENCY_EURO
@property
def state(self):
"""Return the state of the device."""
# The fuel_type key is absent when the fuel station is closed; use .get() instead of [] to avoid a KeyError
return self.coordinator.data[self._station_id].get(self._fuel_type)
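# e.g. (illustrative): for a closed station the coordinator payload may be
# just {"status": "closed"}, so .get() returns None and the entity state
# shows as unknown instead of raising.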
@property
def unique_id(self) -> str:
"""Return a unique identifier for this entity."""
return f"{self._station_id}_{self._fuel_type}"
@property
def device_state_attributes(self):
"""Return the attributes of the device."""
data = self.coordinator.data[self._station_id]
attrs = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_BRAND: self._station["brand"],
ATTR_FUEL_TYPE: self._fuel_type,
ATTR_STATION_NAME: self._station["name"],
ATTR_STREET: self._street,
ATTR_HOUSE_NUMBER: self._house_number,
ATTR_POSTCODE: self._postcode,
ATTR_CITY: self._city,
}
if self._show_on_map:
attrs[ATTR_LATITUDE] = self._latitude
attrs[ATTR_LONGITUDE] = self._longitude
if data is not None and "status" in data:
attrs[ATTR_IS_OPEN] = data["status"] == "open"
return attrs
| mit |
skymanaditya1/numpy | numpy/f2py/f90mod_rules.py | 45 | 9787 | #!/usr/bin/env python
"""
Build F90 module support for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/02/03 19:30:23 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.27 $"[10:-1]
f2py_version = 'See `f2py -v`'
import numpy as np
from .auxfuncs import (
applyrules, dictappend, hasbody, hasnote, isallocatable, isfunction,
isintent_hide, ismodule, isprivate, isroutine, isstringarray, l_or,
outmess
)
from . import capi_maps
from . import func2subr
from .crackfortran import undo_rmbadname, undo_rmbadname1
options = {}
def findf90modules(m):
if ismodule(m):
return [m]
if not hasbody(m):
return []
ret = []
for b in m['body']:
if ismodule(b):
ret.append(b)
else:
ret = ret + findf90modules(b)
return ret
fgetdims1 = """\
external f2pysetdata
logical ns
integer r,i,j
integer(%d) s(*)
ns = .FALSE.
if (allocated(d)) then
do i=1,r
if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then
ns = .TRUE.
end if
end do
if (ns) then
deallocate(d)
end if
end if
if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize
fgetdims2 = """\
end if
if (allocated(d)) then
do i=1,r
s(i) = size(d,i)
end do
end if
flag = 1
call f2pysetdata(d,allocated(d))"""
fgetdims2_sa = """\
end if
if (allocated(d)) then
do i=1,r
s(i) = size(d,i)
end do
!s(r) must be equal to len(d(1))
end if
flag = 2
call f2pysetdata(d,allocated(d))"""
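# Illustrative note: the Fortran fragments above are spliced into generated
# wrapper subroutines by buildhooks() below; e.g. for a rank-2 allocatable
# array the emitted getdims routine is roughly fgetdims1, then an
# "allocate(d(s(1),s(2)))" line, then fgetdims2 (or fgetdims2_sa for string
# arrays).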
def buildhooks(pymod):
global fgetdims1, fgetdims2
from . import rules
ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [],
'need': ['F_FUNC', 'arrayobject.h'],
'separatorsfor': {'includes0': '\n', 'includes': '\n'},
'docs': ['"Fortran 90/95 modules:\\n"'],
'latexdoc': []}
fhooks = ['']
def fadd(line, s=fhooks):
s[0] = '%s\n %s' % (s[0], line)
doc = ['']
def dadd(line, s=doc):
s[0] = '%s\n%s' % (s[0], line)
for m in findf90modules(pymod):
sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [
m['name']], []
sargsp = []
ifargs = []
mfargs = []
if hasbody(m):
for b in m['body']:
notvars.append(b['name'])
for n in m['vars'].keys():
var = m['vars'][n]
if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)):
onlyvars.append(n)
mfargs.append(n)
outmess('\t\tConstructing F90 module support for "%s"...\n' %
(m['name']))
if onlyvars:
outmess('\t\t Variables: %s\n' % (' '.join(onlyvars)))
chooks = ['']
def cadd(line, s=chooks):
s[0] = '%s\n%s' % (s[0], line)
ihooks = ['']
def iadd(line, s=ihooks):
s[0] = '%s\n%s' % (s[0], line)
vrd = capi_maps.modsign2map(m)
cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name']))
dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name']))
if hasnote(m):
note = m['note']
if isinstance(note, list):
note = '\n'.join(note)
dadd(note)
if onlyvars:
dadd('\\begin{description}')
for n in onlyvars:
var = m['vars'][n]
modobjs.append(n)
ct = capi_maps.getctype(var)
at = capi_maps.c2capi_map[ct]
dm = capi_maps.getarrdims(n, var)
dms = dm['dims'].replace('*', '-1').strip()
dms = dms.replace(':', '-1').strip()
if not dms:
dms = '-1'
use_fgetdims2 = fgetdims2
if isstringarray(var):
if 'charselector' in var and 'len' in var['charselector']:
cadd('\t{"%s",%s,{{%s,%s}},%s},'
% (undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at))
use_fgetdims2 = fgetdims2_sa
else:
cadd('\t{"%s",%s,{{%s}},%s},' %
(undo_rmbadname1(n), dm['rank'], dms, at))
else:
cadd('\t{"%s",%s,{{%s}},%s},' %
(undo_rmbadname1(n), dm['rank'], dms, at))
dadd('\\item[]{{}\\verb@%s@{}}' %
(capi_maps.getarrdocsign(n, var)))
if hasnote(var):
note = var['note']
if isinstance(note, list):
note = '\n'.join(note)
dadd('--- %s' % (note))
if isallocatable(var):
fargs.append('f2py_%s_getdims_%s' % (m['name'], n))
efargs.append(fargs[-1])
sargs.append(
'void (*%s)(int*,int*,void(*)(char*,int*),int*)' % (n))
sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)')
iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n))
fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1]))
fadd('use %s, only: d => %s\n' %
(m['name'], undo_rmbadname1(n)))
fadd('integer flag\n')
fhooks[0] = fhooks[0] + fgetdims1
dms = eval('range(1,%s+1)' % (dm['rank']))
fadd(' allocate(d(%s))\n' %
(','.join(['s(%s)' % i for i in dms])))
fhooks[0] = fhooks[0] + use_fgetdims2
fadd('end subroutine %s' % (fargs[-1]))
else:
fargs.append(n)
sargs.append('char *%s' % (n))
sargsp.append('char*')
iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n))
if onlyvars:
dadd('\\end{description}')
if hasbody(m):
for b in m['body']:
if not isroutine(b):
print('Skipping', b['block'], b['name'])
continue
modobjs.append('%s()' % (b['name']))
b['modulename'] = m['name']
api, wrap = rules.buildapi(b)
if isfunction(b):
fhooks[0] = fhooks[0] + wrap
fargs.append('f2pywrap_%s_%s' % (m['name'], b['name']))
ifargs.append(func2subr.createfuncwrapper(b, signature=1))
else:
if wrap:
fhooks[0] = fhooks[0] + wrap
fargs.append('f2pywrap_%s_%s' % (m['name'], b['name']))
ifargs.append(
func2subr.createsubrwrapper(b, signature=1))
else:
fargs.append(b['name'])
mfargs.append(fargs[-1])
api['externroutines'] = []
ar = applyrules(api, vrd)
ar['docs'] = []
ar['docshort'] = []
ret = dictappend(ret, ar)
cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},' %
(b['name'], m['name'], b['name'], m['name'], b['name']))
sargs.append('char *%s' % (b['name']))
sargsp.append('char *')
iadd('\tf2py_%s_def[i_f2py++].data = %s;' %
(m['name'], b['name']))
cadd('\t{NULL}\n};\n')
iadd('}')
ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % (
m['name'], ','.join(sargs), ihooks[0])
if '_' in m['name']:
F_FUNC = 'F_FUNC_US'
else:
F_FUNC = 'F_FUNC'
iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'
% (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp)))
iadd('static void f2py_init_%s(void) {' % (m['name']))
iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
% (F_FUNC, m['name'], m['name'].upper(), m['name']))
iadd('}\n')
ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks
ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % (
m['name'], m['name'], m['name'])] + ret['initf90modhooks']
fadd('')
fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name']))
if mfargs:
for a in undo_rmbadname(mfargs):
fadd('use %s, only : %s' % (m['name'], a))
if ifargs:
fadd(' '.join(['interface'] + ifargs))
fadd('end interface')
fadd('external f2pysetupfunc')
if efargs:
for a in undo_rmbadname(efargs):
fadd('external %s' % (a))
fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs))))
fadd('end subroutine f2pyinit%s\n' % (m['name']))
dadd('\n'.join(ret['latexdoc']).replace(
r'\subsection{', r'\subsubsection{'))
ret['latexdoc'] = []
ret['docs'].append('"\t%s --- %s"' % (m['name'],
','.join(undo_rmbadname(modobjs))))
ret['routine_defs'] = ''
ret['doc'] = []
ret['docshort'] = []
ret['latexdoc'] = doc[0]
if len(ret['docs']) <= 1:
ret['docs'] = ''
return ret, fhooks[0]
| bsd-3-clause |
NaturalGIS/naturalgis_qgis | python/plugins/processing/tools/system.py | 33 | 3596 | # -*- coding: utf-8 -*-
"""
***************************************************************************
system.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import time
import sys
import uuid
import math
from qgis.PyQt.QtCore import QDir
from qgis.core import (QgsApplication,
QgsProcessingUtils)
numExported = 1
def userFolder():
userDir = os.path.join(QgsApplication.qgisSettingsDirPath(), 'processing')
if not QDir(userDir).exists():
QDir().mkpath(userDir)
return str(QDir.toNativeSeparators(userDir))
def defaultOutputFolder():
folder = os.path.join(userFolder(), 'outputs')
if not QDir(folder).exists():
QDir().mkpath(folder)
return str(QDir.toNativeSeparators(folder))
def isWindows():
return os.name == 'nt'
def isMac():
return sys.platform == 'darwin'
def getTempFilename(ext=None):
tmpPath = QgsProcessingUtils.tempFolder()
t = time.time()
m = math.floor(t)
uid = '{:8x}{:05x}'.format(m, int((t - m) * 1000000))
if ext is None:
filename = os.path.join(tmpPath, '{}{}'.format(uid, getNumExportedLayers()))
else:
filename = os.path.join(tmpPath, '{}{}.{}'.format(uid, getNumExportedLayers(), ext))
return filename
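# Illustrative result (hypothetical uid): getTempFilename('shp') returns a
# path like '<processing temp folder>/55e57f6a00123N.shp', where the hex uid
# encodes the current time and N is the per-session export counter.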
def getTempDirInTempFolder():
"""Returns a temporary directory, putting it into a temp folder.
"""
path = QgsProcessingUtils.tempFolder()
path = os.path.join(path, uuid.uuid4().hex)
mkdir(path)
return path
def removeInvalidChars(string):
validChars = \
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:.'
string = ''.join(c for c in string if c in validChars)
return string
def getNumExportedLayers():
global numExported
numExported += 1
return numExported
def mkdir(newdir):
newdir = newdir.strip('\n\r ')
if os.path.isdir(newdir):
pass
else:
(head, tail) = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir(head)
if tail:
os.mkdir(newdir)
def tempHelpFolder():
tmp = os.path.join(str(QDir.tempPath()), 'processing_help')
if not QDir(tmp).exists():
QDir().mkpath(tmp)
return str(os.path.abspath(tmp))
def escapeAndJoin(strList):
"""
.. deprecated:: 3.0
Do not use, will be removed in QGIS 4.0
"""
from warnings import warn
warn("processing.escapeAndJoin is deprecated and will be removed in QGIS 4.0", DeprecationWarning)
joined = ''
for s in strList:
if s[0] != '-' and ' ' in s:
escaped = '"' + s.replace('\\', '\\\\').replace('"', '\\"') \
+ '"'
else:
escaped = s
joined += escaped + ' '
return joined.strip()
| gpl-2.0 |
abrahamrq/ViLeOnline | python/ViLe/memory.py | 2 | 3994 | # -*- coding: utf-8 -*-
from semantics import *
from vm import *
# Class that handles the language's memory mapping.
class Memory:
# Initializes an instance of the memory class. It is assigned the function's
# name, and memory is allocated according to how much memory this function
# declared it needs per variable.
def __init__(self, name, memory_needed):
self.function_name = name
# The function's memory
self.integers = [[None]] * memory_needed['int']
self.floats = [[None]] * memory_needed['float']
self.bools = [[None]] * memory_needed['bool']
self.strings = [[None]] * memory_needed['string']
# The function's temporary memory
self.temp_integers = [[None]] * memory_needed['temp_int']
self.temp_floats = [[None]] * memory_needed['temp_float']
self.temp_bools = [[None]] * memory_needed['temp_bool']
self.temp_strings = [[None]] * memory_needed['temp_string']
# Returns whatever is stored at the variable's real address
def get_value_from_real_address(self, type, virtual_address):
scope = self.get_variable_scope(virtual_address)[0]
if scope != 'const':
offset = self.get_variable_scope(virtual_address)[1]
offset += self.get_type_offset(type)
corresponding_memory = self.get_corresponding_memory(scope, type)
real_address = virtual_address - offset
return corresponding_memory[real_address]
else:
return var_dict['inverse_constants'][virtual_address]['value']
# Assigns a value to a variable's real memory
def assign_to_real_address(self, type, virtual_address, value):
scope = self.get_variable_scope(virtual_address)[0]
offset = self.get_variable_scope(virtual_address)[1]
offset += self.get_type_offset(type)
corresponding_memory = self.get_corresponding_memory(scope, type)
real_address = self.get_real_address(type, virtual_address)
corresponding_memory[real_address] = value
# Returns the real address corresponding to the variable's virtual address
def get_real_address(self, type, virtual_address):
scope = self.get_variable_scope(virtual_address)[0]
offset = self.get_variable_scope(virtual_address)[1]
offset += self.get_type_offset(type)
corresponding_memory = self.get_corresponding_memory(scope, type)
return virtual_address - offset
# This function simply returns which part of the program the variable
# belongs to, and its offset so its real address can be accessed.
def get_variable_scope(self, virtual_address):
if virtual_address >= 0 and virtual_address < 10000:
return ["main", 0]
elif virtual_address >= 10000 and virtual_address < 20000:
return ["function", 10000]
elif virtual_address >= 20000 and virtual_address < 30000:
return ["temp", 20000]
elif virtual_address >= 30000 and virtual_address < 40000:
return ["const", 30000]
elif virtual_address >= 40000 and virtual_address < 50000:
return ["global", 40000]
elif virtual_address >= 50000 and virtual_address < 60000:
return ["fun_temp", 50000]
# Returns the virtual-memory offset introduced by the data type
def get_type_offset(self, type):
if type == 1:
return 0
elif type == 2:
return 2500
elif type == 3:
return 7500
elif type == 4:
return 5000
# This function returns the memory block the variable belongs to
def get_corresponding_memory(self, scope, type):
if scope == 'main' or scope == 'function' or scope == 'global':
if type == 1:
return self.integers
elif type == 2:
return self.floats
elif type == 3:
return self.strings
elif type == 4:
return self.bools
elif scope == 'temp' or scope == 'fun_temp':
if type == 1:
return self.temp_integers
elif type == 2:
return self.temp_floats
elif type == 3:
return self.temp_strings
elif type == 4:
return self.temp_bools
| mit |
isyippee/nova | nova/image/api.py | 78 | 9507 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Main abstraction layer for retrieving and storing information about disk
images used by the compute layer.
"""
from nova.image import glance
class API(object):
"""Responsible for exposing a relatively stable internal API for other
modules in Nova to retrieve information about disk images. This API
attempts to match the nova.volume.api and nova.network.api calling
interface.
"""
def _get_session_and_image_id(self, context, id_or_uri):
"""Returns a tuple of (session, image_id). If the supplied `id_or_uri`
is an image ID, then the default client session will be returned
for the context's user, along with the image ID. If the supplied
`id_or_uri` parameter is a URI, then a client session connecting to
the URI's image service endpoint will be returned along with a
parsed image ID from that URI.
:param context: The `nova.context.Context` object for the request
:param id_or_uri: A UUID identifier or an image URI to look up image
information for.
"""
return glance.get_remote_image_service(context, id_or_uri)
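# Illustrative inputs (hypothetical values): a bare UUID resolves against
# the default Glance session, while an image URI such as
# 'http://glance.example:9292/v2/images/<uuid>' selects a session bound to
# that endpoint, with the image ID parsed out of the URI.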
def _get_session(self, _context):
"""Returns a client session that can be used to query for image
information.
:param _context: The `nova.context.Context` object for the request
"""
# TODO(jaypipes): Refactor glance.get_remote_image_service and
# glance.get_default_image_service into a single
# method that takes a context and actually respects
# it, returning a real session object that keeps
# the context alive...
return glance.get_default_image_service()
def get_all(self, context, **kwargs):
"""Retrieves all information records about all disk images available
to show to the requesting user. If the requesting user is an admin,
all images in an ACTIVE status are returned. If the requesting user
is not an admin, the all public images and all private images that
are owned by the requesting user in the ACTIVE status are returned.
:param context: The `nova.context.Context` object for the request
:param kwargs: A dictionary of filter and pagination values that
may be passed to the underlying image info driver.
"""
session = self._get_session(context)
return session.detail(context, **kwargs)
def get(self, context, id_or_uri, include_locations=False,
show_deleted=True):
"""Retrieves the information record for a single disk image. If the
supplied identifier parameter is a UUID, the default driver will
be used to return information about the image. If the supplied
identifier is a URI, then the driver that matches that URI endpoint
will be used to query for image information.
:param context: The `nova.context.Context` object for the request
:param id_or_uri: A UUID identifier or an image URI to look up image
information for.
:param include_locations: (Optional) include locations in the returned
dict of information if the image service API
supports it. If the image service API does
not support the locations attribute, it will
still be included in the returned dict, as an
empty list.
:param show_deleted: (Optional) show the image even the status of
image is deleted.
"""
session, image_id = self._get_session_and_image_id(context, id_or_uri)
return session.show(context, image_id,
include_locations=include_locations,
show_deleted=show_deleted)
def create(self, context, image_info, data=None):
"""Creates a new image record, optionally passing the image bits to
backend storage.
:param context: The `nova.context.Context` object for the request
:param image_info: A dict of information about the image that is
passed to the image registry.
:param data: Optional file handle or bytestream iterator that is
passed to backend storage.
"""
session = self._get_session(context)
return session.create(context, image_info, data=data)
def update(self, context, id_or_uri, image_info,
data=None, purge_props=False):
"""Update the information about an image, optionally along with a file
handle or bytestream iterator for image bits. If the optional file
handle for updated image bits is supplied, the image may not have
already uploaded bits for the image.
:param context: The `nova.context.Context` object for the request
:param id_or_uri: A UUID identifier or an image URI to look up image
information for.
:param image_info: A dict of information about the image that is
passed to the image registry.
:param data: Optional file handle or bytestream iterator that is
passed to backend storage.
:param purge_props: Optional, defaults to False. If set, the backend
image registry will clear all image properties
and replace them the image properties supplied
in the image_info dictionary's 'properties'
collection.
"""
session, image_id = self._get_session_and_image_id(context, id_or_uri)
return session.update(context, image_id, image_info, data=data,
purge_props=purge_props)
def delete(self, context, id_or_uri):
"""Delete the information about an image and mark the image bits for
deletion.
:param context: The `nova.context.Context` object for the request
:param id_or_uri: A UUID identifier or an image URI to look up image
information for.
"""
session, image_id = self._get_session_and_image_id(context, id_or_uri)
return session.delete(context, image_id)
def download(self, context, id_or_uri, data=None, dest_path=None):
"""Transfer image bits from Glance or a known source location to the
supplied destination filepath.
:param context: The `nova.context.RequestContext` object for the
request
:param id_or_uri: A UUID identifier or an image URI to look up image
information for.
:param data: A file object to use in downloading image data.
:param dest_path: Filepath to transfer image bits to.
Note that because of the poor design of the
`glance.ImageService.download` method, the function returns different
things depending on what arguments are passed to it. If a data argument
is supplied but no dest_path is specified (only done in the XenAPI virt
driver's image.utils module) then None is returned from the method. If
the data argument is not specified but a destination path *is*
specified, then a writeable file handle to the destination path is
constructed in the method and the image bits written to that file, and
again, None is returned from the method. If no data argument is
supplied and no dest_path argument is supplied (VMWare and XenAPI virt
drivers), then the method returns an iterator to the image bits that
the caller uses to write to wherever location it wants. Finally, if the
allow_direct_url_schemes CONF option is set to something, then the
nova.image.download modules are used to attempt to do an SCP copy of
the image bits from a file location to the dest_path and None is
returned after retrying one or more download locations (libvirt and
Hyper-V virt drivers through nova.virt.images.fetch).
I think the above points to just how hacky/wacky all of this code is,
and the reason it needs to be cleaned up and standardized across the
virt driver callers.
"""
# TODO(jaypipes): Deprecate and remove this method entirely when we
# move to a system that simply returns a file handle
# to a bytestream iterator and allows the caller to
# handle streaming/copying/zero-copy as they see fit.
session, image_id = self._get_session_and_image_id(context, id_or_uri)
return session.download(context, image_id, data=data,
dst_path=dest_path)
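# Illustrative call patterns (sketch; `image_api` is an API instance and
# the identifiers/paths are hypothetical), following the docstring above:
#
#   bits = image_api.download(ctx, image_id)                 # iterator of image bytes
#   image_api.download(ctx, image_id, dest_path='/tmp/img')  # writes the file, returns None
#   image_api.download(ctx, image_id, data=open('/tmp/img', 'wb'))  # writes to handle, returns None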
| apache-2.0 |
greg-hellings/ansible-modules-extras | monitoring/nagios.py | 36 | 33057 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is largely copied from the Nagios module included in the
# Func project. Original copyright follows:
#
# func-nagios - Schedule downtime and enables/disable notifications
# Copyright 2011, Red Hat, Inc.
# Tim Bielawa <tbielawa@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license version 2 or any later version.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: nagios
short_description: Perform common tasks in Nagios related to downtime and notifications.
description:
- "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
- All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
- You can specify multiple services at once by separating them with commas, .e.g., C(services=httpd,nfs,puppet).
- When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
- When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
version_added: "0.7"
options:
action:
description:
- Action to take.
- servicegroup options were added in 2.0.
required: true
default: null
choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
"silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
"servicegroup_host_downtime" ]
host:
description:
- Host to operate on in Nagios.
required: false
default: null
cmdfile:
description:
- Path to the nagios I(command file) (FIFO pipe).
Only required if auto-detection fails.
required: false
default: auto-detected
author:
description:
- Author to leave downtime comments as.
Only usable with the C(downtime) action.
required: false
default: Ansible
comment:
version_added: "2.0"
description:
- Comment for C(downtime) action.
required: false
default: Scheduling downtime
minutes:
description:
- Minutes to schedule downtime for.
- Only usable with the C(downtime) action.
required: false
default: 30
services:
description:
- What to manage downtime/alerts for. Separate multiple services with commas.
C(service) is an alias for C(services).
B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
aliases: [ "service" ]
required: true
default: null
servicegroup:
version_added: "2.0"
description:
- the Servicegroup we want to set downtimes/alerts for.
B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime).
command:
description:
- The raw command to send to nagios, which
should not include the submitted time header or the line-feed
B(Required) option when using the C(command) action.
required: true
default: null
author: "Tim Bielawa (@tbielawa)"
requirements: [ "Nagios" ]
'''
EXAMPLES = '''
# set 30 minutes of apache downtime
- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }}
# schedule an hour of HOST downtime
- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
# schedule an hour of HOST downtime, with a comment describing the reason
- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
comment='This host needs disciplined'
# schedule downtime for ALL services on HOST
- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }}
# schedule downtime for a few services
- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }}
# set 30 minutes downtime for all services in servicegroup foo
- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
# set 30 minutes downtime for all host in servicegroup foo
- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
# enable SMART disk alerts
- nagios: action=enable_alerts service=smart host={{ inventory_hostname }}
# "two services at once: disable httpd and nfs alerts"
- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }}
# disable HOST alerts
- nagios: action=disable_alerts service=host host={{ inventory_hostname }}
# silence ALL alerts
- nagios: action=silence host={{ inventory_hostname }}
# unsilence all alerts
- nagios: action=unsilence host={{ inventory_hostname }}
# SHUT UP NAGIOS
- nagios: action=silence_nagios
# ANNOY ME NAGIOS
- nagios: action=unsilence_nagios
# command something
- nagios: action=command command='DISABLE_FAILURE_PREDICTION'
'''
import ConfigParser
import types
import time
import os.path
######################################################################
def which_cmdfile():
locations = [
# rhel
'/etc/nagios/nagios.cfg',
# debian
'/etc/nagios3/nagios.cfg',
# older debian
'/etc/nagios2/nagios.cfg',
# bsd, solaris
'/usr/local/etc/nagios/nagios.cfg',
# groundwork it monitoring
'/usr/local/groundwork/nagios/etc/nagios.cfg',
# open monitoring distribution
'/omd/sites/oppy/tmp/nagios/nagios.cfg',
# ???
'/usr/local/nagios/etc/nagios.cfg',
'/usr/local/nagios/nagios.cfg',
'/opt/nagios/etc/nagios.cfg',
'/opt/nagios/nagios.cfg',
# icinga on debian/ubuntu
'/etc/icinga/icinga.cfg',
# icinga installed from source (default location)
'/usr/local/icinga/etc/icinga.cfg',
]
for path in locations:
if os.path.exists(path):
for line in open(path):
if line.startswith('command_file'):
return line.split('=')[1].strip()
return None
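# Illustrative matching line (hypothetical path) inside one of the config
# files above:
#
#   command_file=/var/spool/nagios/cmd/nagios.cmd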
######################################################################
def main():
ACTION_CHOICES = [
'downtime',
'silence',
'unsilence',
'enable_alerts',
'disable_alerts',
'silence_nagios',
'unsilence_nagios',
'command',
'servicegroup_host_downtime',
'servicegroup_service_downtime',
]
module = AnsibleModule(
argument_spec=dict(
action=dict(required=True, default=None, choices=ACTION_CHOICES),
author=dict(default='Ansible'),
comment=dict(default='Scheduling downtime'),
host=dict(required=False, default=None),
servicegroup=dict(required=False, default=None),
minutes=dict(default=30),
cmdfile=dict(default=which_cmdfile()),
services=dict(default=None, aliases=['service']),
command=dict(required=False, default=None),
)
)
action = module.params['action']
host = module.params['host']
servicegroup = module.params['servicegroup']
minutes = module.params['minutes']
services = module.params['services']
cmdfile = module.params['cmdfile']
command = module.params['command']
##################################################################
# Required args per action:
# downtime = (minutes, service, host)
# (un)silence = (host)
# (enable/disable)_alerts = (service, host)
# command = command
#
# AnsibleModule will verify most stuff, we need to verify
# 'minutes' and 'service' manually.
##################################################################
if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
if not host:
module.fail_json(msg='no host specified for action requiring one')
######################################################################
if action == 'downtime':
# Make sure there's an actual service selected
if not services:
module.fail_json(msg='no service selected to set downtime for')
# Make sure minutes is a number
try:
m = int(minutes)
if not isinstance(m, types.IntType):
module.fail_json(msg='minutes must be a number')
except Exception:
module.fail_json(msg='invalid entry for minutes')
######################################################################
if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
# Make sure there's an actual servicegroup selected
if not servicegroup:
module.fail_json(msg='no servicegroup selected to set downtime for')
# Make sure minutes is a number
try:
m = int(minutes)
if not isinstance(m, types.IntType):
module.fail_json(msg='minutes must be a number')
except Exception:
module.fail_json(msg='invalid entry for minutes')
##################################################################
if action in ['enable_alerts', 'disable_alerts']:
if not services:
module.fail_json(msg='a service is required when setting alerts')
if action in ['command']:
if not command:
module.fail_json(msg='no command passed for command action')
##################################################################
if not cmdfile:
module.fail_json('unable to locate nagios.cfg')
##################################################################
ansible_nagios = Nagios(module, **module.params)
if module.check_mode:
module.exit_json(changed=True)
else:
ansible_nagios.act()
##################################################################
######################################################################
class Nagios(object):
"""
Perform common tasks in Nagios related to downtime and
notifications.
The complete set of external commands Nagios handles is documented
on their website:
http://old.nagios.org/developerinfo/externalcommands/commandlist.php
Note that in the case of `schedule_svc_downtime`,
`enable_svc_notifications`, and `disable_svc_notifications`, the
service argument should be passed as a list.
"""
def __init__(self, module, **kwargs):
self.module = module
self.action = kwargs['action']
self.author = kwargs['author']
self.comment = kwargs['comment']
self.host = kwargs['host']
self.servicegroup = kwargs['servicegroup']
self.minutes = int(kwargs['minutes'])
self.cmdfile = kwargs['cmdfile']
self.command = kwargs['command']
if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
self.services = kwargs['services']
else:
self.services = kwargs['services'].split(',')
self.command_results = []
def _now(self):
"""
The time in seconds since 12:00:00AM Jan 1, 1970
"""
return int(time.time())
def _write_command(self, cmd):
"""
Write the given command to the Nagios command file
"""
try:
fp = open(self.cmdfile, 'w')
fp.write(cmd)
fp.flush()
fp.close()
self.command_results.append(cmd.strip())
except IOError:
self.module.fail_json(msg='unable to write to nagios command file',
cmdfile=self.cmdfile)
def _fmt_dt_str(self, cmd, host, duration, author=None,
comment=None, start=None,
svc=None, fixed=1, trigger=0):
"""
Format an external-command downtime string.
cmd - Nagios command ID
host - Host schedule downtime on
duration - Minutes to schedule downtime for
author - Name to file the downtime as
comment - Reason for running this command (upgrade, reboot, etc)
start - Start of downtime in seconds since 12:00AM Jan 1 1970
Default is to use the entry time (now)
svc - Service to schedule downtime for, omit when for host downtime
fixed - Start now if 1, start when a problem is detected if 0
trigger - Optional ID of event to start downtime from. Leave as 0 for
fixed downtime.
Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
entry_time = self._now()
if start is None:
start = entry_time
hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
duration_s = (duration * 60)
end = start + duration_s
if not author:
author = self.author
if not comment:
comment = self.comment
if svc is not None:
dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
str(duration_s), author, comment]
else:
# Downtime for a host if no svc specified
dt_args = [str(start), str(end), str(fixed), str(trigger),
str(duration_s), author, comment]
dt_arg_str = ";".join(dt_args)
dt_str = hdr + dt_arg_str + "\n"
return dt_str
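# Illustrative output (hypothetical timestamps), matching the Syntax line
# above; a trailing newline is appended before writing:
#
#   [1437000000] SCHEDULE_SVC_DOWNTIME;web01;httpd;1437000000;1437001800;1;0;1800;Ansible;Scheduling downtime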
def _fmt_notif_str(self, cmd, host=None, svc=None):
"""
Format an external-command notification string.
cmd - Nagios command ID.
host - Host to en/disable notifications on.. A value is not required
for global downtime
svc - Service to schedule downtime for. A value is not required
for host downtime.
Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
"""
entry_time = self._now()
notif_str = "[%s] %s" % (entry_time, cmd)
if host is not None:
notif_str += ";%s" % host
if svc is not None:
notif_str += ";%s" % svc
notif_str += "\n"
return notif_str
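# Illustrative output (hypothetical timestamp): for a host-level command
# the service field is simply omitted:
#
#   [1437000000] DISABLE_HOST_NOTIFICATIONS;web01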
def schedule_svc_downtime(self, host, services=None, minutes=30):
"""
This command is used to schedule downtime for a particular
service.
During the specified downtime, Nagios will not send
notifications out about the service.
Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
cmd = "SCHEDULE_SVC_DOWNTIME"
if services is None:
services = []
for service in services:
dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
self._write_command(dt_cmd_str)
def schedule_host_downtime(self, host, minutes=30):
"""
This command is used to schedule downtime for a particular
host.
During the specified downtime, Nagios will not send
notifications out about the host.
Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOST_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
self._write_command(dt_cmd_str)
def schedule_host_svc_downtime(self, host, minutes=30):
"""
This command is used to schedule downtime for
all services associated with a particular host.
During the specified downtime, Nagios will not send
notifications out about the host.
SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
self._write_command(dt_cmd_str)
def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
"""
This command is used to schedule downtime for all hosts in a
particular hostgroup.
During the specified downtime, Nagios will not send
notifications out about the hosts.
Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
self._write_command(dt_cmd_str)
def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30):
"""
This command is used to schedule downtime for all services in
a particular hostgroup.
During the specified downtime, Nagios will not send
notifications out about the services.
Note that scheduling downtime for services does not
automatically schedule downtime for the hosts those services
are associated with.
Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
"""
cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
self._write_command(dt_cmd_str)
def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30):
"""
This command is used to schedule downtime for all hosts in a
particular servicegroup.
During the specified downtime, Nagios will not send
notifications out about the hosts.
Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
self._write_command(dt_cmd_str)
def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30):
"""
This command is used to schedule downtime for all services in
a particular servicegroup.
During the specified downtime, Nagios will not send
notifications out about the services.
Note that scheduling downtime for services does not
automatically schedule downtime for the hosts those services
are associated with.
Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
<comment>
"""
cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
self._write_command(dt_cmd_str)
def disable_host_svc_notifications(self, host):
"""
This command is used to prevent notifications from being sent
out for all services on the specified host.
Note that this command does not disable notifications from
being sent out about the host.
Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
"""
cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
self._write_command(notif_str)
def disable_host_notifications(self, host):
"""
This command is used to prevent notifications from being sent
out for the specified host.
Note that this command does not disable notifications for
services associated with this host.
Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = "DISABLE_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
self._write_command(notif_str)
def disable_svc_notifications(self, host, services=None):
"""
This command is used to prevent notifications from being sent
out for the specified service.
Note that this command does not disable notifications from
being sent out about the host.
Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
"""
cmd = "DISABLE_SVC_NOTIFICATIONS"
if services is None:
services = []
for service in services:
notif_str = self._fmt_notif_str(cmd, host, svc=service)
self._write_command(notif_str)
def disable_servicegroup_host_notifications(self, servicegroup):
"""
This command is used to prevent notifications from being sent
out for all hosts in the specified servicegroup.
Note that this command does not disable notifications for
services associated with hosts in this service group.
Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
self._write_command(notif_str)
def disable_servicegroup_svc_notifications(self, servicegroup):
"""
This command is used to prevent notifications from being sent
out for all services in the specified servicegroup.
Note that this does not prevent notifications from being sent
out about the hosts in this servicegroup.
Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
self._write_command(notif_str)
def disable_hostgroup_host_notifications(self, hostgroup):
"""
Disables notifications for all hosts in a particular
hostgroup.
Note that this does not disable notifications for the services
associated with the hosts in the hostgroup - see the
DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
self._write_command(notif_str)
def disable_hostgroup_svc_notifications(self, hostgroup):
"""
Disables notifications for all services associated with hosts
in a particular hostgroup.
Note that this does not disable notifications for the hosts in
the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
command for that.
Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
self._write_command(notif_str)
def enable_host_notifications(self, host):
"""
Enables notifications for a particular host.
Note that this command does not enable notifications for
services associated with this host.
Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = "ENABLE_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
self._write_command(notif_str)
def enable_host_svc_notifications(self, host):
"""
Enables notifications for all services on the specified host.
Note that this does not enable notifications for the host.
Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
"""
cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, host)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_svc_notifications(self, host, services=None):
"""
Enables notifications for a particular service.
Note that this does not enable notifications for the host.
Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
"""
cmd = "ENABLE_SVC_NOTIFICATIONS"
if services is None:
services = []
nagios_return = True
return_str_list = []
for service in services:
notif_str = self._fmt_notif_str(cmd, host, svc=service)
nagios_return = self._write_command(notif_str) and nagios_return
return_str_list.append(notif_str)
if nagios_return:
return return_str_list
else:
return "Fail: could not write to the command file"
def enable_hostgroup_host_notifications(self, hostgroup):
"""
Enables notifications for all hosts in a particular hostgroup.
Note that this command does not enable notifications for
services associated with the hosts in this hostgroup.
Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_hostgroup_svc_notifications(self, hostgroup):
"""
Enables notifications for all services that are associated
with hosts in a particular hostgroup.
Note that this does not enable notifications for the hosts in
this hostgroup.
Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
"""
cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, hostgroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_servicegroup_host_notifications(self, servicegroup):
"""
Enables notifications for all hosts that have services that
are members of a particular servicegroup.
Note that this command does not enable notifications for
services associated with the hosts in this servicegroup.
Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def enable_servicegroup_svc_notifications(self, servicegroup):
"""
Enables notifications for all services that are members of a
particular servicegroup.
Note that this does not enable notifications for the hosts in
this servicegroup.
Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
"""
cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
notif_str = self._fmt_notif_str(cmd, servicegroup)
nagios_return = self._write_command(notif_str)
if nagios_return:
return notif_str
else:
return "Fail: could not write to the command file"
def silence_host(self, host):
"""
This command is used to prevent notifications from being sent
out for the host and all services on the specified host.
This is equivalent to calling disable_host_svc_notifications
and disable_host_notifications.
Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = [
"DISABLE_HOST_SVC_NOTIFICATIONS",
"DISABLE_HOST_NOTIFICATIONS"
]
nagios_return = True
return_str_list = []
for c in cmd:
notif_str = self._fmt_notif_str(c, host)
nagios_return = self._write_command(notif_str) and nagios_return
return_str_list.append(notif_str)
if nagios_return:
return return_str_list
else:
return "Fail: could not write to the command file"
def unsilence_host(self, host):
"""
This command is used to enable notifications for the host and
all services on the specified host.
This is equivalent to calling enable_host_svc_notifications
and enable_host_notifications.
Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
"""
cmd = [
"ENABLE_HOST_SVC_NOTIFICATIONS",
"ENABLE_HOST_NOTIFICATIONS"
]
nagios_return = True
return_str_list = []
for c in cmd:
notif_str = self._fmt_notif_str(c, host)
nagios_return = self._write_command(notif_str) and nagios_return
return_str_list.append(notif_str)
if nagios_return:
return return_str_list
else:
return "Fail: could not write to the command file"
def silence_nagios(self):
"""
This command is used to disable notifications for all hosts and services
in nagios.
This is a 'SHUT UP, NAGIOS' command
"""
cmd = 'DISABLE_NOTIFICATIONS'
self._write_command(self._fmt_notif_str(cmd))
def unsilence_nagios(self):
"""
This command is used to enable notifications for all hosts and services
in nagios.
This is an 'OK, NAGIOS, GO' command
"""
cmd = 'ENABLE_NOTIFICATIONS'
self._write_command(self._fmt_notif_str(cmd))
def nagios_cmd(self, cmd):
"""
This sends an arbitrary command to nagios
It prepends the submitted time and appends a \n
You just have to provide the properly formatted command
"""
pre = '[%s]' % int(time.time())
post = '\n'
cmdstr = '%s %s %s' % (pre, cmd, post)
self._write_command(cmdstr)
def act(self):
"""
Figure out what you want to do from ansible, and then do the
needful (at the earliest).
"""
# host or service downtime?
if self.action == 'downtime':
if self.services == 'host':
self.schedule_host_downtime(self.host, self.minutes)
elif self.services == 'all':
self.schedule_host_svc_downtime(self.host, self.minutes)
else:
self.schedule_svc_downtime(self.host,
services=self.services,
minutes=self.minutes)
elif self.action == "servicegroup_host_downtime":
if self.servicegroup:
self.schedule_servicegroup_host_downtime(servicegroup = self.servicegroup, minutes = self.minutes)
elif self.action == "servicegroup_service_downtime":
if self.servicegroup:
self.schedule_servicegroup_svc_downtime(servicegroup = self.servicegroup, minutes = self.minutes)
# toggle the host AND service alerts
elif self.action == 'silence':
self.silence_host(self.host)
elif self.action == 'unsilence':
self.unsilence_host(self.host)
# toggle host/svc alerts
elif self.action == 'enable_alerts':
if self.services == 'host':
self.enable_host_notifications(self.host)
else:
self.enable_svc_notifications(self.host,
services=self.services)
elif self.action == 'disable_alerts':
if self.services == 'host':
self.disable_host_notifications(self.host)
else:
self.disable_svc_notifications(self.host,
services=self.services)
elif self.action == 'silence_nagios':
self.silence_nagios()
elif self.action == 'unsilence_nagios':
self.unsilence_nagios()
elif self.action == 'command':
self.nagios_cmd(self.command)
# wtf?
else:
self.module.fail_json(msg="unknown action specified: '%s'" % \
self.action)
self.module.exit_json(nagios_commands=self.command_results,
changed=True)
######################################################################
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
borisroman/vdsm | vdsm_hooks/fileinject/before_vm_start.py | 6 | 4140 | #!/usr/bin/python
import os
import sys
import hooking
import traceback
import guestfs
'''
fileinject vdsm hook
====================
The hook gets a target file name and its content and
creates that file in the target machine.
The hook will try to add the file only to the operating
system partition, i.e.
Windows: /c/myfile
Linux: /myfile
Please note that in Windows the path is case sensitive!
syntax:
fileinject=/<target file name> : <file content>
fileinject=/myfile:some file content\netc...
'''
def inject_file(filepath, content, drive, diskformat):
injected = False
gfs = guestfs.GuestFS()
try:
gfs.add_drive_opts(drive, format=diskformat)
except RuntimeError as e:
sys.stderr.write('fileinject: [error in inject_file]: %s\n' % e)
else:
gfs.launch()
for root in gfs.inspect_os():
if gfs.inspect_get_type(root) == "windows":
filepath = os.path.join(
gfs.case_sensitive_path(os.path.dirname(filepath)),
os.path.basename(filepath))
gfs.mount_options("", root, "/")
try:
gfs.write(filepath, content)
except RuntimeError as e:
sys.stderr.write('fileinject: [upload failed]: %s\n' % e)
else:
injected = True
finally:
gfs.umount(root)
return injected
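# Example (hypothetical sketch): injecting a file into a raw-format disk
# image. The disk image path below is illustrative, not a real VM disk:
#
#   inject_file('/myfile', 'some file content\n',
#               '/var/lib/images/vm01.img', 'raw')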
if 'fileinject' in os.environ:
try:
pos = os.environ['fileinject'].find(':')
if pos < 0:
sys.stderr.write('fileinject: invalid syntax, '
'expected file-name:file-content, '
'no ":" separation found: %s pos: %d\n' %
(os.environ['fileinject'], pos))
sys.exit(2)
filepath = os.environ['fileinject'][:pos]
content = os.environ['fileinject'][pos + 1:]
if not filepath.startswith('/'):
sys.stderr.write("fileinject: filepath must start with '/', "
"please refer to the README file\n")
sys.exit(2)
domxml = hooking.read_domxml()
disks = domxml.getElementsByTagName('disk')
injected = False
diskformat = 'raw'
rawcount = 0
for disk in disks:
if (disk.hasAttribute('device') and
disk.attributes['device'].value == 'disk'):
sources = disk.getElementsByTagName('source')
if len(sources) > 0:
source = sources[0]
drivers = disk.getElementsByTagName('driver')
if (len(drivers) > 0 and
drivers[0].hasAttribute('type') and
drivers[0].attributes['type'].value == 'qcow2'):
# we can only inject to 'raw' file format
continue
rawcount += 1
# disk format can be raw or qcow2
# http://libguestfs.org/guestfs.3.html#guestfs_add_drive_opts # noqa
path = None
if source.hasAttribute('file'):
path = source.attributes['file'].value
elif source.hasAttribute('dev'):
path = source.attributes['dev'].value
if path is not None:
injected = inject_file(filepath, content,
path, diskformat)
if not injected:
if rawcount == 0:
sys.stderr.write('fileinject: there is no "preallocated" '
'(RAW format) disk in VM, '
'cannot inject data\n')
else:
sys.stderr.write('fileinject: Cannot inject data, '
                             'path does not exist: %s\n' %
os.path.dirname(filepath))
sys.exit(2)
except:
sys.stderr.write('fileinject: [unexpected error]: %s\n' %
traceback.format_exc())
sys.exit(2)
| gpl-2.0 |
HLFH/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/allocine.py | 22 | 6845 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
A module to use Allocine API V3 in Python
Repository: https://github.com/xbgmsharp/allocine
Based on work from: https://github.com/gromez/allocine-api
License: LGPLv2 http://www.gnu.org/licenses/lgpl.html
Sample code:
from allocine import allocine
api = allocine()
api.configure('100043982026','29d185d98c984a359e6e6f26a0474269')
movie = api.movie(27405)
search = api.search("Oblivion")
"""
# Debug
#from pprint import pprint
# standard module
from datetime import date
import urllib2, urllib
import hashlib, base64
import json as simplejson
__version__ = "0.2"
__author__ = "Francois Lacroix"
__license__ = "GPL"
__description__ = "A module to use Allocine API V3 in Python"
class allocine(object):
"""An interface to the Allocine API"""
def __init__(self, partner_key=None, secret_key=None):
"""Init values"""
self._api_url = 'http://api.allocine.fr/rest/v3'
self._partner_key = 'aXBob25lLXYy'
self._secret_key = secret_key
self._user_agent = 'AlloCine/2.9.5 CFNetwork/548.1.4 Darwin/11.0.0'
def configure(self, partner_key=None, secret_key=None):
"""Set the keys"""
self._partner_key = 'aXBob25lLXYy'
self._secret_key = secret_key
def _do_request(self, method=None, params=None):
"""Generate and send the request"""
# build the URL
        query_url = self._api_url + '/' + method
# new algo to build the query
today = date.today()
sed = today.strftime('%Y%m%d')
#print sed
sha1 = hashlib.sha1(self._secret_key+urllib.urlencode(params)+'&sed='+sed).digest()
#print sha1
b64 = base64.b64encode(sha1)
#print b64
sig = urllib2.quote(b64)
#query_url += '?'+urllib.urlencode(params)+'&sed='+sed+'&sig='+sig
query_url += '?'+urllib.urlencode(params, True)
#print query_url;
# do the request
req = urllib2.Request(query_url)
req.add_header('User-agent', self._user_agent)
        response = simplejson.load(urllib2.urlopen(req, timeout=10))
        return response
def search(self, query, filter="movie"):
"""Search for a term
Param:
query -- Term to search for
        filter -- Filter by result type (movie, theater, person, news, tvseries)
"""
# build the params
params = {}
params['format'] = 'json'
params['partner'] = self._partner_key
params['q'] = query
params['filter'] = filter
params['profile'] = "large"
# do the request
        response = self._do_request('search', params)
        return response
def movie(self, id, profile="large", mediafmt="mp4-lc:m"):
"""Get the movie details by ID
Param:
        id -- Unique ID of the movie you search for
profile -- Level of details to return (small, medium, large)
mediafmt -- The media format (flv, mp4-lc, mp4-hip, mp4-archive, mpeg2-theater, mpeg2)
"""
# build the params
params = {}
params['format'] = 'json'
params['partner'] = self._partner_key
params['mediafmt'] = mediafmt
params['profile'] = profile
params['code'] = id
params['striptags'] = 'synopsis,synopsisshort'
# do the request
        response = self._do_request('movie', params)
        return response
def tvseries(self, id, profile="large", mediafmt="mp4-lc:m"):
"""Get the TVshow details by ID
Param:
        id -- Unique ID of the tvseries you search for
profile -- Level of details to return (small, medium, large)
mediafmt -- The media format (flv, mp4-lc, mp4-hip, mp4-archive, mpeg2-theater, mpeg2)
"""
# build the params
params = {}
params['format'] = 'json'
params['partner'] = self._partner_key
params['mediafmt'] = mediafmt
params['profile'] = profile
params['code'] = id
params['striptags'] = 'synopsis,synopsisshort'
# do the request
        response = self._do_request('tvseries', params)
        return response
def season(self, id, profile="large"):
"""Get the season details by ID
Param:
        id -- Unique ID of the season you search for
profile -- Level of details to return (small, medium, large)
"""
# build the params
params = {}
params['format'] = 'json'
params['partner'] = self._partner_key
params['profile'] = profile
params['code'] = id
params['striptags'] = 'synopsis,synopsisshort'
# do the request
        response = self._do_request('season', params)
        return response
def episode(self, id, profile="large"):
"""Get the episode details by ID
Param:
        id -- Unique ID of the episode you search for
profile -- Level of details to return (small, medium, large)
"""
# build the params
params = {}
params['format'] = 'json'
params['partner'] = self._partner_key
params['profile'] = profile
params['code'] = id
params['striptags'] = 'synopsis,synopsisshort'
# do the request
        response = self._do_request('episode', params)
        return response
def trailer(self, id, profile="large", mediafmt="mp4-lc:m"):
"""Get the movie details by ID
Param:
id -- Unique ID of the movie your search for
profile -- Level of details to return (small, medium, large)
mediafmt -- The media format (flv, mp4-lc, mp4-hip, mp4-archive, mpeg2-theater, mpeg2)
"""
# build the params
params = {}
params['format'] = 'json'
params['partner'] = self._partner_key
params['profile'] = profile
params['code'] = id
# do the request
        response = self._do_request('media', params)
        return response
def movielist(self, typemovie, profile="large", mediafmt="mp4-lc:m"):
"""Get the movie details by ID
Param:
id -- Unique ID of the movie your search for
profile -- Level of details to return (small, medium, large)
mediafmt -- The media format (flv, mp4-lc, mp4-hip, mp4-archive, mpeg2-theater, mpeg2)
"""
# build the params
params = {}
params['format'] = 'json'
params['partner'] = self._partner_key
params['profile'] = profile
params['filter'] = typemovie
params['order'] = 'toprank'
params['count'] = 30
# do the request
        response = self._do_request('movielist', params)
        return response
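# Minimal usage sketch (the keys below are placeholders, not real
# credentials; see also the sample code in the module docstring):
#
#   api = allocine()
#   api.configure('PARTNER_KEY', 'SECRET_KEY')
#   print api.search('Oblivion')    # raw JSON response
#   print api.trailer(27405)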
| gpl-3.0 |
cokirix/p2pool | SOAPpy/Server.py | 289 | 27143 | from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
#import xml.sax
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
import thread
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import Config
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
################################################################################
# Call context dictionary
################################################################################
_contexts = dict()
def GetSOAPContext():
global _contexts
return _contexts[thread.get_ident()]
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
def __init__(self, func, keywords=0, context=0):
self.func = func
self.keywords = keywords
self.context = context
self.__name__ = func.__name__
def __call__(self, *args, **kw):
return apply(self.func,args,kw)
class SOAPContext:
def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
soapaction):
self.header = header
self.body = body
self.attrs = attrs
self.xmldata = xmldata
self.connection = connection
self.httpheaders= httpheaders
self.soapaction = soapaction
# A class to describe how header messages are handled
class HeaderHandler:
# Initially fail out if there are any problems.
def __init__(self, header, attrs):
for i in header.__dict__.keys():
if i[0] == "_":
continue
d = getattr(header, i)
try:
fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
except:
fault = 0
if fault:
raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
"Required Header Misunderstood",
"%s" % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServerBase:
def get_request(self):
sock, addr = SocketServer.TCPServer.get_request(self)
if self.ssl_context:
sock = SSL.Connection(self.ssl_context, sock)
sock._setup_ssl(addr)
if sock.accept_ssl() != 1:
raise socket.error, "Couldn't accept SSL connection"
return sock, addr
def registerObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
self.objmap[namespace] = object
def registerFunction(self, function, namespace = '', funcName = None,
path = ''):
if not funcName : funcName = function.__name__
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
if self.funcmap.has_key(namespace):
self.funcmap[namespace][funcName] = function
else:
self.funcmap[namespace] = {funcName : function}
def registerKWObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
for i in dir(object.__class__):
if i[0] != "_" and callable(getattr(object, i)):
self.registerKWFunction(getattr(object,i), namespace)
# convenience - wraps your func for you.
def registerKWFunction(self, function, namespace = '', funcName = None,
path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
self.registerFunction(MethodSig(function,keywords=1), namespace,
funcName)
def unregisterObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
del self.objmap[namespace]
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def version_string(self):
return '<a href="http://pywebsvcs.sf.net">' + \
'SOAPpy ' + __version__ + '</a> (Python ' + \
sys.version.split()[0] + ')'
def date_time_string(self):
self.__last_date_time_string = \
BaseHTTPServer.BaseHTTPRequestHandler.\
date_time_string(self)
return self.__last_date_time_string
def do_POST(self):
global _contexts
status = 500
try:
if self.server.config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
print self.raw_requestline.strip()
print "\n".join(map (lambda x: x.strip(),
self.headers.headers))
debugFooter(s)
data = self.rfile.read(int(self.headers["Content-length"]))
if self.server.config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print data,
if data[-1] != '\n':
print
debugFooter(s)
(r, header, body, attrs) = \
parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
method = r._name
args = r._aslist()
kw = r._asdict()
if Config.simplify_objects:
args = simplify(args)
kw = simplify(kw)
# Handle mixed named and unnamed arguments by assuming
# that all arguments with names of the form "v[0-9]+"
# are unnamed and should be passed in numeric order,
# other arguments are named and should be passed using
# this name.
            # This is a non-standard extension to the SOAP protocol,
# but is supported by Apache AXIS.
# It is enabled by default. To disable, set
# Config.specialArgs to False.
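            # e.g. (illustrative): kw = {'v0': 1, 'v2': 3, 'name': 'x'} yields
            # ordered_args = {0: 1, 2: 3} and named_args = {'name': 'x'}.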
ordered_args = {}
named_args = {}
if Config.specialArgs:
for (k,v) in kw.items():
if k[0]=="v":
try:
i = int(k[1:])
ordered_args[i] = v
except ValueError:
named_args[str(k)] = v
else:
named_args[str(k)] = v
# We have to decide namespace precedence
# I'm happy with the following scenario
# if r._ns is specified use it, if not check for
# a path, if it's specified convert it and use it as the
# namespace. If both are specified, use r._ns.
ns = r._ns
if len(self.path) > 1 and not ns:
ns = self.path.replace("/", ":")
if ns[0] == ":": ns = ns[1:]
# authorization method
a = None
keylist = ordered_args.keys()
keylist.sort()
# create list in proper order w/o names
tmp = map( lambda x: ordered_args[x], keylist)
ordered_args = tmp
#print '<-> Argument Matching Yielded:'
#print '<-> Ordered Arguments:' + str(ordered_args)
#print '<-> Named Arguments :' + str(named_args)
resp = ""
# For fault messages
if ns:
nsmethod = "%s:%s" % (ns, method)
else:
nsmethod = method
try:
# First look for registered functions
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(method):
f = self.server.funcmap[ns][method]
# look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(authmethod):
a = self.server.funcmap[ns][authmethod]
else:
# Now look at registered objects
# Check for nested attributes. This works even if
# there are none, because the split will return
# [method]
f = self.server.objmap[ns]
# Look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if hasattr(f, authmethod):
a = getattr(f, authmethod)
# then continue looking for the method
l = method.split(".")
for i in l:
f = getattr(f, i)
except:
info = sys.exc_info()
try:
resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
"Method Not Found",
"%s : %s %s %s" % (nsmethod,
info[0],
info[1],
info[2])),
encoding = self.server.encoding,
config = self.server.config)
finally:
del info
status = 500
else:
try:
if header:
x = HeaderHandler(header, attrs)
fr = 1
# call context book keeping
# We're stuffing the method into the soapaction if there
# isn't one, someday, we'll set that on the client
# and it won't be necessary here
# for now we're doing both
if "SOAPAction".lower() not in self.headers.keys() or \
self.headers["SOAPAction"] == "\"\"":
self.headers["SOAPAction"] = method
thread_id = thread.get_ident()
_contexts[thread_id] = SOAPContext(header, body,
attrs, data,
self.connection,
self.headers,
self.headers["SOAPAction"])
# Do an authorization check
if a != None:
if not apply(a, (), {"_SOAPContext" :
_contexts[thread_id] }):
raise faultType("%s:Server" % NS.ENV_T,
"Authorization failed.",
"%s" % nsmethod)
# If it's wrapped, some special action may be needed
if isinstance(f, MethodSig):
c = None
if f.context: # retrieve context object
c = _contexts[thread_id]
if Config.specialArgs:
if c:
named_args["_SOAPContext"] = c
fr = apply(f, ordered_args, named_args)
elif f.keywords:
# This is lame, but have to de-unicode
# keywords
strkw = {}
for (k, v) in kw.items():
strkw[str(k)] = v
if c:
strkw["_SOAPContext"] = c
fr = apply(f, (), strkw)
elif c:
fr = apply(f, args, {'_SOAPContext':c})
else:
fr = apply(f, args, {})
else:
if Config.specialArgs:
fr = apply(f, ordered_args, named_args)
else:
fr = apply(f, args, {})
if type(fr) == type(self) and \
isinstance(fr, voidType):
resp = buildSOAP(kw = {'%sResponse' % method: fr},
encoding = self.server.encoding,
config = self.server.config)
else:
resp = buildSOAP(kw =
{'%sResponse' % method: {'Result': fr}},
encoding = self.server.encoding,
config = self.server.config)
# Clean up _contexts
if _contexts.has_key(thread_id):
del _contexts[thread_id]
except Exception, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Method %s exception' % nsmethod
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if isinstance(e, faultType):
f = e
else:
f = faultType("%s:Server" % NS.ENV_T,
"Method Failed",
"%s" % nsmethod)
if self.server.config.returnFaultInfo:
f._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(f, 'detail'):
f._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(f, encoding = self.server.encoding,
config = self.server.config)
status = 500
else:
status = 200
except faultType, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Received fault exception'
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if self.server.config.returnFaultInfo:
e._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(e, 'detail'):
e._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(e, encoding = self.server.encoding,
config = self.server.config)
status = 500
except Exception, e:
# internal error, report as HTTP server error
if self.server.config.dumpFaultInfo:
s = 'Internal exception %s' % e
import traceback
debugHeader(s)
info = sys.exc_info()
try:
traceback.print_exception(info[0], info[1], info[2])
finally:
del info
debugFooter(s)
self.send_response(500)
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, 500, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
debugFooter(s)
else:
# got a valid SOAP response
self.send_response(status)
t = 'text/xml';
if self.server.encoding != None:
t += '; charset=%s' % self.server.encoding
self.send_header("Content-type", t)
self.send_header("Content-length", str(len(resp)))
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, status, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
print "Content-type:", t
print "Content-length:", len(resp)
debugFooter(s)
if self.server.config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print resp,
if resp[-1] != '\n':
print
debugFooter(s)
self.wfile.write(resp)
self.wfile.flush()
# We should be able to shut down both a regular and an SSL
# connection, but under Python 2.1, calling shutdown on an
# SSL connections drops the output, so this work-around.
# This should be investigated more someday.
if self.server.config.SSLserver and \
isinstance(self.connection, SSL.Connection):
self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
SSL.SSL_RECEIVED_SHUTDOWN)
else:
self.connection.shutdown(1)
def do_GET(self):
#print 'command ', self.command
#print 'path ', self.path
#print 'request_version', self.request_version
#print 'headers'
#print ' type ', self.headers.type
#print ' maintype', self.headers.maintype
#print ' subtype ', self.headers.subtype
#print ' params ', self.headers.plist
path = self.path.lower()
if path.endswith('wsdl'):
method = 'wsdl'
function = namespace = None
if self.server.funcmap.has_key(namespace) \
and self.server.funcmap[namespace].has_key(method):
function = self.server.funcmap[namespace][method]
else:
if namespace in self.server.objmap.keys():
function = self.server.objmap[namespace]
l = method.split(".")
for i in l:
function = getattr(function, i)
if function:
self.send_response(200)
self.send_header("Content-type", 'text/plain')
self.end_headers()
response = apply(function, ())
self.wfile.write(str(response))
return
# return error
self.send_response(200)
self.send_header("Content-type", 'text/html')
self.end_headers()
self.wfile.write('''\
<head>
<title>Error!</title>
</head>
<body>
<h1>Oops!</h1>
<p>
  This server supports HTTP GET requests only for the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.
  Either you requested a URL that does not end in "wsdl" or this
server does not implement a wsdl method.
</p>
</body>''')
def log_message(self, format, *args):
if self.server.log:
BaseHTTPServer.BaseHTTPRequestHandler.\
log_message (self, format, *args)
class SOAPServer(SOAPServerBase, SocketServer.TCPServer):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.TCPServer.__init__(self, addr, RequestHandler)
class ThreadingSOAPServer(SOAPServerBase, SocketServer.ThreadingTCPServer):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandler)
# only define class if Unix domain sockets are available
if hasattr(socket, "AF_UNIX"):
class SOAPUnixSocketServer(SOAPServerBase, SocketServer.UnixStreamServer):
def __init__(self, addr = 8000,
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.UnixStreamServer.__init__(self, str(addr), RequestHandler)
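# Minimal server sketch (host, port and namespace are placeholders):
#
#   def echo(s):
#       return s
#
#   server = SOAPServer(('localhost', 8000))
#   server.registerFunction(echo, namespace='urn:demo')
#   server.serve_forever()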
| gpl-3.0 |
openhatch/new-mini-tasks | vendor/packages/Django/django/template/loaders/app_directories.py | 114 | 2362 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import os
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
from django.utils.importlib import import_module
from django.utils import six
# At compile time, cache the directories to search.
if not six.PY3:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
app_template_dirs = []
for app in settings.INSTALLED_APPS:
try:
mod = import_module(app)
except ImportError as e:
raise ImproperlyConfigured('ImportError %s: %s' % (app, e.args[0]))
template_dir = os.path.join(os.path.dirname(mod.__file__), 'templates')
if os.path.isdir(template_dir):
if not six.PY3:
template_dir = template_dir.decode(fs_encoding)
app_template_dirs.append(template_dir)
# It won't change, so convert it to a tuple to save memory.
app_template_dirs = tuple(app_template_dirs)
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = app_template_dirs
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template_source(self, template_name, template_dirs=None):
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with open(filepath, 'rb') as fp:
return (fp.read().decode(settings.FILE_CHARSET), filepath)
except IOError:
pass
raise TemplateDoesNotExist(template_name)
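# Usage sketch (setting values are illustrative for Django versions of this
# era): the loader is enabled by listing its dotted path in TEMPLATE_LOADERS:
#
#   TEMPLATE_LOADERS = (
#       'django.template.loaders.filesystem.Loader',
#       'django.template.loaders.app_directories.Loader',
#   )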
| apache-2.0 |
RJ15/FixIt | badminton/node_modules/node-gyp/gyp/pylib/gyp/MSVSNew.py | 1835 | 12124 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
  A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can explicitly
determine the GUID to refer to explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
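# For example (illustrative target name), MakeGuid is deterministic: the
# same name and seed always yield the same GUID-like string, on any machine.
#
#   assert MakeGuid('base_unittests') == MakeGuid('base_unittests')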
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
      # Use the last path component as the name.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
| mit |
2014c2g6/c2g6 | wsgi/static/Brython2.1.0-20140419-113919/Lib/multiprocessing/dummy/__init__.py | 693 | 4380 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
#brython fix me
#def Array(typecode, sequence, lock=True):
# return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
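# Usage sketch (the target below is illustrative): the dummy API mirrors
# multiprocessing, but Process is really a thread.
#
#   p = Process(target=print, args=('hello from a "process"',))
#   p.start()
#   p.join()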
| gpl-2.0 |
topic2k/EventGhost | plugins/Speedlink6399/__init__.py | 1 | 3390 | # -*- coding: utf-8 -*-
#
# This file is a plugin for EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.org/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
ur"""<rst>
Plugin for the Speed-Link Media Remote Control (SL-6399)
"""
import eg
eg.RegisterPlugin(
name = "Speed-Link SL-6399 Media Remote",
author = "Bitmonster",
version = "1.0.0",
kind = "remote",
guid = "{ED814A18-5379-46B5-9A3B-65449C21871E}",
description = __doc__,
hardwareId = "USB\\VID_1241&PID_E000",
)
CODES1 = {
(0, 0, 40): "Ok",
(0, 0, 75): "ChannelUp",
(0, 0, 78): "ChannelDown",
(0, 0, 79): "Right",
(0, 0, 80): "Left",
(0, 0, 81): "Down",
(0, 0, 82): "Up",
(0, 0, 85): "Star",
(0, 0, 30): "Num1",
(0, 0, 31): "Num2",
(0, 0, 32): "Num3",
(0, 0, 33): "Num4",
(0, 0, 34): "Num5",
(0, 0, 35): "Num6",
(0, 0, 36): "Num7",
(0, 0, 37): "Num8",
(0, 0, 38): "Num9",
(0, 0, 39): "Num0",
(0, 0, 42): "Back",
(0, 0, 58): "Help",
(1, 0, 4): "Radio",
(1, 0, 17): "Msn",
(1, 0, 24): "Title",
(2, 0, 32): "Dash",
(3, 0, 4): "Audio",
(3, 0, 16): "DVD",
(3, 0, 29): "Aspect",
(4, 0, 40): "Desktop",
(4, 0, 61): "PC",
}
CODES2 = {
(4, 2, 0): "TV",
(3, 180, 0): "Rewind",
(3, 176, 0): "Play",
(3, 179, 0): "Forward",
(3, 183, 0): "Stop",
(3, 182, 0): "Replay",
(3, 177, 0): "Pause",
(3, 181, 0): "Skip",
(3, 178, 0): "Record",
(3, 9, 2): "More",
(4, 16, 0): "Videos",
(4, 4, 0): "Music",
(4, 8, 0): "Pictures",
(4, 32, 0): "MyTV",
(3, 233, 0): "VolumeUp",
(3, 234, 0): "VolumeDown",
(4, 1, 0): "Start",
(3, 226, 0): "Mute",
(3, 141, 0): "Guide",
(4, 64, 0): "RTV",
}
class Speedlink(eg.PluginBase):
def __start__(self):
self.info.eventPrefix = "SpeedLink"
self.winUsb = eg.WinUsb(self)
self.winUsb.Device(self.Callback1, 8).AddHardwareId(
"SPEEDLINK SL-6399 Media Remote", "USB\\VID_1241&PID_E000&MI_00"
)
self.winUsb.Device(self.Callback2, 4).AddHardwareId(
"SPEEDLINK SL-6399 Media Remote", "USB\\VID_1241&PID_E000&MI_01"
)
self.winUsb.Start()
def __stop__(self):
self.winUsb.Stop()
def Callback1(self, data):
code = data[:3]
if code == (0, 0, 0):
self.EndLastEvent()
elif code in CODES1:
self.TriggerEnduringEvent(CODES1[code])
else:
print "#1", data
def Callback2(self, data):
if data[1:] == (0, 0, 0):
self.EndLastEvent()
return
code = data[:3]
if code in CODES2:
self.TriggerEnduringEvent(CODES2[code])
else:
print "#2", data
| gpl-2.0 |
ebukoz/thrive | erpnext/regional/report/eway_bill/eway_bill.py | 13 | 9749 | # Copyright (c) 2013, FinByz Tech Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import re
from frappe import _
from frappe.utils import nowdate
def execute(filters=None):
	if not filters:
		filters = {}
		filters.setdefault('posting_date', [nowdate(), nowdate()])
columns, data = [], []
columns = get_columns()
data = get_data(filters)
return columns, data
def get_data(filters):
conditions = get_conditions(filters)
data = frappe.db.sql("""
SELECT
dn.name as dn_id, dn.posting_date, dn.company, dn.company_gstin, dn.customer, dn.customer_gstin, dni.item_code, dni.item_name, dni.description, dni.gst_hsn_code, dni.uom, dni.qty, dni.amount, dn.mode_of_transport, dn.distance, dn.transporter_name, dn.gst_transporter_id, dn.lr_no, dn.lr_date, dn.vehicle_no, dn.gst_vehicle_type, dn.company_address, dn.shipping_address_name
FROM
`tabDelivery Note` AS dn join `tabDelivery Note Item` AS dni on (dni.parent = dn.name)
WHERE
dn.docstatus < 2
%s """ % conditions, as_dict=1)
unit = {
'Bag': "BAGS",
'Bottle': "BOTTLES",
'Kg': "KILOGRAMS",
'Liter': "LITERS",
'Meter': "METERS",
'Nos': "NUMBERS",
'PKT': "PACKS",
'Roll': "ROLLS",
'Set': "SETS"
}
# Regular expression set to remove all the special characters
special_characters = "[$%^*()+\\[\]{};':\"\\|<>.?]"
for row in data:
set_defaults(row)
set_taxes(row, filters)
set_address_details(row, special_characters)
# Eway Bill accepts date as dd/mm/yyyy and not dd-mm-yyyy
row.posting_date = '/'.join(str(row.posting_date).replace("-", "/").split('/')[::-1])
row.lr_date = '/'.join(str(row.lr_date).replace("-", "/").split('/')[::-1])
if row.gst_vehicle_type == 'Over Dimensional Cargo (ODC)':
row.gst_vehicle_type = 'ODC'
row.item_name = re.sub(special_characters, " ", row.item_name)
row.description = row.item_name
row.uom = unit.get(row.uom, row.uom)
		# For removing special characters and numbers from the customer name.
row.customer = re.sub(special_characters[:-1] + "&0-9" + "]", "", row.customer)
return data
def get_conditions(filters):
conditions = ""
conditions += filters.get('company') and " AND dn.company = '%s' " % filters.get('company') or ""
conditions += filters.get('posting_date') and " AND dn.posting_date >= '%s' AND dn.posting_date <= '%s' " % (filters.get('posting_date')[0], filters.get('posting_date')[1]) or ""
conditions += filters.get('delivery_note') and " AND dn.name = '%s' " % filters.get('delivery_note') or ""
	conditions += filters.get('customer') and " AND dn.customer = '%s' " % filters.get('customer').replace("'", "\\'") or ""
return conditions
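# Example (illustrative filter values): for company 'ACME' and a single-day
# posting_date range, get_conditions returns something like
#   " AND dn.company = 'ACME'  AND dn.posting_date >= '2019-01-01' AND dn.posting_date <= '2019-01-01' "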
def set_defaults(row):
row.setdefault(u'supply_type', "Outward")
row.setdefault(u'sub_type', "Supply")
row.setdefault(u'doc_type', "Delivery Challan")
def set_address_details(row, special_characters):
if row.get('company_address'):
address_line1, address_line2, city, pincode, state = frappe.db.get_value("Address", row.get('company_address'), ['address_line1', 'address_line2', 'city', 'pincode', 'state'])
row.update({'from_address_1': re.sub(special_characters, "", address_line1 or '')})
row.update({'from_address_2': re.sub(special_characters, "", address_line2 or '')})
row.update({'from_place': city and city.upper() or ''})
row.update({'from_pin_code': pincode and pincode.replace(" ", "") or ''})
row.update({'from_state': state and state.upper() or ''})
row.update({'dispatch_state': row.from_state})
if row.get('shipping_address_name'):
address_line1, address_line2, city, pincode, state = frappe.db.get_value("Address", row.get('shipping_address_name'), ['address_line1', 'address_line2', 'city', 'pincode', 'state'])
row.update({'to_address_1': re.sub(special_characters, "", address_line1 or '')})
row.update({'to_address_2': re.sub(special_characters, "", address_line2 or '')})
row.update({'to_place': city and city.upper() or ''})
row.update({'to_pin_code': pincode and pincode.replace(" ", "") or ''})
row.update({'to_state': state and state.upper() or ''})
row.update({'ship_to_state': row.to_state})
def set_taxes(row, filters):
taxes = frappe.get_list("Sales Taxes and Charges",
filters={
'parent': row.dn_id
},
fields=('item_wise_tax_detail', 'account_head'))
account_list = ["cgst_account", "sgst_account", "igst_account", "cess_account"]
taxes_list = frappe.get_list("GST Account",
filters={
"parent": "GST Settings",
"company": filters.company
},
fields=account_list)
if not taxes_list:
frappe.throw(_("Please set GST Accounts in GST Settings"))
item_tax_rate = {}
for tax in taxes:
item_wise_tax = json.loads(tax.item_wise_tax_detail)
item_tax_rate[tax.account_head] = item_wise_tax.get(row.item_code)
tax_rate = []
tax = taxes_list[0]
for key in account_list:
if tax[key] not in item_tax_rate.keys():
item_tax_rate[tax[key]] = [0.0, 0.0]
tax_rate.append(str(item_tax_rate[tax[key]][0]))
row.update({key[:5] + "amount": round(item_tax_rate.get(tax[key], 0.0)[1], 2)})
item_tax_rate.pop(tax[key])
row.amount = float(row.amount) + sum(i[1] for i in item_tax_rate.values())
row.update({'tax_rate': '+'.join(tax_rate)})
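# Example (illustrative JSON): an item_wise_tax_detail value of
# '{"ITEM-001": [9.0, 45.0]}' contributes rate 9.0 to the row's tax_rate
# string and amount 45.0 to the matching *_amount column.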
def get_columns():
columns = [
{
"fieldname": "supply_type",
"label": _("Supply Type"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "sub_type",
"label": _("Sub Type"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "doc_type",
"label": _("Doc Type"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "dn_id",
"label": _("Doc Name"),
"fieldtype": "Link",
"options": "Delivery Note",
"width": 140
},
{
"fieldname": "posting_date",
"label": _("Doc Date"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "company",
"label": _("From Party Name"),
"fieldtype": "Link",
"options": "Company",
"width": 120
},
{
"fieldname": "company_gstin",
"label": _("From GSTIN"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "from_address_1",
"label": _("From Address 1"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "from_address_2",
"label": _("From Address 2"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "from_place",
"label": _("From Place"),
"fieldtype": "Data",
"width": 80
},
{
"fieldname": "from_pin_code",
"label": _("From Pin Code"),
"fieldtype": "Data",
"width": 80
},
{
"fieldname": "from_state",
"label": _("From State"),
"fieldtype": "Data",
"width": 80
},
{
"fieldname": "dispatch_state",
"label": _("Dispatch State"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "customer",
"label": _("To Party Name"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "customer_gstin",
"label": _("To GSTIN"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "to_address_1",
"label": _("To Address 1"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "to_address_2",
"label": _("To Address 2"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "to_place",
"label": _("To Place"),
"fieldtype": "Data",
"width": 80
},
{
"fieldname": "to_pin_code",
"label": _("To Pin Code"),
"fieldtype": "Data",
"width": 80
},
{
"fieldname": "to_state",
"label": _("To State"),
"fieldtype": "Data",
"width": 80
},
{
"fieldname": "ship_to_state",
"label": _("Ship To State"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "item_name",
"label": _("Product"),
"fieldtype": "Link",
"options": "Item",
"width": 120
},
{
"fieldname": "description",
"label": _("Description"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "gst_hsn_code",
"label": _("HSN"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "uom",
"label": _("Unit"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "qty",
"label": _("Qty"),
"fieldtype": "Float",
"width": 100
},
{
"fieldname": "amount",
"label": _("Accessable Value"),
"fieldtype": "Float",
"width": 120
},
{
"fieldname": "tax_rate",
"label": _("Tax Rate"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "cgst_amount",
"label": _("CGST Amount"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "sgst_amount",
"label": _("SGST Amount"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "igst_amount",
"label": _("IGST Amount"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "cess_amount",
"label": _("CESS Amount"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "mode_of_transport",
"label": _("Mode of Transport"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "distance",
"label": _("Distance"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "transporter_name",
"label": _("Transporter Name"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "gst_transporter_id",
"label": _("Transporter ID"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "lr_no",
"label": _("Transport Receipt No"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "lr_date",
"label": _("Transport Receipt Date"),
"fieldtype": "Data",
"width": 120
},
{
"fieldname": "vehicle_no",
"label": _("Vehicle No"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "gst_vehicle_type",
"label": _("Vehicle Type"),
"fieldtype": "Data",
"width": 100
},
]
return columns | gpl-3.0 |
WikiTeam/wikiteam | wikimediacommons/commons-update-status.py | 1 | 2773 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
# Copyright (C) 2012-2016 WikiTeam developers
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import urllib.request
def main():
queryurl = 'https://archive.org/advancedsearch.php?q=collection%3Awikimediacommons&fl[]=identifier&sort[]=&sort[]=&sort[]=&rows=1000&page=1&output=json&callback=callback'
    raw = urllib.request.urlopen(queryurl).read().decode('utf-8')
raw = raw.split('callback(')[1].strip(')')
result = json.loads(raw)['response']['docs']
identifiers = {}
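    # Illustrative mapping performed by the loop below (hypothetical identifiers):
    #   'wikimediacommons-2012'       -> key '2012'
    #   'wikimediacommons-201201'     -> key '2012-01'
    #   'wikimediacommons-2012-01-31' -> key '2012-01-31'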
for item in result:
identifier = item['identifier']
if 'wikimediacommons-20' in identifier:
date = identifier.split('wikimediacommons-')[1]
t = date.split('-')
if len(t) == 1:
if len(t[0]) == 4: # YYYY
identifiers[t[0]] = identifier
elif len(t[0]) == 6: # YYYYMM
identifiers['%s-%s' % (t[0][:4], t[0][4:6])] = identifier
elif len(t[0]) == 8: # YYYYMMDD
identifiers['%s-%s-%s' % (t[0][:4], t[0][4:6], t[0][6:8])] = identifier
else:
print('ERROR, dont understand date format in %s' % (identifier))
elif len(t) == 2:
if len(t[0]) == 4 and len(t[1]) == 2: #YYYY-MM
identifiers['%s-%s' % (t[0], t[1])] = identifier
else:
print('ERROR, dont understand date format in %s' % (identifier))
elif len(t) == 3:
if len(t[0]) == 4 and len(t[1]) == 2 and len(t[2]) == 2: #YYYY-MM-DD
identifiers['%s-%s-%s' % (t[0], t[1], t[2])] = identifier
else:
print('ERROR, dont understand date format in %s' % (identifier))
identifiers_list = [[k, v] for k, v in identifiers.items()]
identifiers_list.sort()
rows = ["|-\n| %s || [https://archive.org/details/%s %s] || ??? || ???" % (k, v, v) for k, v in identifiers_list]
output = """
{| class="wikitable sortable"
! Date !! Identifier !! Files !! Size (GB)
%s
|}""" % ('\n'.join(rows))
print(output)
if __name__ == '__main__':
main()
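# Example of an emitted table row, given a hypothetical identifier
# 'wikimediacommons-201201' keyed as '2012-01':
#   |-
#   | 2012-01 || [https://archive.org/details/wikimediacommons-201201 wikimediacommons-201201] || ??? || ???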
| gpl-3.0 |
staute/shinken_package | test/test_groups_pickle.py | 18 | 2067 | #!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_groups_pickle.cfg')
def test_dispatch(self):
sub_confs = self.conf.confs
print "NB SUB CONFS", len(sub_confs)
vcfg = None
# Find where hr1 is
for cfg in sub_confs.values():
if 'HR1' in [h.get_name() for h in cfg.hosts]:
                print 'FOUND', len(cfg.hosts)
vcfg = cfg
        # Look if the hg in the conf is valid
vhg = vcfg.hostgroups.find_by_name('everyone')
self.assert_(len(vhg.members) == 1)
hr1 = [h for h in vcfg.hosts if h.get_name() == "HR1"][0]
print hr1.hostgroups
hg1 = None
for hg in hr1.hostgroups:
if hg.get_name() == 'everyone':
hg1 = hg
print "Founded hostgroup", hg1
print 'There should be only one host there'
self.assert_(len(hg1.members) == 1)
print 'and should be the same than the vcfg one!'
self.assert_(hg1 == vhg)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
samuelmaudo/yepes | yepes/utils/unidecode/x084.py | 252 | 4646 | data = (
'Hu ', # 0x00
'Qi ', # 0x01
'He ', # 0x02
'Cui ', # 0x03
'Tao ', # 0x04
'Chun ', # 0x05
'Bei ', # 0x06
'Chang ', # 0x07
'Huan ', # 0x08
'Fei ', # 0x09
'Lai ', # 0x0a
'Qi ', # 0x0b
'Meng ', # 0x0c
'Ping ', # 0x0d
'Wei ', # 0x0e
'Dan ', # 0x0f
'Sha ', # 0x10
'Huan ', # 0x11
'Yan ', # 0x12
'Yi ', # 0x13
'Tiao ', # 0x14
'Qi ', # 0x15
'Wan ', # 0x16
'Ce ', # 0x17
'Nai ', # 0x18
'Kutabireru ', # 0x19
'Tuo ', # 0x1a
'Jiu ', # 0x1b
'Tie ', # 0x1c
'Luo ', # 0x1d
'[?] ', # 0x1e
'[?] ', # 0x1f
'Meng ', # 0x20
'[?] ', # 0x21
'Yaji ', # 0x22
'[?] ', # 0x23
'Ying ', # 0x24
'Ying ', # 0x25
'Ying ', # 0x26
'Xiao ', # 0x27
'Sa ', # 0x28
'Qiu ', # 0x29
'Ke ', # 0x2a
'Xiang ', # 0x2b
'Wan ', # 0x2c
'Yu ', # 0x2d
'Yu ', # 0x2e
'Fu ', # 0x2f
'Lian ', # 0x30
'Xuan ', # 0x31
'Yuan ', # 0x32
'Nan ', # 0x33
'Ze ', # 0x34
'Wo ', # 0x35
'Chun ', # 0x36
'Xiao ', # 0x37
'Yu ', # 0x38
'Pian ', # 0x39
'Mao ', # 0x3a
'An ', # 0x3b
'E ', # 0x3c
'Luo ', # 0x3d
'Ying ', # 0x3e
'Huo ', # 0x3f
'Gua ', # 0x40
'Jiang ', # 0x41
'Mian ', # 0x42
'Zuo ', # 0x43
'Zuo ', # 0x44
'Ju ', # 0x45
'Bao ', # 0x46
'Rou ', # 0x47
'Xi ', # 0x48
'Xie ', # 0x49
'An ', # 0x4a
'Qu ', # 0x4b
'Jian ', # 0x4c
'Fu ', # 0x4d
'Lu ', # 0x4e
'Jing ', # 0x4f
'Pen ', # 0x50
'Feng ', # 0x51
'Hong ', # 0x52
'Hong ', # 0x53
'Hou ', # 0x54
'Yan ', # 0x55
'Tu ', # 0x56
'Zhu ', # 0x57
'Zi ', # 0x58
'Xiang ', # 0x59
'Shen ', # 0x5a
'Ge ', # 0x5b
'Jie ', # 0x5c
'Jing ', # 0x5d
'Mi ', # 0x5e
'Huang ', # 0x5f
'Shen ', # 0x60
'Pu ', # 0x61
'Gai ', # 0x62
'Dong ', # 0x63
'Zhou ', # 0x64
'Qian ', # 0x65
'Wei ', # 0x66
'Bo ', # 0x67
'Wei ', # 0x68
'Pa ', # 0x69
'Ji ', # 0x6a
'Hu ', # 0x6b
'Zang ', # 0x6c
'Jia ', # 0x6d
'Duan ', # 0x6e
'Yao ', # 0x6f
'Jun ', # 0x70
'Cong ', # 0x71
'Quan ', # 0x72
'Wei ', # 0x73
'Xian ', # 0x74
'Kui ', # 0x75
'Ting ', # 0x76
'Hun ', # 0x77
'Xi ', # 0x78
'Shi ', # 0x79
'Qi ', # 0x7a
'Lan ', # 0x7b
'Zong ', # 0x7c
'Yao ', # 0x7d
'Yuan ', # 0x7e
'Mei ', # 0x7f
'Yun ', # 0x80
'Shu ', # 0x81
'Di ', # 0x82
'Zhuan ', # 0x83
'Guan ', # 0x84
'Sukumo ', # 0x85
'Xue ', # 0x86
'Chan ', # 0x87
'Kai ', # 0x88
'Kui ', # 0x89
'[?] ', # 0x8a
'Jiang ', # 0x8b
'Lou ', # 0x8c
'Wei ', # 0x8d
'Pai ', # 0x8e
'[?] ', # 0x8f
'Sou ', # 0x90
'Yin ', # 0x91
'Shi ', # 0x92
'Chun ', # 0x93
'Shi ', # 0x94
'Yun ', # 0x95
'Zhen ', # 0x96
'Lang ', # 0x97
'Nu ', # 0x98
'Meng ', # 0x99
'He ', # 0x9a
'Que ', # 0x9b
'Suan ', # 0x9c
'Yuan ', # 0x9d
'Li ', # 0x9e
'Ju ', # 0x9f
'Xi ', # 0xa0
'Pang ', # 0xa1
'Chu ', # 0xa2
'Xu ', # 0xa3
'Tu ', # 0xa4
'Liu ', # 0xa5
'Wo ', # 0xa6
'Zhen ', # 0xa7
'Qian ', # 0xa8
'Zu ', # 0xa9
'Po ', # 0xaa
'Cuo ', # 0xab
'Yuan ', # 0xac
'Chu ', # 0xad
'Yu ', # 0xae
'Kuai ', # 0xaf
'Pan ', # 0xb0
'Pu ', # 0xb1
'Pu ', # 0xb2
'Na ', # 0xb3
'Shuo ', # 0xb4
'Xi ', # 0xb5
'Fen ', # 0xb6
'Yun ', # 0xb7
'Zheng ', # 0xb8
'Jian ', # 0xb9
'Ji ', # 0xba
'Ruo ', # 0xbb
'Cang ', # 0xbc
'En ', # 0xbd
'Mi ', # 0xbe
'Hao ', # 0xbf
'Sun ', # 0xc0
'Zhen ', # 0xc1
'Ming ', # 0xc2
'Sou ', # 0xc3
'Xu ', # 0xc4
'Liu ', # 0xc5
'Xi ', # 0xc6
'Gu ', # 0xc7
'Lang ', # 0xc8
'Rong ', # 0xc9
'Weng ', # 0xca
'Gai ', # 0xcb
'Cuo ', # 0xcc
'Shi ', # 0xcd
'Tang ', # 0xce
'Luo ', # 0xcf
'Ru ', # 0xd0
'Suo ', # 0xd1
'Xian ', # 0xd2
'Bei ', # 0xd3
'Yao ', # 0xd4
'Gui ', # 0xd5
'Bi ', # 0xd6
'Zong ', # 0xd7
'Gun ', # 0xd8
'Za ', # 0xd9
'Xiu ', # 0xda
'Ce ', # 0xdb
'Hai ', # 0xdc
'Lan ', # 0xdd
'[?] ', # 0xde
'Ji ', # 0xdf
'Li ', # 0xe0
'Can ', # 0xe1
'Lang ', # 0xe2
'Yu ', # 0xe3
'[?] ', # 0xe4
'Ying ', # 0xe5
'Mo ', # 0xe6
'Diao ', # 0xe7
'Tiao ', # 0xe8
'Mao ', # 0xe9
'Tong ', # 0xea
'Zhu ', # 0xeb
'Peng ', # 0xec
'An ', # 0xed
'Lian ', # 0xee
'Cong ', # 0xef
'Xi ', # 0xf0
'Ping ', # 0xf1
'Qiu ', # 0xf2
'Jin ', # 0xf3
'Chun ', # 0xf4
'Jie ', # 0xf5
'Wei ', # 0xf6
'Tui ', # 0xf7
'Cao ', # 0xf8
'Yu ', # 0xf9
'Yi ', # 0xfa
'Ji ', # 0xfb
'Liao ', # 0xfc
'Bi ', # 0xfd
'Lu ', # 0xfe
'Su ', # 0xff
)
| bsd-3-clause |
onoga/wm | src/gnue/forms/GFObjects/GFTabStop.py | 2 | 12786 | # GNU Enterprise Forms - GF Object Hierarchy - Navigable Objects
#
# Copyright 2001-2007 Free Software Foundation
#
# This file is part of GNU Enterprise
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: GFTabStop.py,v 1.14 2011/07/14 20:14:54 oleg Exp $
"""
Base class for all objects that can receive the keyboard focus on the UI.
"""
from gnue.common import events
from gnue.forms.input import displayHandlers
from gnue.forms.GFObjects.GFObj import GFObj
from gnue.forms.GFObjects.GFField import InvalidFieldValueError
from toolib import debug
__all__ = ['GFTabStop']
# =============================================================================
# Base class for navigable controls
# =============================================================================
class GFTabStop (GFObj):
"""
A base class for all GFObjects that can receive focus on the UI.
@cvar _navigableInQuery_: If True the object can recieve the keyboard focus
in query mode, otherwise not
"""
# -------------------------------------------------------------------------
# Attributes
# -------------------------------------------------------------------------
navigable = None
# -------------------------------------------------------------------------
# Class variables
# -------------------------------------------------------------------------
_navigableInQuery_ = True
# -------------------------------------------------------------------------
# Constructor
# -------------------------------------------------------------------------
def __init__(self, parent, object_type):
GFObj.__init__(self, parent, object_type)
# The sub-event handler handles the events that are passed from the
        # GFInstance. This is the event handler that display handlers attach to.
self.subEventHandler = events.EventController()
self.__first_visible_record = 0
self.__current_row_enabled = True
# Trigger exposure
self._validTriggers = {
'PRE-FOCUSOUT' : 'Pre-FocusOut',
'POST-FOCUSOUT' : 'Post-FocusOut',
'PRE-FOCUSIN' : 'Pre-FocusIn',
'POST-FOCUSIN' : 'Post-FocusIn',
'ON-NEXT-ENTRY' : 'On-Next-Entry',
'ON-PREVIOUS-ENTRY': 'On-Previous-Entry',
}
# -------------------------------------------------------------------------
# Implementation of virtual methods
# -------------------------------------------------------------------------
def _phase_1_init_(self):
"""
"""
GFObj._phase_1_init_(self)
self._form._entryList.append(self)
# -------------------------------------------------------------------------
def _is_navigable_ (self, mode):
"""
        In general an object is navigable if it is not hidden and its
        navigable xml-attribute is set. If mode is 'query' it additionally
        depends on whether an object is 'queryable' or not. If mode is 'edit' or
'new' only objects are navigable which are not 'readonly'.
"""
if self.hidden:
return False
else:
if mode == 'query':
return self.navigable and self._navigableInQuery_
else:
return self.navigable
# -------------------------------------------------------------------------
# UI events (called from UIEntry/UIButton)
# -------------------------------------------------------------------------
def _event_set_focus(self):
"""
Notify the object that the user has set the focus to this object with a
mouse click.
This method makes sure that the logical focus follows the physical
focus.
In case the current focus widget vetoes the focus loss, this method
beats the focus back to the old widget.
In fact, this method only calls GFForm._event_focus_changed() with a
target of this object.
"""
self._form._event_focus_changed(self)
# -------------------------------------------------------------------------
# Recalculate the visible index of an object
# -------------------------------------------------------------------------
def recalculate_visible(self, cur_record):
"""
Process a record pointer movement or a result set change for this
entry.
This function sets the C{_visibleIndex} property of this entry. It also
takes care of disabling rows of this entry that are outside the actual
number of available records, and it redisplays the contents of the
entry as needed.
@param cur_record: the currently active record, or -1 if there is no
record active currently.
"""
if self.hidden:
return
if self._form.get_focus_object() is self:
self.ui_focus_out()
try:
if self.uiWidget is not None:
if isinstance(self, GFFieldBound):
# Disable current row if current record is -1
if hasattr(self.uiWidget, '_ui_enable_'):
self.uiWidget._ui_enable_(cur_record != -1)
else:
debug.error("%s has no _ui_enable_ method" % self.uiWidget)
self.refresh_ui()
# Set widgets to editable or non-editable
self.uiWidget._ui_set_editable_(self._field.isEditable())
finally:
# If this was the currently focused widget, move the focus along
if self._form.get_focus_object() is self:
self.ui_focus_in()
self.ui_set_focus()
if hasattr(self, '_displayHandler') and self._displayHandler.editing:
self._displayHandler.generateRefreshEvent()
# -------------------------------------------------------------------------
# Focus handling
# -------------------------------------------------------------------------
def focus_in(self):
"""
Notify the object that it has received the focus.
"""
self.ui_focus_in()
self.processTrigger('PRE-FOCUSIN')
self.processTrigger('POST-FOCUSIN')
# Update tip
if self.get_option('tip'):
tip = self.get_option('tip')
elif isinstance(self, GFFieldBound) and self._field.get_option('tip'):
tip = self._field.get_option('tip')
elif hasattr(self, "_displayHandler"):
tip = self._displayHandler.get_tip()
else:
tip = ""
self._form.update_tip(tip)
# -------------------------------------------------------------------------
def validate(self):
"""
Validate the object to decide whether the focus can be moved away from
it.
This function can raise an exception, in which case the focus change
will be prevented.
"""
self.processTrigger('PRE-FOCUSOUT', ignoreAbort=False)
# -------------------------------------------------------------------------
def focus_out(self):
"""
Notify the object that it is going to lose the focus.
The focus change is already decided at this moment, there is no way to
stop the focus from changing now.
"""
self.processTrigger('POST-FOCUSOUT')
self.ui_focus_out()
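    # Illustrative focus-change sequence (a sketch; GFForm drives the actual
    # order via _event_focus_changed):
    #   old.validate()     - may raise to veto the change (PRE-FOCUSOUT)
    #   old.focus_out()    - POST-FOCUSOUT, then the UI widget is notified
    #   new.focus_in()     - PRE-/POST-FOCUSIN triggers run, tip is updated
    #   new.ui_set_focus() - only when the change originated on the GF layer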
# -------------------------------------------------------------------------
# UI focus movement
# -------------------------------------------------------------------------
def ui_set_focus(self):
"""
Set the focus to this widget on the UI layer.
This function is only called when the focus is set from the GF layer.
If the user changes the focus with a mouse click, this function is not
called because the UI focus already is on the target widget.
So the purpose of this function is to make the UI focus follow the GF
focus.
"""
#rint "ui_set_focus: Focus requested from logic", self._field
self.uiWidget._ui_set_focus_()
# -------------------------------------------------------------------------
def ui_focus_in(self):
"""
Notify the UI widget that is is going to receive the focus.
This function is always called, no matter whether the user requested
the focus change via mouse click, keypress, or trigger function.
The purpose of this function is to allow the UI widget to do things
that always must be done when it gets the focus, like changing the
color of the current widget, or activating the current entry in the
grid.
"""
self.uiWidget._ui_focus_in_()
# if have parent notepages, select them
parent = self
while parent:
if parent._type == 'GFNotepage':
parent.select()
parent = parent.getParent()
# -------------------------------------------------------------------------
def ui_focus_out(self):
"""
Notify the UI widget that it has lost the focus.
This function is always called, no matter whether the user requested
the focus change via mouse click, keypress, or trigger function.
The purpose of this function is to allow the UI widget to do things
that always must be done when it loses the focus, like changing the
color of the formerly current widget back to normal, or deactivating
the no-longer-current entry in the grid.
This function works better than the KILL-FOCUS event of the UI, because
KILL-FOCUS runs too often, for example also when the dropdown is opened
(and the focus moves from the dropdown entry to the dropdown list).
"""
self.uiWidget._ui_focus_out_()
        # GFTable has this method to listen when entry loses focus to end editing
if hasattr(self.getParent(), 'ui_focus_out'):
self.getParent().ui_focus_out()
# =============================================================================
# Base class for all widgets bound to a field
# =============================================================================
class GFFieldBound(GFTabStop):
# -------------------------------------------------------------------------
# Constructor
# -------------------------------------------------------------------------
def __init__(self, parent, object_type):
GFTabStop.__init__(self, parent, object_type)
self._block = None
self._field = None
# -------------------------------------------------------------------------
# Phase 1 init
# -------------------------------------------------------------------------
def _phase_1_init_(self):
GFTabStop._phase_1_init_(self)
self._block = self.get_block()
assert (self._block is not None), '%s has no block' % self.name
self._block._entryList.append(self)
self._field = self.get_field()
self._field._entryList.append(self)
self._formatmask = ""
self._inputmask = getattr(self, 'inputmask', '')
self._displaymask = getattr(self, 'displaymask', '')
# Associate a display handler with this instance
self._displayHandler = displayHandlers.factory(self,
self._form._instance.eventController,
self.subEventHandler,
self._displaymask,
self._inputmask)
# -------------------------------------------------------------------------
# Clipboard and selection
# -------------------------------------------------------------------------
def cut(self):
if self.uiWidget is not None:
self.uiWidget._ui_cut_()
# -------------------------------------------------------------------------
def copy(self):
if self.uiWidget is not None:
self.uiWidget._ui_copy_()
# -------------------------------------------------------------------------
def paste(self):
if self.uiWidget is not None:
self.uiWidget._ui_paste_()
# -------------------------------------------------------------------------
def select_all(self):
if self.uiWidget is not None:
self.uiWidget._ui_select_all_()
# -------------------------------------------------------------------------
# Refresh the user interface with the current field data
# -------------------------------------------------------------------------
def refresh_ui(self):
if not self.hidden:
# Do not execute if we were editing - would overwrite unsaved change
if not self._displayHandler.editing:
try:
value = self._field.get_value()
except InvalidFieldValueError: # invalid value
value = None
display = self._displayHandler.build_display(value, False)
assert isinstance(display, (unicode, bool)), self._displayHandler
self.uiWidget._ui_set_value_(display)
def refresh_ui_editable(self):
if not self.hidden:
self.uiWidget._ui_set_editable_(self._field.isEditable())
# -------------------------------------------------------------------------
# Update the available list of choices for all uiWidgets
# -------------------------------------------------------------------------
def refresh_ui_choices(self, choices):
self.uiWidget._ui_set_choices_(choices)
| gpl-2.0 |
donNewtonAlpha/onos | tools/test/topos/newFuncTopo.py | 29 | 5180 | #!/usr/bin/python
"""
Custom topology for Mininet
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Host, RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
from mininet.node import ( UserSwitch, OVSSwitch, IVSSwitch )
class VLANHost( Host ):
def config( self, vlan=100, **params ):
r = super( Host, self ).config( **params )
intf = self.defaultIntf()
self.cmd( 'ifconfig %s inet 0' % intf )
self.cmd( 'vconfig add %s %d' % ( intf, vlan ) )
self.cmd( 'ifconfig %s.%d inet %s' % ( intf, vlan, params['ip'] ) )
newName = '%s.%d' % ( intf, vlan )
intf.name = newName
self.nameToIntf[ newName ] = intf
return r
class IPv6Host( Host ):
def config( self, v6Addr='1000:1/64', **params ):
r = super( Host, self ).config( **params )
intf = self.defaultIntf()
self.cmd( 'ifconfig %s inet 0' % intf )
self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
return r
class dualStackHost( Host ):
def config( self, v6Addr='2000:1/64', **params ):
r = super( Host, self ).config( **params )
intf = self.defaultIntf()
self.cmd( 'ip -6 addr add %s dev %s' % ( v6Addr, intf ) )
return r
class MyTopo( Topo ):
def __init__( self ):
# Initialize topology
Topo.__init__( self )
# Switch S5 Hosts
host1=self.addHost( 'h1', ip='10.1.0.2/24' )
host2=self.addHost( 'h2', cls=IPv6Host, v6Addr='1000::2/64' )
host3=self.addHost( 'h3', ip='10.1.0.3/24', cls=dualStackHost, v6Addr='2000::2/64' )
#VLAN hosts
host4=self.addHost( 'h4', ip='100.1.0.2/24', cls=VLANHost, vlan=100 )
host5=self.addHost( 'h5', ip='200.1.0.2/24', cls=VLANHost, vlan=200 )
#VPN-1 and VPN-2 Hosts
host6=self.addHost( 'h6', ip='11.1.0.2/24' )
host7=self.addHost( 'h7', ip='12.1.0.2/24' )
#Multicast Sender
host8=self.addHost( 'h8', ip='10.1.0.4/24' )
# Switch S6 Hosts
host9=self.addHost( 'h9', ip='10.1.0.5/24' )
host10=self.addHost( 'h10', cls=IPv6Host, v6Addr='1000::3/64' )
host11=self.addHost( 'h11', ip='10.1.0.6/24', cls=dualStackHost, v6Addr='2000::3/64' )
#VLAN hosts
host12=self.addHost( 'h12', ip='100.1.0.3/24', cls=VLANHost, vlan=100 )
host13=self.addHost( 'h13', ip='200.1.0.3/24', cls=VLANHost, vlan=200 )
#VPN-1 and VPN-2 Hosts
host14=self.addHost( 'h14', ip='11.1.0.3/24' )
host15=self.addHost( 'h15', ip='12.1.0.3/24' )
#Multicast Receiver
host16=self.addHost( 'h16', ip='10.1.0.7/24' )
# Switch S7 Hosts
host17=self.addHost( 'h17', ip='10.1.0.8/24' )
host18=self.addHost( 'h18', cls=IPv6Host, v6Addr='1000::4/64' )
host19=self.addHost( 'h19', ip='10.1.0.9/24', cls=dualStackHost, v6Addr='2000::4/64' )
#VLAN hosts
host20=self.addHost( 'h20', ip='100.1.0.4/24', cls=VLANHost, vlan=100 )
host21=self.addHost( 'h21', ip='200.1.0.4/24', cls=VLANHost, vlan=200 )
#VPN-1 and VPN-2 Hosts
host22=self.addHost( 'h22', ip='11.1.0.4/24' )
host23=self.addHost( 'h23', ip='12.1.0.4/24' )
#Multicast Receiver
host24=self.addHost( 'h24', ip='10.1.0.10/24' )
s1 = self.addSwitch( 's1' )
s2 = self.addSwitch( 's2' )
s3 = self.addSwitch( 's3' )
s4 = self.addSwitch( 's4' )
s5 = self.addSwitch( 's5' )
s6 = self.addSwitch( 's6' )
s7 = self.addSwitch( 's7' )
self.addLink(s5,host1)
self.addLink(s5,host2)
self.addLink(s5,host3)
self.addLink(s5,host4)
self.addLink(s5,host5)
self.addLink(s5,host6)
self.addLink(s5,host7)
self.addLink(s5,host8)
self.addLink(s6,host9)
self.addLink(s6,host10)
self.addLink(s6,host11)
self.addLink(s6,host12)
self.addLink(s6,host13)
self.addLink(s6,host14)
self.addLink(s6,host15)
self.addLink(s6,host16)
self.addLink(s7,host17)
self.addLink(s7,host18)
self.addLink(s7,host19)
self.addLink(s7,host20)
self.addLink(s7,host21)
self.addLink(s7,host22)
self.addLink(s7,host23)
self.addLink(s7,host24)
self.addLink(s1,s2)
self.addLink(s1,s3)
self.addLink(s1,s4)
self.addLink(s1,s5)
self.addLink(s2,s3)
self.addLink(s2,s5)
self.addLink(s2,s6)
self.addLink(s3,s4)
self.addLink(s3,s6)
self.addLink(s4,s7)
topos = { 'mytopo': ( lambda: MyTopo() ) }
# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
def setupNetwork():
"Create network"
topo = MyTopo()
network = Mininet(topo=topo, autoSetMacs=True, controller=None)
network.start()
CLI( network )
network.stop()
if __name__ == '__main__':
setLogLevel('info')
#setLogLevel('debug')
setupNetwork()
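# Usage sketch (assumed invocation, not part of the original file): the topology
# can also be loaded from the Mininet CLI with a remote ONOS controller, e.g.
#   sudo mn --custom newFuncTopo.py --topo mytopo --controller=remote,ip=<onos-ip>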
| apache-2.0 |
flavour/ifrc_qa | languages/en-gb.py | 8 | 15345 | # -*- coding: utf-8 -*-
{
"A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": "A volunteer is defined as active if they've participated in an average of 8 or more hours of Programme work or Trainings per month in the last year",
'Ability to customize the list of details tracked at a Shelter': 'Ability to customise the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Ability to customise the list of human resource tracked at a Shelter',
'Ability to customize the list of important facilities needed at a Shelter': 'Ability to customise the list of important facilities needed at a Shelter',
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organisation's name, eg. IFRC.",
'Add a new program to the catalog.': 'Add a new programme to the catalog.',
'Add all organizations which are involved in different roles in this project': 'Add all organisations which are involved in different roles in this project',
'Add Branch Organization': 'Add New Branch Organisation',
'Add Item to Catalog': 'Add Item to Catalogue',
'Add New Program': 'Add New Programme',
'Add Organization Domain': 'Add Organisation Domain',
'Add Organization to Project': 'Add Organisation to Project',
'Add Program Hours': 'Add Programme Hours',
'Alert Notification': 'Alert Notification',
'Branch Organization Capacity Assessment': 'Branch Organisation Capacity Assessment',
'Branch Organization Capacity Assessments': 'Branch Organisation Capacity Assessments',
'Canceled': 'Cancelled',
'Cannot make an Organization a branch of itself!': 'Cannot make an Organisation a branch of itself!',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organisation is providing and where',
'Catalog': 'Catalogue',
'Catalog added': 'Catalogue added',
'Catalog deleted': 'Catalogue deleted',
'Catalog Details': 'Catalogue Details',
'Catalog Item added': 'Catalogue Item added',
'Catalog Item deleted': 'Catalogue Item deleted',
'Catalog Item updated': 'Catalogue Item updated',
'Catalog Items': 'Catalogue Items',
'Catalog updated': 'Catalogue updated',
'Catalogs': 'Catalogues',
'Certificate Catalog': 'Certificate Catalogue',
'Certifying Organization': 'Certifying Organisation',
'Commitment Canceled': 'Commitment Cancelled',
'Community Mobilization': 'Community Mobilisation',
'Community Organization': 'Community Organisation',
'Competency Rating Catalog': 'Competency Rating Catalogue',
'Configure resources to synchronize, update methods and policies': 'Configure resources to synchronise, update methods and policies',
'Configure/Monitor Synchronization': 'Configure/Monitor Synchronisation',
'Course Catalog': 'Course Catalogue',
'Create Catalog': 'Create Catalogue',
'Create Catalog Item': 'Create Catalogue Item',
'Create Organization': 'Create Organisation',
'Create Organization Type': 'Create Organisation Type',
'Create Partner Organization': 'Create Partner Organisation',
'Create Program': 'Create Programme',
'Credentialling Organization': 'Credentialling Organisation',
'Current Owned By (Organization/Branch)': 'Current Owned By (Organisation/Branch)',
'Currently no programs registered': 'Currently no programmes registered',
'Delete Catalog': 'Delete Catalogue',
'Delete Catalog Item': 'Delete Catalogue Item',
'Delete Organization': 'Delete Organisation',
'Delete Organization Domain': 'Delete Organisation Domain',
'Delete Organization Type': 'Delete Organisation Type',
'Delete Partner Organization': 'Delete Partner Organisation',
'Delete Program': 'Delete Programme',
'Department Catalog': 'Department Catalogue',
'Donating Organization': 'Donating Organisation',
'Edit Catalog': 'Edit Catalogue',
'Edit Catalog Item': 'Edit Catalogue Item',
'Edit Organization': 'Edit Organisation',
'Edit Organization Domain': 'Edit Organisation Domain',
'Edit Organization Type': 'Edit Organisation Type',
'Edit Partner Organization': 'Edit Partner Organisation',
'Edit Program': 'Edit Programme',
'Edit Project Organization': 'Edit Project Organisation',
'Edit Synchronization Settings': 'Edit Synchronisation Settings',
'Enter your organization': 'Enter your organisation',
'Filter': 'Filter',
'From Organization': 'From Organisation',
'Fulfill Requests': 'Fulfil Requests',
'Fulfillment Status': 'Fulfilment Status',
'Funding Organization': 'Funding Organisation',
'Funds Contributed by this Organization': 'Funds Contributed by this Organisation',
'Hair Color': 'Hair Colour',
'Hours by Program Report': 'Hours by Programme Report',
'Identifier which the repository identifies itself with when sending synchronization requests.': 'Identifier which the repository identifies itself with when sending synchronisation requests.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": "If this field is populated then a user who specifies this Organisation when signing up will be assigned as a Staff of this Organisation unless their domain doesn't match the domain field.",
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organisation',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "If you don't see the Organisation in the list, you can add a new one by clicking link 'Add Organisation'.",
'Import Organizations': 'Import Organisations',
'Import Partner Organizations': 'Import Partner Organisations',
'Import Project Organizations': 'Import Project Organisations',
'In Catalogs': 'In Catalogues',
'Intergovernmental Organization': 'Intergovernmental Organisation',
'International Organization': 'International Organisation',
'Item Catalog Details': 'Item Catalogue Details',
'Item Catalogs': 'Item Catalogues',
'Job Role Catalog': 'Job Role Catalogue',
'Job Title Catalog': 'Job Title Catalogue',
'Kit canceled': 'Kit cancelled',
'Last Synchronization': 'Last Synchronisation',
'Last synchronized on': 'Last synchronised on',
'Lead Organization': 'Lead Organisation',
'List All Organization Approvers & Whitelists': 'List All Organisation Approvers & Whitelists',
'List Organization Domains': 'List Organisation Domains',
'List Organization Types': 'List Organisation Types',
'List Organizations': 'List Organisations',
'List Partner Organizations': 'List Partner Organisations',
'List Programs': 'List Programmes',
'List Project Organizations': 'List Project Organisations',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Logo of the organisation. This should be a png or jpeg file and it should be no larger than 400x400',
'Manage Organization Contacts': 'Manage Organisation Contacts',
'Manage Organizations': 'Manage Organisations',
'Manual Synchronization': 'Manual Synchronisation',
'Matching Catalog Items': 'Matching Catalogue Items',
'Monetization': 'Monetisation',
'Monetization Report': 'Monetisation Report',
'No Catalog Items currently registered': 'No Catalogue Items currently registered',
'No Catalogs currently registered': 'No Catalogues currently registered',
'No Matching Catalog Items': 'No Matching Catalogue Items',
'No Organization Domains currently registered': 'No Organisation Domains currently registered',
'No Organization Types currently registered': 'No Organisation Types currently registered',
'No Organizations currently registered': 'No Organisations currently registered',
'No Organizations for this Project': 'No Organisations for this Project',
'No Partner Organizations currently registered': 'No Partner Organisations currently registered',
'Notification': 'Notification',
'Office/Center': 'Office/Centre',
'Order canceled': 'Order cancelled',
'Organization': 'Organisation',
'Organization added': 'Organisation added',
'Organization added to Project': 'Organisation added to Project',
'Organization deleted': 'Organisation deleted',
'Organization Details': 'Organisation Details',
'Organization Domain added': 'Organisation Domain added',
'Organization Domain deleted': 'Organisation Domain deleted',
'Organization Domain Details': 'Organisation Domain Details',
'Organization Domain updated': 'Organisation Domain updated',
'Organization Domains': 'Organisation Domains',
'Organization Registry': 'Organisation Registry',
'Organization removed from Project': 'Organisation removed from Project',
'Organization Type': 'Organisation Type',
'Organization Type added': 'Organisation Type added',
'Organization Type deleted': 'Organisation Type deleted',
'Organization Type Details': 'Organisation Type Details',
'Organization Type updated': 'Organisation Type updated',
'Organization Types': 'Organisation Types',
'Organization Units': 'Organisation Units',
'Organization updated': 'Organisation updated',
'Organization(s)': 'Organisation(s)',
'Organization/Branch': 'Organisation/Branch',
'Organization/Supplier': 'Organisation/Supplier',
'Organizational Development': 'Organisational Development',
'Organizations': 'Organisations',
'Organized By': 'Organised By',
'Owned By (Organization/Branch)': 'Owned By (Organisation/Branch)',
'Owning Organization': 'Owning Organisation',
'Participating Organizations': 'Participating Organisations',
'Partner Organization': 'Partner Organisation',
'Partner Organization added': 'Partner Organisation added',
'Partner Organization deleted': 'Partner Organisation deleted',
'Partner Organization Details': 'Partner Organisation Details',
'Partner Organization updated': 'Partner Organisation updated',
'Partner Organizations': 'Partner Organisations',
"Phone number to donate to this organization's relief efforts.": "Phone number to donate to this organisation's relief efforts.",
'Please enter a %(site)s OR an Organization': 'Please enter a %(site)s OR an Organisation',
'Please enter an Organization/Supplier': 'Please enter an Organisation/Supplier',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Please use this field to record any additional information, including a history of the record if it is updated.',
'Position Catalog': 'Position Catalogue',
'Program': 'Programme',
'Program added': 'Programme added',
'Program deleted': 'Programme deleted',
'Program Details': 'Programme Details',
'Program Hours': 'Programme Hours',
'Program Hours (Month)': 'Programme Hours (Month)',
'Program Hours (Year)': 'Programme Hours (Year)',
'Program updated': 'Programme updated',
'Programs': 'Programmes',
'Project Details including organizations': 'Project Details including organisations',
'Project Details including organizations and communities': 'Project Details including organisations and communities',
'Project Organization Details': 'Project Organisation Details',
'Project Organization updated': 'Project Organisation updated',
'Project Organizations': 'Project Organisations',
'Received Shipment canceled': 'Received Shipment cancelled',
'Request Canceled': 'Request Cancelled',
'Request for Donations Canceled': 'Request for Donations Cancelled',
'Request for Volunteers Canceled': 'Request for Volunteers Cancelled',
'Resource Mobilization': 'Resource Mobilisation',
'Schedule synchronization jobs': 'Schedule synchronisation jobs',
'Search by organization.': 'Search by organisation.',
'Search for an Organization by name or acronym': 'Search for an Organisation by name or acronym',
'Search for an Organization by name or acronym.': 'Search for an Organisation by name or acronym.',
'Search for office by organization or branch.': 'Search for office by organisation or branch.',
'Search for warehouse by organization.': 'Search for warehouse by organisation.',
'Search Organization Domains': 'Search Organisation Domains',
'Search Organization Types': 'Search Organisation Types',
'Search Organizations': 'Search Organisations',
'Search Partner Organizations': 'Search Partner Organisations',
'Search Programs': 'Search Programmes',
'Search Project Organizations': 'Search Project Organisations',
'Sent Shipment canceled': 'Sent Shipment cancelled',
'Sent Shipment canceled and items returned to Warehouse': 'Sent Shipment cancelled and items returned to Warehouse',
'Shipping Organization': 'Shipping Organisation',
'Social Mobilization': 'Social Mobilisation',
'Specialized Hospital': 'Specialised Hospital',
'Synchronization': 'Synchronisation',
'Synchronization Job': 'Synchronisation Job',
'Synchronization Log': 'Synchronisation Log',
'Synchronization mode': 'Synchronisation mode',
'Synchronization Schedule': 'Synchronisation Schedule',
'Synchronization Settings': 'Synchronisation Settings',
'Synchronization settings updated': 'Synchronisation settings updated',
'Synchronize now': 'Synchronise now',
'The default Organization for whom this person is acting.': 'The default Organisation for whom this person is acting.',
'The default Organization for whom you are acting.': 'The default Organisation for whom you are acting.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'The Organisation Registry keeps track of all the relief organisations working in the area.',
'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'The synchronisation module allows the synchronisation of data resources between Sahana Eden instances.',
'This shipment has already been received & subsequently canceled.': 'This shipment has already been received & subsequently cancelled.',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'This shipment has not been received - it has NOT been cancelled because it can still be edited.',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'This shipment has not been sent - it has NOT been cancelled because it can still be edited.',
'To Organization': 'To Organisation',
'Training Course Catalog': 'Training Course Catalogue',
'Transfer Ownership To (Organization/Branch)': 'Transfer Ownership To (Organisation/Branch)',
"Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.": "Type the name of an existing catalogue item OR Click 'Create Item' to add an item which is not in the catalogue.",
'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronisation',
'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Unique identifier which THIS repository identifies itself with when sending synchronisation requests.',
'User Guidelines Synchronization': 'User Guidelines Synchronisation',
'Utilization Report': 'Utilisation Report',
'Volunteer Role Catalog': 'Volunteer Role Catalogue',
'Work on Program': 'Work on Programme',
'Year that the organization was founded': 'Year that the organisation was founded',
}
| mit |
repotvsupertuga/repo | instal/script.module.stream.tvsupertuga.addon/resources/lib/sources/fr/fullmoviz.py | 7 | 5657 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['fr']
self.domains = ['www.fullmoviz.org']
self.base_link = 'http://www.fullmoviz.org'
self.key_link = '?'
self.search_link = 's=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'localtitle': localtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
print '------------------------------- -------------------------------'
sources = []
print url
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print data
title = data['title']
            year = data['year'] if 'year' in data else None
season = data['season'] if 'season' in data else False
episode = data['episode'] if 'episode' in data else False
localtitle = data['localtitle'] if 'localtitle' in data else False
if season and episode:
localtitle = data['localtvshowtitle'] if 'localtvshowtitle' in data else False
#r = 'http://www.fullmoviz.org/?s=deadpool'
#r = client.request(r)
#r = client.parseDOM(r, 'div', attrs={'class': 'post-thumbnail'})
#r = client.parseDOM(r, 'a', ret='href')
#r = client.request(r[0])
#r = client.parseDOM(r, 'div', attrs={'class': 'tab-me-content-wrapper'})
#r = client.parseDOM(r, 'iframe', ret='src')
t = cleantitle.get(title)
tq = cleantitle.query(localtitle)
tq2 = re.sub(' ', '', cleantitle.query(localtitle).lower())
tq = re.sub(' ', '%20', tq)
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
query = 'http://www.fullmoviz.org'
r = client.request('http://www.fullmoviz.org/?s=%s' % tq)
print 'http://www.fullmoviz.org/?s=%s' % tq
r = client.parseDOM(r, 'div', attrs={'class': 'post-thumbnail'})
r0 = client.parseDOM(r, 'a', ret='href')[0]
r2 = client.parseDOM(r, 'a', ret='title')[0]
r1 = re.sub('(\([0-9]{4}\)|streaming|\s+)', '', r2)
#r = sorted(set(r))
r = [(r0, r1) for i in r]
#r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
#r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
#r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:saison|s)\s+(\d+)', i[1])) for i in r]
#r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
#r = [(i[0], re.sub(' \&\#[0-9]{4,6};', '', i[1]), i[2], i[3]) for i in r]
r = [i[0] for i in r if tq2 == cleantitle.get(i[1])][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
r = client.request('http://www.fullmoviz.org' + url)
print 'http://www.fullmoviz.org' + url
r = client.parseDOM(r, 'div', attrs={'class': 'tab-me-content-wrapper'})
r = client.parseDOM(r, 'iframe', ret='src')
for i in r:
url = i
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: continue
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': 'SD', 'language': 'FR', 'url': url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url | gpl-2.0 |
lokirius/python-for-android | python3-alpha/python3-src/Lib/test/test_pep263.py | 62 | 1846 | # -*- coding: koi8-r -*-
import unittest
from test import support
class PEP263Test(unittest.TestCase):
def test_pep263(self):
self.assertEqual(
"ðÉÔÏÎ".encode("utf-8"),
b'\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
)
self.assertEqual(
"\ð".encode("utf-8"),
b'\\\xd0\x9f'
)
def test_compilestring(self):
# see #1882
c = compile(b"\n# coding: utf-8\nu = '\xc3\xb3'\n", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['u'], '\xf3')
def test_issue2301(self):
try:
compile(b"# coding: cp932\nprint '\x94\x4e'", "dummy", "exec")
except SyntaxError as v:
self.assertEqual(v.text, "print '\u5e74'\n")
else:
self.fail()
def test_issue4626(self):
c = compile("# coding=latin-1\n\u00c6 = '\u00c6'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['\xc6'], '\xc6')
def test_issue3297(self):
c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['a'], d['b'])
self.assertEqual(len(d['a']), len(d['b']))
self.assertEqual(ascii(d['a']), ascii(d['b']))
def test_issue7820(self):
# Ensure that check_bom() restores all bytes in the right order if
# check_bom() fails in pydebug mode: a buffer starts with the first
# byte of a valid BOM, but next bytes are different
# one byte in common with the UTF-16-LE BOM
self.assertRaises(SyntaxError, eval, b'\xff\x20')
# two bytes in common with the UTF-8 BOM
self.assertRaises(SyntaxError, eval, b'\xef\xbb\x20')
def test_main():
support.run_unittest(PEP263Test)
if __name__=="__main__":
test_main()
| apache-2.0 |
cnsoft/kbengine-cocos2dx | kbe/res/scripts/common/Lib/ctypes/test/test_bytes.py | 65 | 1190 | """Test where byte objects are accepted"""
import unittest
import sys
from ctypes import *
class BytesTest(unittest.TestCase):
def test_c_char(self):
x = c_char(b"x")
x.value = b"y"
c_char.from_param(b"x")
(c_char * 3)(b"a", b"b", b"c")
def test_c_wchar(self):
x = c_wchar("x")
x.value = "y"
c_wchar.from_param("x")
(c_wchar * 3)("a", "b", "c")
def test_c_char_p(self):
c_char_p(b"foo bar")
def test_c_wchar_p(self):
c_wchar_p("foo bar")
def test_struct(self):
class X(Structure):
_fields_ = [("a", c_char * 3)]
x = X(b"abc")
self.assertEqual(x.a, b"abc")
self.assertEqual(type(x.a), bytes)
def test_struct_W(self):
class X(Structure):
_fields_ = [("a", c_wchar * 3)]
x = X("abc")
self.assertEqual(x.a, "abc")
self.assertEqual(type(x.a), str)
if sys.platform == "win32":
def test_BSTR(self):
from _ctypes import _SimpleCData
class BSTR(_SimpleCData):
_type_ = "X"
BSTR("abc")
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
jbassen/edx-platform | common/djangoapps/course_modes/migrations/0008_auto__del_field_coursemodesarchive_description__add_field_coursemode_s.py | 102 | 3098 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseMode.sku'
db.add_column('course_modes_coursemode', 'sku',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseMode.sku'
db.delete_column('course_modes_coursemode', 'sku')
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sku': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'course_modes.coursemodesarchive': {
'Meta': {'object_name': 'CourseModesArchive'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
| agpl-3.0 |
arista-eosplus/ansible | lib/ansible/modules/network/iosxr/_iosxr_template.py | 47 | 5327 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: iosxr_template
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage Cisco IOS XR device configurations over SSH
description:
- Manages network device configurations over SSH. This module
allows implementers to work with the device running-config. It
provides a way to push a set of commands onto a network device
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
deprecated: Deprecated in 2.2. Use M(iosxr_config) instead.
extends_documentation_fragment: iosxr
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will first search for the source
file in role or playbook root folder in templates unless a full
path to the file is given.
required: false
default: null
force:
description:
- The force argument instructs the module not to consider the
current device running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
required: false
default: false
choices: [ "true", "false" ]
backup:
description:
- When this argument is configured true, the module will backup
the running-config from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: [ "true", "false" ]
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task. The I(config) argument allows the implementer to
pass in the configuration to use as the base config for
comparison.
required: false
default: null
"""
EXAMPLES = """
- name: push a configuration onto the device
iosxr_template:
src: config.j2
- name: forceable push a configuration onto the device
iosxr_template:
src: config.j2
force: yes
- name: provide the base configuration for comparison
iosxr_template:
src: candidate_config.txt
config: current_config.txt
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.iosxr import get_config, load_config
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(),
force=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
config=dict(),
)
argument_spec.update(iosxr_argument_spec)
mutually_exclusive = [('config', 'backup'), ('config', 'force')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
candidate = NetworkConfig(contents=module.params['src'], indent=1)
if module.params['backup']:
result['__backup__'] = get_config(module)
if not module.params['force']:
contents = get_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj)
commands = dumps(commands, 'commands').split('\n')
commands = [str(c).strip() for c in commands if c]
else:
commands = [c.strip() for c in str(candidate).split('\n')]
if commands:
load_config(module, commands, result['warnings'], not module.check_mode)
result['changed'] = not module.check_mode
result['updates'] = commands
result['commands'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
openvapour/ryu | ryu/contrib/ncclient/operations/rpc.py | 31 | 13554 | # Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Event, Lock
from uuid import uuid1
from ncclient.xml_ import *
from ncclient.transport import SessionListener
from errors import OperationError, TimeoutExpiredError, MissingCapabilityError
import logging
logger = logging.getLogger("ncclient.operations.rpc")
class RPCError(OperationError):
"Represents an `rpc-error`. It is a type of :exc:`OperationError` and can be raised as such."
tag_to_attr = {
qualify("error-type"): "_type",
qualify("error-tag"): "_tag",
qualify("error-severity"): "_severity",
qualify("error-info"): "_info",
qualify("error-path"): "_path",
qualify("error-message"): "_message"
}
def __init__(self, raw):
self._raw = raw
for attr in RPCError.tag_to_attr.values():
setattr(self, attr, None)
for subele in raw:
attr = RPCError.tag_to_attr.get(subele.tag, None)
if attr is not None:
setattr(self, attr, subele.text if attr != "_info" else to_xml(subele) )
if self.message is not None:
OperationError.__init__(self, self.message)
else:
OperationError.__init__(self, self.to_dict())
def to_dict(self):
return dict([ (attr[1:], getattr(self, attr)) for attr in RPCError.tag_to_attr.values() ])
@property
def xml(self):
"The `rpc-error` element as returned in XML."
return self._raw
@property
def type(self):
"The contents of the `error-type` element."
return self._type
@property
def tag(self):
"The contents of the `error-tag` element."
return self._tag
@property
def severity(self):
"The contents of the `error-severity` element."
return self._severity
@property
def path(self):
"The contents of the `error-path` element if present or `None`."
return self._path
@property
def message(self):
"The contents of the `error-message` element if present or `None`."
return self._message
@property
def info(self):
"XML string or `None`; representing the `error-info` element."
return self._info
class RPCReply:
"""Represents an *rpc-reply*. Only concerns itself with whether the operation was successful.
.. note::
If the reply has not yet been parsed there is an implicit, one-time parsing overhead to
accessing some of the attributes defined by this class.
"""
ERROR_CLS = RPCError
"Subclasses can specify a different error class, but it should be a subclass of `RPCError`."
def __init__(self, raw):
self._raw = raw
self._parsed = False
self._root = None
self._errors = []
def __repr__(self):
return self._raw
def parse(self):
"Parses the *rpc-reply*."
if self._parsed: return
root = self._root = to_ele(self._raw) # The <rpc-reply> element
# Per RFC 4741 an <ok/> tag is sent when there are no errors or warnings
ok = root.find(qualify("ok"))
if ok is None:
# Create RPCError objects from <rpc-error> elements
error = root.find(qualify("rpc-error"))
if error is not None:
for err in root.getiterator(error.tag):
# Process a particular <rpc-error>
self._errors.append(self.ERROR_CLS(err))
self._parsing_hook(root)
self._parsed = True
def _parsing_hook(self, root):
"No-op by default. Gets passed the *root* element for the reply."
pass
@property
def xml(self):
"*rpc-reply* element as returned."
return self._raw
@property
def ok(self):
"Boolean value indicating if there were no errors."
return not self.errors # empty list => false
@property
def error(self):
"Returns the first :class:`RPCError` and `None` if there were no errors."
self.parse()
if self._errors:
return self._errors[0]
else:
return None
@property
def errors(self):
"List of `RPCError` objects. Will be empty if there were no *rpc-error* elements in reply."
self.parse()
return self._errors
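# A minimal parsing sketch (the raw reply string is an assumed example):
#   raw = ('<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">'
#          '<ok/></rpc-reply>')
#   reply = RPCReply(raw)
#   assert reply.ok and reply.errors == []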
class RPCReplyListener(SessionListener): # internal use
creation_lock = Lock()
# one instance per session -- maybe there is a better way??
def __new__(cls, session):
with RPCReplyListener.creation_lock:
instance = session.get_listener_instance(cls)
if instance is None:
instance = object.__new__(cls)
instance._lock = Lock()
instance._id2rpc = {}
#instance._pipelined = session.can_pipeline
session.add_listener(instance)
return instance
def register(self, id, rpc):
with self._lock:
self._id2rpc[id] = rpc
def callback(self, root, raw):
tag, attrs = root
if tag != qualify("rpc-reply"):
return
for key in attrs: # in the <rpc-reply> attributes
if key == "message-id": # if we found msgid attr
id = attrs[key] # get the msgid
with self._lock:
try:
rpc = self._id2rpc[id] # the corresponding rpc
logger.debug("Delivering to %r", rpc)
rpc.deliver_reply(raw)
except KeyError:
                        raise OperationError("Unknown 'message-id': %s" % id)
# no catching other exceptions, fail loudly if must
else:
# if no error delivering, can del the reference to the RPC
del self._id2rpc[id]
break
else:
raise OperationError("Could not find 'message-id' attribute in <rpc-reply>")
def errback(self, err):
try:
for rpc in self._id2rpc.values():
rpc.deliver_error(err)
finally:
self._id2rpc.clear()
class RaiseMode(object):
NONE = 0
"Don't attempt to raise any type of `rpc-error` as :exc:`RPCError`."
ERRORS = 1
"Raise only when the `error-type` indicates it is an honest-to-god error."
ALL = 2
"Don't look at the `error-type`, always raise."
class RPC(object):
"""Base class for all operations, directly corresponding to *rpc* requests. Handles making the request, and taking delivery of the reply."""
DEPENDS = []
"""Subclasses can specify their dependencies on capabilities as a list of URI's or abbreviated names, e.g. ':writable-running'. These are verified at the time of instantiation. If the capability is not available, :exc:`MissingCapabilityError` is raised."""
REPLY_CLS = RPCReply
"By default :class:`RPCReply`. Subclasses can specify a :class:`RPCReply` subclass."
def __init__(self, session, async=False, timeout=30, raise_mode=RaiseMode.NONE):
"""
*session* is the :class:`~ncclient.transport.Session` instance
*async* specifies whether the request is to be made asynchronously, see :attr:`is_async`
*timeout* is the timeout for a synchronous request, see :attr:`timeout`
*raise_mode* specifies the exception raising mode, see :attr:`raise_mode`
"""
self._session = session
try:
for cap in self.DEPENDS:
self._assert(cap)
except AttributeError:
pass
self._async = async
self._timeout = timeout
self._raise_mode = raise_mode
self._id = uuid1().urn # Keeps things simple instead of having a class attr with running ID that has to be locked
self._listener = RPCReplyListener(session)
self._listener.register(self._id, self)
self._reply = None
self._error = None
self._event = Event()
def _wrap(self, subele):
# internal use
ele = new_ele("rpc", {"message-id": self._id})
ele.append(subele)
return to_xml(ele)
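    # For example, _wrap(new_ele("get-config")) serializes to roughly
    # (message-id abbreviated):
    #   <rpc message-id="urn:uuid:..."
    #        xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"><get-config/></rpc>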
def _request(self, op):
"""Implementations of :meth:`request` call this method to send the request and process the reply.
In synchronous mode, blocks until the reply is received and returns :class:`RPCReply`. Depending on the :attr:`raise_mode` a `rpc-error` element in the reply may lead to an :exc:`RPCError` exception.
        In asynchronous mode, returns `self` immediately. The :attr:`event` attribute will be set when the reply has been received (see :attr:`reply`) or an error occurred (see :attr:`error`).
*op* is the operation to be requested as an :class:`~xml.etree.ElementTree.Element`
"""
logger.info('Requesting %r', self.__class__.__name__)
req = self._wrap(op)
self._session.send(req)
if self._async:
logger.debug('Async request, returning %r', self)
return self
else:
logger.debug('Sync request, will wait for timeout=%r', self._timeout)
self._event.wait(self._timeout)
if self._event.isSet():
if self._error:
# Error that prevented reply delivery
raise self._error
self._reply.parse()
if self._reply.error is not None:
# <rpc-error>'s [ RPCError ]
if self._raise_mode == RaiseMode.ALL:
raise self._reply.error
elif (self._raise_mode == RaiseMode.ERRORS and self._reply.error.type == "error"):
raise self._reply.error
return self._reply
else:
raise TimeoutExpiredError
def request(self):
"""Subclasses must implement this method. Typically only the request needs to be built as an
:class:`~xml.etree.ElementTree.Element` and everything else can be handed off to
:meth:`_request`."""
pass
def _assert(self, capability):
"""Subclasses can use this method to verify that a capability is available with the NETCONF
server, before making a request that requires it. A :exc:`MissingCapabilityError` will be
raised if the capability is not available."""
if capability not in self._session.server_capabilities:
raise MissingCapabilityError('Server does not support [%s]' % capability)
def deliver_reply(self, raw):
# internal use
self._reply = self.REPLY_CLS(raw)
self._event.set()
def deliver_error(self, err):
# internal use
self._error = err
self._event.set()
@property
def reply(self):
":class:`RPCReply` element if reply has been received or `None`"
return self._reply
@property
def error(self):
""":exc:`Exception` type if an error occured or `None`.
.. note::
This represents an error which prevented a reply from being received. An *rpc-error*
does not fall in that category -- see `RPCReply` for that.
"""
return self._error
@property
def id(self):
"The *message-id* for this RPC."
return self._id
@property
def session(self):
"The `~ncclient.transport.Session` object associated with this RPC."
return self._session
@property
def event(self):
""":class:`~threading.Event` that is set when reply has been received or when an error preventing
delivery of the reply occurs.
"""
return self._event
    def __set_async(self, async=True):
        self._async = async
        if async and not self._session.can_pipeline:
            raise UserWarning('Asynchronous mode not supported for this device/session')
    def __set_raise_mode(self, mode):
        assert(mode in (RaiseMode.NONE, RaiseMode.ERRORS, RaiseMode.ALL))
        self._raise_mode = mode
def __set_timeout(self, timeout):
self._timeout = timeout
raise_mode = property(fget=lambda self: self._raise_mode, fset=__set_raise_mode)
"""Depending on this exception raising mode, an `rpc-error` in the reply may be raised as an :exc:`RPCError` exception. Valid values are the constants defined in :class:`RaiseMode`. """
is_async = property(fget=lambda self: self._async, fset=__set_async)
"""Specifies whether this RPC will be / was requested asynchronously. By default RPC's are synchronous."""
timeout = property(fget=lambda self: self._timeout, fset=__set_timeout)
"""Timeout in seconds for synchronous waiting defining how long the RPC request will block on a reply before raising :exc:`TimeoutExpiredError`.
Irrelevant for asynchronous usage.
"""
| apache-2.0 |
CSE3320/kernel-code | .backup_do_not_remove/tools/testing/selftests/bpf/test_offload.py | 54 | 51622 | #!/usr/bin/python3
# Copyright (C) 2017 Netronome Systems, Inc.
# Copyright (c) 2019 Mellanox Technologies. All rights reserved
#
# This software is licensed under the GNU General License Version 2,
# June 1991 as shown in the file COPYING in the top-level directory of this
# source tree.
#
# THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
# OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
# THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
from datetime import datetime
import argparse
import errno
import json
import os
import pprint
import random
import re
import stat
import string
import struct
import subprocess
import time
import traceback
logfile = None
log_level = 1
skip_extack = False
bpf_test_dir = os.path.dirname(os.path.realpath(__file__))
pp = pprint.PrettyPrinter()
devs = [] # devices we created for clean up
files = [] # files to be removed
netns = [] # net namespaces to be removed
def log_get_sec(level=0):
return "*" * (log_level + level)
def log_level_inc(add=1):
global log_level
log_level += add
def log_level_dec(sub=1):
global log_level
log_level -= sub
def log_level_set(level):
global log_level
log_level = level
def log(header, data, level=None):
"""
Output to an optional log.
"""
if logfile is None:
return
if level is not None:
log_level_set(level)
if not isinstance(data, str):
data = pp.pformat(data)
if len(header):
logfile.write("\n" + log_get_sec() + " ")
logfile.write(header)
if len(header) and len(data.strip()):
logfile.write("\n")
logfile.write(data)
def skip(cond, msg):
if not cond:
return
print("SKIP: " + msg)
log("SKIP: " + msg, "", level=1)
os.sys.exit(0)
def fail(cond, msg):
if not cond:
return
print("FAIL: " + msg)
tb = "".join(traceback.extract_stack().format())
print(tb)
log("FAIL: " + msg, tb, level=1)
os.sys.exit(1)
def start_test(msg):
log(msg, "", level=1)
log_level_inc()
print(msg)
def cmd(cmd, shell=True, include_stderr=False, background=False, fail=True):
"""
Run a command in subprocess and return tuple of (retval, stdout);
optionally return stderr as well as third value.
"""
proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if background:
msg = "%s START: %s" % (log_get_sec(1),
datetime.now().strftime("%H:%M:%S.%f"))
log("BKG " + proc.args, msg)
return proc
return cmd_result(proc, include_stderr=include_stderr, fail=fail)
def cmd_result(proc, include_stderr=False, fail=False):
stdout, stderr = proc.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
proc.stdout.close()
proc.stderr.close()
stderr = "\n" + stderr
if stderr[-1] == "\n":
stderr = stderr[:-1]
sec = log_get_sec(1)
log("CMD " + proc.args,
"RETCODE: %d\n%s STDOUT:\n%s%s STDERR:%s\n%s END: %s" %
(proc.returncode, sec, stdout, sec, stderr,
sec, datetime.now().strftime("%H:%M:%S.%f")))
if proc.returncode != 0 and fail:
if len(stderr) > 0 and stderr[-1] == "\n":
stderr = stderr[:-1]
raise Exception("Command failed: %s\n%s" % (proc.args, stderr))
if include_stderr:
return proc.returncode, stdout, stderr
else:
return proc.returncode, stdout
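# Illustrative helper usage (the commands are examples only):
#   ret, out = cmd("uname -r")              # synchronous run
#   proc = cmd("sleep 1", background=True)  # start in background
#   ret, out = cmd_result(proc)             # collect it later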
def rm(f):
cmd("rm -f %s" % (f))
if f in files:
files.remove(f)
def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False):
params = ""
if JSON:
params += "%s " % (flags["json"])
if ns != "":
ns = "ip netns exec %s " % (ns)
if include_stderr:
ret, stdout, stderr = cmd(ns + name + " " + params + args,
fail=fail, include_stderr=True)
else:
ret, stdout = cmd(ns + name + " " + params + args,
fail=fail, include_stderr=False)
if JSON and len(stdout.strip()) != 0:
out = json.loads(stdout)
else:
out = stdout
if include_stderr:
return ret, out, stderr
else:
return ret, out
def bpftool(args, JSON=True, ns="", fail=True, include_stderr=False):
return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns,
fail=fail, include_stderr=include_stderr)
def bpftool_prog_list(expected=None, ns=""):
_, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
# Remove the base progs
for p in base_progs:
if p in progs:
progs.remove(p)
if expected is not None:
if len(progs) != expected:
fail(True, "%d BPF programs loaded, expected %d" %
(len(progs), expected))
return progs
def bpftool_map_list(expected=None, ns=""):
_, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
# Remove the base maps
for m in base_maps:
if m in maps:
maps.remove(m)
if expected is not None:
if len(maps) != expected:
fail(True, "%d BPF maps loaded, expected %d" %
(len(maps), expected))
return maps
def bpftool_prog_list_wait(expected=0, n_retry=20):
for i in range(n_retry):
nprogs = len(bpftool_prog_list())
if nprogs == expected:
return
time.sleep(0.05)
raise Exception("Time out waiting for program counts to stabilize want %d, have %d" % (expected, nprogs))
def bpftool_map_list_wait(expected=0, n_retry=20):
for i in range(n_retry):
nmaps = len(bpftool_map_list())
if nmaps == expected:
return
time.sleep(0.05)
raise Exception("Time out waiting for map counts to stabilize want %d, have %d" % (expected, nmaps))
def bpftool_prog_load(sample, file_name, maps=[], prog_type="xdp", dev=None,
fail=True, include_stderr=False):
args = "prog load %s %s" % (os.path.join(bpf_test_dir, sample), file_name)
if prog_type is not None:
args += " type " + prog_type
if dev is not None:
args += " dev " + dev
if len(maps):
args += " map " + " map ".join(maps)
res = bpftool(args, fail=fail, include_stderr=include_stderr)
if res[0] == 0:
files.append(file_name)
return res
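# e.g. (illustrative names) bpftool_prog_load("sample_ret0.o",
# "/sys/fs/bpf/ret0", prog_type="xdp", dev="eth0") invokes roughly:
#   bpftool -p prog load <bpf_test_dir>/sample_ret0.o /sys/fs/bpf/ret0 \
#           type xdp dev eth0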
def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False):
if force:
args = "-force " + args
return tool("ip", args, {"json":"-j"}, JSON=JSON, ns=ns,
fail=fail, include_stderr=include_stderr)
def tc(args, JSON=True, ns="", fail=True, include_stderr=False):
return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns,
fail=fail, include_stderr=include_stderr)
def ethtool(dev, opt, args, fail=True):
return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail)
def bpf_obj(name, sec=".text", path=bpf_test_dir,):
return "obj %s sec %s" % (os.path.join(path, name), sec)
def bpf_pinned(name):
return "pinned %s" % (name)
def bpf_bytecode(bytecode):
return "bytecode \"%s\"" % (bytecode)
def mknetns(n_retry=10):
for i in range(n_retry):
name = ''.join([random.choice(string.ascii_letters) for i in range(8)])
ret, _ = ip("netns add %s" % (name), fail=False)
if ret == 0:
netns.append(name)
return name
return None
def int2str(fmt, val):
ret = []
for b in struct.pack(fmt, val):
ret.append(int(b))
return " ".join(map(lambda x: str(x), ret))
def str2int(strtab):
inttab = []
for i in strtab:
inttab.append(int(i, 16))
ba = bytearray(inttab)
if len(strtab) == 4:
fmt = "I"
elif len(strtab) == 8:
fmt = "Q"
else:
raise Exception("String array of len %d can't be unpacked to an int" %
(len(strtab)))
return struct.unpack(fmt, ba)[0]
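# Worked example (native byte order; little-endian on the usual test hosts):
#   int2str("I", 1)                   -> "1 0 0 0"
#   str2int(["01", "00", "00", "00"]) -> 1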
class DebugfsDir:
"""
Class for accessing DebugFS directories as a dictionary.
"""
def __init__(self, path):
self.path = path
self._dict = self._debugfs_dir_read(path)
def __len__(self):
return len(self._dict.keys())
def __getitem__(self, key):
if type(key) is int:
key = list(self._dict.keys())[key]
return self._dict[key]
def __setitem__(self, key, value):
log("DebugFS set %s = %s" % (key, value), "")
log_level_inc()
cmd("echo '%s' > %s/%s" % (value, self.path, key))
log_level_dec()
_, out = cmd('cat %s/%s' % (self.path, key))
self._dict[key] = out.strip()
def _debugfs_dir_read(self, path):
dfs = {}
log("DebugFS state for %s" % (path), "")
log_level_inc(add=2)
_, out = cmd('ls ' + path)
for f in out.split():
if f == "ports":
continue
p = os.path.join(path, f)
if not os.stat(p).st_mode & stat.S_IRUSR:
continue
if os.path.isfile(p):
                # We need to init trap_flow_action_cookie before reading it
if f == "trap_flow_action_cookie":
cmd('echo deadbeef > %s/%s' % (path, f))
_, out = cmd('cat %s/%s' % (path, f))
dfs[f] = out.strip()
elif os.path.isdir(p):
dfs[f] = DebugfsDir(p)
else:
raise Exception("%s is neither file nor directory" % (p))
log_level_dec()
log("DebugFS state", dfs)
log_level_dec()
return dfs
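# Minimal DebugfsDir usage sketch (the path is an example):
#   dfs = DebugfsDir("/sys/kernel/debug/netdevsim/netdevsim0")
#   dfs["bpf_map_accept"] = "N"  # writes via echo, then re-reads the value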
class NetdevSimDev:
"""
Class for netdevsim bus device and its attributes.
"""
@staticmethod
def ctrl_write(path, val):
fullpath = os.path.join("/sys/bus/netdevsim/", path)
try:
with open(fullpath, "w") as f:
f.write(val)
except OSError as e:
log("WRITE %s: %r" % (fullpath, val), -e.errno)
raise e
log("WRITE %s: %r" % (fullpath, val), 0)
def __init__(self, port_count=1):
addr = 0
while True:
try:
self.ctrl_write("new_device", "%u %u" % (addr, port_count))
except OSError as e:
if e.errno == errno.ENOSPC:
addr += 1
continue
raise e
break
self.addr = addr
        # The netdevsim device probe may happen from a workqueue, so wait
        # here until all netdevs appear.
self.wait_for_netdevs(port_count)
ret, out = cmd("udevadm settle", fail=False)
if ret:
raise Exception("udevadm settle failed")
ifnames = self.get_ifnames()
devs.append(self)
self.dfs_dir = "/sys/kernel/debug/netdevsim/netdevsim%u/" % addr
self.nsims = []
for port_index in range(port_count):
self.nsims.append(NetdevSim(self, port_index, ifnames[port_index]))
def get_ifnames(self):
ifnames = []
listdir = os.listdir("/sys/bus/netdevsim/devices/netdevsim%u/net/" % self.addr)
for ifname in listdir:
ifnames.append(ifname)
ifnames.sort()
return ifnames
def wait_for_netdevs(self, port_count):
timeout = 5
timeout_start = time.time()
while True:
try:
ifnames = self.get_ifnames()
except FileNotFoundError as e:
ifnames = []
if len(ifnames) == port_count:
break
            if time.time() < timeout_start + timeout:
                time.sleep(0.05)
                continue
raise Exception("netdevices did not appear within timeout")
def dfs_num_bound_progs(self):
path = os.path.join(self.dfs_dir, "bpf_bound_progs")
_, progs = cmd('ls %s' % (path))
return len(progs.split())
def dfs_get_bound_progs(self, expected):
progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
if expected is not None:
if len(progs) != expected:
fail(True, "%d BPF programs bound, expected %d" %
(len(progs), expected))
return progs
def remove(self):
self.ctrl_write("del_device", "%u" % (self.addr, ))
devs.remove(self)
def remove_nsim(self, nsim):
self.nsims.remove(nsim)
self.ctrl_write("devices/netdevsim%u/del_port" % (self.addr, ),
"%u" % (nsim.port_index, ))
class NetdevSim:
"""
Class for netdevsim netdevice and its attributes.
"""
def __init__(self, nsimdev, port_index, ifname):
        # In case udev renamed the netdev according to the new naming
        # scheme, check that the name matches the port_index.
nsimnamere = re.compile("eni\d+np(\d+)")
match = nsimnamere.match(ifname)
if match and int(match.groups()[0]) != port_index + 1:
raise Exception("netdevice name mismatches the expected one")
self.nsimdev = nsimdev
self.port_index = port_index
self.ns = ""
self.dfs_dir = "%s/ports/%u/" % (nsimdev.dfs_dir, port_index)
self.dfs_refresh()
_, [self.dev] = ip("link show dev %s" % ifname)
def __getitem__(self, key):
return self.dev[key]
def remove(self):
self.nsimdev.remove_nsim(self)
def dfs_refresh(self):
self.dfs = DebugfsDir(self.dfs_dir)
return self.dfs
def dfs_read(self, f):
path = os.path.join(self.dfs_dir, f)
_, data = cmd('cat %s' % (path))
return data.strip()
def wait_for_flush(self, bound=0, total=0, n_retry=20):
for i in range(n_retry):
nbound = self.nsimdev.dfs_num_bound_progs()
nprogs = len(bpftool_prog_list())
if nbound == bound and nprogs == total:
return
time.sleep(0.05)
raise Exception("Time out waiting for program counts to stabilize want %d/%d, have %d bound, %d loaded" % (bound, total, nbound, nprogs))
def set_ns(self, ns):
name = "1" if ns == "" else ns
ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns)
self.ns = ns
def set_mtu(self, mtu, fail=True):
return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu),
fail=fail)
def set_xdp(self, bpf, mode, force=False, JSON=True, verbose=False,
fail=True, include_stderr=False):
if verbose:
bpf += " verbose"
return ip("link set dev %s xdp%s %s" % (self.dev["ifname"], mode, bpf),
force=force, JSON=JSON,
fail=fail, include_stderr=include_stderr)
def unset_xdp(self, mode, force=False, JSON=True,
fail=True, include_stderr=False):
return ip("link set dev %s xdp%s off" % (self.dev["ifname"], mode),
force=force, JSON=JSON,
fail=fail, include_stderr=include_stderr)
    def ip_link_show(self, xdp):
        _, link = ip("link show dev %s" % (self['ifname']))
        if len(link) > 1:
            raise Exception("Multiple objects on ip link show")
        if len(link) < 1:
            return {}
        link = link[0]
        # Parenthesized on purpose: a bare ``xdp != "xdp" in link`` would
        # chain the comparisons and not test what is intended here.
        fail(xdp != ("xdp" in link),
             "XDP program not reporting in iplink (reported %s, expected %s)" %
             ("xdp" in link, xdp))
        return link
def tc_add_ingress(self):
tc("qdisc add dev %s ingress" % (self['ifname']))
def tc_del_ingress(self):
tc("qdisc del dev %s ingress" % (self['ifname']))
def tc_flush_filters(self, bound=0, total=0):
self.tc_del_ingress()
self.tc_add_ingress()
self.wait_for_flush(bound=bound, total=total)
def tc_show_ingress(self, expected=None):
# No JSON support, oh well...
flags = ["skip_sw", "skip_hw", "in_hw"]
named = ["protocol", "pref", "chain", "handle", "id", "tag"]
args = "-s filter show dev %s ingress" % (self['ifname'])
_, out = tc(args, JSON=False)
filters = []
lines = out.split('\n')
for line in lines:
words = line.split()
if "handle" not in words:
continue
fltr = {}
for flag in flags:
fltr[flag] = flag in words
for name in named:
try:
idx = words.index(name)
fltr[name] = words[idx + 1]
except ValueError:
pass
filters.append(fltr)
if expected is not None:
fail(len(filters) != expected,
"%d ingress filters loaded, expected %d" %
(len(filters), expected))
return filters
def cls_filter_op(self, op, qdisc="ingress", prio=None, handle=None,
chain=None, cls="", params="",
fail=True, include_stderr=False):
spec = ""
if prio is not None:
spec += " prio %d" % (prio)
if handle:
spec += " handle %s" % (handle)
if chain is not None:
spec += " chain %d" % (chain)
return tc("filter {op} dev {dev} {qdisc} {spec} {cls} {params}"\
.format(op=op, dev=self['ifname'], qdisc=qdisc, spec=spec,
cls=cls, params=params),
fail=fail, include_stderr=include_stderr)
def cls_bpf_add_filter(self, bpf, op="add", prio=None, handle=None,
chain=None, da=False, verbose=False,
skip_sw=False, skip_hw=False,
fail=True, include_stderr=False):
cls = "bpf " + bpf
params = ""
if da:
params += " da"
if verbose:
params += " verbose"
if skip_sw:
params += " skip_sw"
if skip_hw:
params += " skip_hw"
return self.cls_filter_op(op=op, prio=prio, handle=handle, cls=cls,
chain=chain, params=params,
fail=fail, include_stderr=include_stderr)
def set_ethtool_tc_offloads(self, enable, fail=True):
args = "hw-tc-offload %s" % ("on" if enable else "off")
return ethtool(self, "-K", args, fail=fail)
################################################################################
def clean_up():
global files, netns, devs
for dev in devs:
dev.remove()
for f in files:
cmd("rm -f %s" % (f))
for ns in netns:
cmd("ip netns delete %s" % (ns))
files = []
netns = []
def pin_prog(file_name, idx=0):
progs = bpftool_prog_list(expected=(idx + 1))
prog = progs[idx]
bpftool("prog pin id %d %s" % (prog["id"], file_name))
files.append(file_name)
return file_name, bpf_pinned(file_name)
def pin_map(file_name, idx=0, expected=1):
maps = bpftool_map_list(expected=expected)
m = maps[idx]
bpftool("map pin id %d %s" % (m["id"], file_name))
files.append(file_name)
return file_name, bpf_pinned(file_name)
def check_dev_info_removed(prog_file=None, map_file=None):
bpftool_prog_list(expected=0)
ret, err = bpftool("prog show pin %s" % (prog_file), fail=False)
fail(ret == 0, "Showing prog with removed device did not fail")
fail(err["error"].find("No such device") == -1,
"Showing prog with removed device expected ENODEV, error is %s" %
(err["error"]))
bpftool_map_list(expected=0)
ret, err = bpftool("map show pin %s" % (map_file), fail=False)
fail(ret == 0, "Showing map with removed device did not fail")
fail(err["error"].find("No such device") == -1,
"Showing map with removed device expected ENODEV, error is %s" %
(err["error"]))
def check_dev_info(other_ns, ns, prog_file=None, map_file=None, removed=False):
progs = bpftool_prog_list(expected=1, ns=ns)
prog = progs[0]
fail("dev" not in prog.keys(), "Device parameters not reported")
dev = prog["dev"]
fail("ifindex" not in dev.keys(), "Device parameters not reported")
fail("ns_dev" not in dev.keys(), "Device parameters not reported")
fail("ns_inode" not in dev.keys(), "Device parameters not reported")
if not other_ns:
fail("ifname" not in dev.keys(), "Ifname not reported")
fail(dev["ifname"] != sim["ifname"],
"Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"]))
else:
fail("ifname" in dev.keys(), "Ifname is reported for other ns")
maps = bpftool_map_list(expected=2, ns=ns)
for m in maps:
fail("dev" not in m.keys(), "Device parameters not reported")
fail(dev != m["dev"], "Map's device different than program's")
def check_extack(output, reference, args):
if skip_extack:
return
lines = output.split("\n")
comp = len(lines) >= 2 and lines[1] == 'Error: ' + reference
fail(not comp, "Missing or incorrect netlink extack message")
def check_extack_nsim(output, reference, args):
check_extack(output, "netdevsim: " + reference, args)
def check_no_extack(res, needle):
fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"),
"Found '%s' in command output, leaky extack?" % (needle))
def check_verifier_log(output, reference):
lines = output.split("\n")
for l in reversed(lines):
if l == reference:
return
fail(True, "Missing or incorrect message from netdevsim in verifier log")
def check_multi_basic(two_xdps):
fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
fail("prog" in two_xdps, "Base program reported in multi program mode")
fail(len(two_xdps["attached"]) != 2,
"Wrong attached program count with two programs")
fail(two_xdps["attached"][0]["prog"]["id"] ==
two_xdps["attached"][1]["prog"]["id"],
"Offloaded and other programs have the same id")
def test_spurious_extack(sim, obj, skip_hw, needle):
res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
include_stderr=True)
check_no_extack(res, needle)
res = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
skip_hw=skip_hw, include_stderr=True)
check_no_extack(res, needle)
res = sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf",
include_stderr=True)
check_no_extack(res, needle)
def test_multi_prog(simdev, sim, obj, modename, modeid):
start_test("Test multi-attachment XDP - %s + offload..." %
(modename or "default", ))
sim.set_xdp(obj, "offload")
xdp = sim.ip_link_show(xdp=True)["xdp"]
offloaded = sim.dfs_read("bpf_offloaded_id")
fail("prog" not in xdp, "Base program not reported in single program mode")
fail(len(xdp["attached"]) != 1,
"Wrong attached program count with one program")
sim.set_xdp(obj, modename)
two_xdps = sim.ip_link_show(xdp=True)["xdp"]
fail(xdp["attached"][0] not in two_xdps["attached"],
"Offload program not reported after other activated")
check_multi_basic(two_xdps)
offloaded2 = sim.dfs_read("bpf_offloaded_id")
fail(offloaded != offloaded2,
"Offload ID changed after loading other program")
start_test("Test multi-attachment XDP - replace...")
ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
fail(ret == 0, "Replaced one of programs without -force")
check_extack(err, "XDP program already attached.", args)
if modename == "" or modename == "drv":
othermode = "" if modename == "drv" else "drv"
start_test("Test multi-attachment XDP - detach...")
ret, _, err = sim.unset_xdp(othermode, force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Removed program with a bad mode")
check_extack(err, "program loaded with different flags.", args)
sim.unset_xdp("offload")
xdp = sim.ip_link_show(xdp=True)["xdp"]
offloaded = sim.dfs_read("bpf_offloaded_id")
fail(xdp["mode"] != modeid, "Bad mode reported after multiple programs")
fail("prog" not in xdp,
"Base program not reported after multi program mode")
fail(xdp["attached"][0] not in two_xdps["attached"],
"Offload program not reported after other activated")
fail(len(xdp["attached"]) != 1,
"Wrong attached program count with remaining programs")
fail(offloaded != "0", "Offload ID reported with only other program left")
start_test("Test multi-attachment XDP - reattach...")
sim.set_xdp(obj, "offload")
two_xdps = sim.ip_link_show(xdp=True)["xdp"]
fail(xdp["attached"][0] not in two_xdps["attached"],
"Other program not reported after offload activated")
check_multi_basic(two_xdps)
start_test("Test multi-attachment XDP - device remove...")
simdev.remove()
simdev = NetdevSimDev()
sim, = simdev.nsims
sim.set_ethtool_tc_offloads(True)
return [simdev, sim]
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("--log", help="output verbose log to given file")
args = parser.parse_args()
if args.log:
logfile = open(args.log, 'w+')
logfile.write("# -*-Org-*-")
log("Prepare...", "", level=1)
log_level_inc()
# Check permissions
skip(os.getuid() != 0, "test must be run as root")
# Check tools
ret, progs = bpftool("prog", fail=False)
skip(ret != 0, "bpftool not installed")
base_progs = progs
_, base_maps = bpftool("map")
# Check netdevsim
ret, out = cmd("modprobe netdevsim", fail=False)
skip(ret != 0, "netdevsim module could not be loaded")
# Check debugfs
_, out = cmd("mount")
if out.find("/sys/kernel/debug type debugfs") == -1:
cmd("mount -t debugfs none /sys/kernel/debug")
# Check samples are compiled
samples = ["sample_ret0.o", "sample_map_ret0.o"]
for s in samples:
ret, out = cmd("ls %s/%s" % (bpf_test_dir, s), fail=False)
skip(ret != 0, "sample %s/%s not found, please compile it" %
(bpf_test_dir, s))
# Check if iproute2 is built with libmnl (needed by extack support)
_, _, err = cmd("tc qdisc delete dev lo handle 0",
fail=False, include_stderr=True)
if err.find("Error: Failed to find qdisc with specified handle.") == -1:
print("Warning: no extack message in iproute2 output, libmnl missing?")
log("Warning: no extack message in iproute2 output, libmnl missing?", "")
skip_extack = True
# Check if net namespaces seem to work
ns = mknetns()
skip(ns is None, "Could not create a net namespace")
cmd("ip netns delete %s" % (ns))
netns = []
try:
obj = bpf_obj("sample_ret0.o")
bytecode = bpf_bytecode("1,6 0 0 4294967295,")
start_test("Test destruction of generic XDP...")
simdev = NetdevSimDev()
sim, = simdev.nsims
sim.set_xdp(obj, "generic")
simdev.remove()
bpftool_prog_list_wait(expected=0)
simdev = NetdevSimDev()
sim, = simdev.nsims
sim.tc_add_ingress()
start_test("Test TC non-offloaded...")
ret, _ = sim.cls_bpf_add_filter(obj, skip_hw=True, fail=False)
fail(ret != 0, "Software TC filter did not load")
start_test("Test TC non-offloaded isn't getting bound...")
ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
fail(ret != 0, "Software TC filter did not load")
simdev.dfs_get_bound_progs(expected=0)
sim.tc_flush_filters()
start_test("Test TC offloads are off by default...")
ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "TC filter loaded without enabling TC offloads")
check_extack(err, "TC offload is disabled on net device.", args)
sim.wait_for_flush()
sim.set_ethtool_tc_offloads(True)
sim.dfs["bpf_tc_non_bound_accept"] = "Y"
start_test("Test TC offload by default...")
ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
fail(ret != 0, "Software TC filter did not load")
simdev.dfs_get_bound_progs(expected=0)
ingress = sim.tc_show_ingress(expected=1)
fltr = ingress[0]
fail(not fltr["in_hw"], "Filter not offloaded by default")
sim.tc_flush_filters()
start_test("Test TC cBPF bytcode tries offload by default...")
ret, _ = sim.cls_bpf_add_filter(bytecode, fail=False)
fail(ret != 0, "Software TC filter did not load")
simdev.dfs_get_bound_progs(expected=0)
ingress = sim.tc_show_ingress(expected=1)
fltr = ingress[0]
fail(not fltr["in_hw"], "Bytecode not offloaded by default")
sim.tc_flush_filters()
sim.dfs["bpf_tc_non_bound_accept"] = "N"
start_test("Test TC cBPF unbound bytecode doesn't offload...")
ret, _, err = sim.cls_bpf_add_filter(bytecode, skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "TC bytecode loaded for offload")
check_extack_nsim(err, "netdevsim configured to reject unbound programs.",
args)
sim.wait_for_flush()
start_test("Test non-0 chain offload...")
ret, _, err = sim.cls_bpf_add_filter(obj, chain=1, prio=1, handle=1,
skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "Offloaded a filter to chain other than 0")
check_extack(err, "Driver supports only offload of chain 0.", args)
sim.tc_flush_filters()
start_test("Test TC replace...")
sim.cls_bpf_add_filter(obj, prio=1, handle=1)
sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1)
sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_sw=True)
sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_sw=True)
sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=True)
sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_hw=True)
sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
start_test("Test TC replace bad flags...")
for i in range(3):
for j in range(3):
ret, _ = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
skip_sw=(j == 1), skip_hw=(j == 2),
fail=False)
fail(bool(ret) != bool(j),
"Software TC incorrect load in replace test, iteration %d" %
(j))
sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
start_test("Test spurious extack from the driver...")
test_spurios_extack(sim, obj, False, "netdevsim")
test_spurios_extack(sim, obj, True, "netdevsim")
sim.set_ethtool_tc_offloads(False)
test_spurios_extack(sim, obj, False, "TC offload is disabled")
test_spurios_extack(sim, obj, True, "TC offload is disabled")
sim.set_ethtool_tc_offloads(True)
sim.tc_flush_filters()
start_test("Test TC offloads work...")
ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
fail=False, include_stderr=True)
fail(ret != 0, "TC filter did not load with TC offloads enabled")
check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
start_test("Test TC offload basics...")
dfs = simdev.dfs_get_bound_progs(expected=1)
progs = bpftool_prog_list(expected=1)
ingress = sim.tc_show_ingress(expected=1)
dprog = dfs[0]
prog = progs[0]
fltr = ingress[0]
fail(fltr["skip_hw"], "TC does reports 'skip_hw' on offloaded filter")
fail(not fltr["in_hw"], "TC does not report 'in_hw' for offloaded filter")
fail(not fltr["skip_sw"], "TC does not report 'skip_sw' back")
start_test("Test TC offload is device-bound...")
fail(str(prog["id"]) != fltr["id"], "Program IDs don't match")
fail(prog["tag"] != fltr["tag"], "Program tags don't match")
fail(fltr["id"] != dprog["id"], "Program IDs don't match")
fail(dprog["state"] != "xlated", "Offloaded program state not translated")
fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
start_test("Test disabling TC offloads is rejected while filters installed...")
ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
start_test("Test qdisc removal frees things...")
sim.tc_flush_filters()
sim.tc_show_ingress(expected=0)
start_test("Test disabling TC offloads is OK without filters...")
ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
fail(ret != 0,
"Driver refused to disable TC offloads without filters installed...")
sim.set_ethtool_tc_offloads(True)
start_test("Test destroying device gets rid of TC filters...")
sim.cls_bpf_add_filter(obj, skip_sw=True)
simdev.remove()
bpftool_prog_list_wait(expected=0)
simdev = NetdevSimDev()
sim, = simdev.nsims
sim.set_ethtool_tc_offloads(True)
start_test("Test destroying device gets rid of XDP...")
sim.set_xdp(obj, "offload")
simdev.remove()
bpftool_prog_list_wait(expected=0)
simdev = NetdevSimDev()
sim, = simdev.nsims
sim.set_ethtool_tc_offloads(True)
start_test("Test XDP prog reporting...")
sim.set_xdp(obj, "drv")
ipl = sim.ip_link_show(xdp=True)
progs = bpftool_prog_list(expected=1)
fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
"Loaded program has wrong ID")
start_test("Test XDP prog replace without force...")
ret, _ = sim.set_xdp(obj, "drv", fail=False)
fail(ret == 0, "Replaced XDP program without -force")
sim.wait_for_flush(total=1)
start_test("Test XDP prog replace with force...")
ret, _ = sim.set_xdp(obj, "drv", force=True, fail=False)
fail(ret != 0, "Could not replace XDP program with -force")
bpftool_prog_list_wait(expected=1)
ipl = sim.ip_link_show(xdp=True)
progs = bpftool_prog_list(expected=1)
fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
"Loaded program has wrong ID")
fail("dev" in progs[0].keys(),
"Device parameters reported for non-offloaded program")
start_test("Test XDP prog replace with bad flags...")
ret, _, err = sim.set_xdp(obj, "generic", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
check_extack(err,
"native and generic XDP can't be active at the same time.",
args)
ret, _, err = sim.set_xdp(obj, "", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
check_extack(err, "program loaded with different flags.", args)
start_test("Test XDP prog remove with bad flags...")
ret, _, err = sim.unset_xdp("", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Removed program with a bad mode")
check_extack(err, "program loaded with different flags.", args)
start_test("Test MTU restrictions...")
ret, _ = sim.set_mtu(9000, fail=False)
fail(ret == 0,
"Driver should refuse to increase MTU to 9000 with XDP loaded...")
sim.unset_xdp("drv")
bpftool_prog_list_wait(expected=0)
sim.set_mtu(9000)
ret, _, err = sim.set_xdp(obj, "drv", fail=False, include_stderr=True)
fail(ret == 0, "Driver should refuse to load program with MTU of 9000...")
check_extack_nsim(err, "MTU too large w/ XDP enabled.", args)
sim.set_mtu(1500)
sim.wait_for_flush()
start_test("Test non-offload XDP attaching to HW...")
bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/nooffload")
nooffload = bpf_pinned("/sys/fs/bpf/nooffload")
ret, _, err = sim.set_xdp(nooffload, "offload",
fail=False, include_stderr=True)
fail(ret == 0, "attached non-offloaded XDP program to HW")
check_extack_nsim(err, "xdpoffload of non-bound program.", args)
rm("/sys/fs/bpf/nooffload")
start_test("Test offload XDP attaching to drv...")
bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload",
dev=sim['ifname'])
offload = bpf_pinned("/sys/fs/bpf/offload")
ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True)
fail(ret == 0, "attached offloaded XDP program to drv")
check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args)
rm("/sys/fs/bpf/offload")
sim.wait_for_flush()
start_test("Test XDP offload...")
_, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
ipl = sim.ip_link_show(xdp=True)
link_xdp = ipl["xdp"]["prog"]
progs = bpftool_prog_list(expected=1)
prog = progs[0]
fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
start_test("Test XDP offload is device bound...")
dfs = simdev.dfs_get_bound_progs(expected=1)
dprog = dfs[0]
fail(prog["id"] != link_xdp["id"], "Program IDs don't match")
fail(prog["tag"] != link_xdp["tag"], "Program tags don't match")
fail(str(link_xdp["id"]) != dprog["id"], "Program IDs don't match")
fail(dprog["state"] != "xlated", "Offloaded program state not translated")
fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
start_test("Test removing XDP program many times...")
sim.unset_xdp("offload")
sim.unset_xdp("offload")
sim.unset_xdp("drv")
sim.unset_xdp("drv")
sim.unset_xdp("")
sim.unset_xdp("")
bpftool_prog_list_wait(expected=0)
start_test("Test attempt to use a program for a wrong device...")
simdev2 = NetdevSimDev()
sim2, = simdev2.nsims
sim2.set_xdp(obj, "offload")
pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
ret, _, err = sim.set_xdp(pinned, "offload",
fail=False, include_stderr=True)
fail(ret == 0, "Pinned program loaded for a different device accepted")
check_extack_nsim(err, "program bound to different dev.", args)
simdev2.remove()
ret, _, err = sim.set_xdp(pinned, "offload",
fail=False, include_stderr=True)
fail(ret == 0, "Pinned program loaded for a removed device accepted")
check_extack_nsim(err, "xdpoffload of non-bound program.", args)
rm(pin_file)
bpftool_prog_list_wait(expected=0)
simdev, sim = test_multi_prog(simdev, sim, obj, "", 1)
simdev, sim = test_multi_prog(simdev, sim, obj, "drv", 1)
simdev, sim = test_multi_prog(simdev, sim, obj, "generic", 2)
start_test("Test mixing of TC and XDP...")
sim.tc_add_ingress()
sim.set_xdp(obj, "offload")
ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "Loading TC when XDP active should fail")
check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
sim.unset_xdp("offload")
sim.wait_for_flush()
sim.cls_bpf_add_filter(obj, skip_sw=True)
ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
fail(ret == 0, "Loading XDP when TC active should fail")
check_extack_nsim(err, "TC program is already loaded.", args)
start_test("Test binding TC from pinned...")
pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
sim.tc_flush_filters(bound=1, total=1)
sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True)
sim.tc_flush_filters(bound=1, total=1)
start_test("Test binding XDP from pinned...")
sim.set_xdp(obj, "offload")
pin_file, pinned = pin_prog("/sys/fs/bpf/tmp2", idx=1)
sim.set_xdp(pinned, "offload", force=True)
sim.unset_xdp("offload")
sim.set_xdp(pinned, "offload", force=True)
sim.unset_xdp("offload")
start_test("Test offload of wrong type fails...")
ret, _ = sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True, fail=False)
fail(ret == 0, "Managed to attach XDP program to TC")
start_test("Test asking for TC offload of two filters...")
sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
ret, _, err = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "Managed to offload two TC filters at the same time")
check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
sim.tc_flush_filters(bound=2, total=2)
start_test("Test if netdev removal waits for translation...")
delay_msec = 500
sim.dfs["dev/bpf_bind_verifier_delay"] = delay_msec
start = time.time()
cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \
(sim['ifname'], obj)
tc_proc = cmd(cmd_line, background=True, fail=False)
# Wait for the verifier to start
while simdev.dfs_num_bound_progs() <= 2:
pass
simdev.remove()
end = time.time()
ret, _ = cmd_result(tc_proc, fail=False)
time_diff = end - start
log("Time", "start:\t%s\nend:\t%s\ndiff:\t%s" % (start, end, time_diff))
fail(ret == 0, "Managed to load TC filter on a unregistering device")
delay_sec = delay_msec * 0.001
fail(time_diff < delay_sec, "Removal process took %s, expected %s" %
(time_diff, delay_sec))
# Remove all pinned files and reinstantiate the netdev
clean_up()
bpftool_prog_list_wait(expected=0)
simdev = NetdevSimDev()
sim, = simdev.nsims
map_obj = bpf_obj("sample_map_ret0.o")
start_test("Test loading program with maps...")
sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
start_test("Test bpftool bound info reporting (own ns)...")
check_dev_info(False, "")
start_test("Test bpftool bound info reporting (other ns)...")
ns = mknetns()
sim.set_ns(ns)
check_dev_info(True, "")
start_test("Test bpftool bound info reporting (remote ns)...")
check_dev_info(False, ns)
start_test("Test bpftool bound info reporting (back to own ns)...")
sim.set_ns("")
check_dev_info(False, "")
prog_file, _ = pin_prog("/sys/fs/bpf/tmp_prog")
map_file, _ = pin_map("/sys/fs/bpf/tmp_map", idx=1, expected=2)
simdev.remove()
start_test("Test bpftool bound info reporting (removed dev)...")
check_dev_info_removed(prog_file=prog_file, map_file=map_file)
# Remove all pinned files and reinstantiate the netdev
clean_up()
bpftool_prog_list_wait(expected=0)
simdev = NetdevSimDev()
sim, = simdev.nsims
start_test("Test map update (no flags)...")
sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
maps = bpftool_map_list(expected=2)
array = maps[0] if maps[0]["type"] == "array" else maps[1]
htab = maps[0] if maps[0]["type"] == "hash" else maps[1]
for m in maps:
for i in range(2):
bpftool("map update id %d key %s value %s" %
(m["id"], int2str("I", i), int2str("Q", i * 3)))
for m in maps:
ret, _ = bpftool("map update id %d key %s value %s" %
(m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
fail=False)
fail(ret == 0, "added too many entries")
start_test("Test map update (exists)...")
for m in maps:
for i in range(2):
bpftool("map update id %d key %s value %s exist" %
(m["id"], int2str("I", i), int2str("Q", i * 3)))
for m in maps:
ret, err = bpftool("map update id %d key %s value %s exist" %
(m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
fail=False)
fail(ret == 0, "updated non-existing key")
fail(err["error"].find("No such file or directory") == -1,
"expected ENOENT, error is '%s'" % (err["error"]))
start_test("Test map update (noexist)...")
for m in maps:
for i in range(2):
ret, err = bpftool("map update id %d key %s value %s noexist" %
(m["id"], int2str("I", i), int2str("Q", i * 3)),
fail=False)
fail(ret == 0, "updated existing key")
fail(err["error"].find("File exists") == -1,
"expected EEXIST, error is '%s'" % (err["error"]))
start_test("Test map dump...")
for m in maps:
_, entries = bpftool("map dump id %d" % (m["id"]))
for i in range(2):
key = str2int(entries[i]["key"])
fail(key != i, "expected key %d, got %d" % (key, i))
val = str2int(entries[i]["value"])
fail(val != i * 3, "expected value %d, got %d" % (val, i * 3))
start_test("Test map getnext...")
for m in maps:
_, entry = bpftool("map getnext id %d" % (m["id"]))
key = str2int(entry["next_key"])
fail(key != 0, "next key %d, expected %d" % (key, 0))
_, entry = bpftool("map getnext id %d key %s" %
(m["id"], int2str("I", 0)))
key = str2int(entry["next_key"])
fail(key != 1, "next key %d, expected %d" % (key, 1))
ret, err = bpftool("map getnext id %d key %s" %
(m["id"], int2str("I", 1)), fail=False)
fail(ret == 0, "got next key past the end of map")
fail(err["error"].find("No such file or directory") == -1,
"expected ENOENT, error is '%s'" % (err["error"]))
start_test("Test map delete (htab)...")
for i in range(2):
bpftool("map delete id %d key %s" % (htab["id"], int2str("I", i)))
start_test("Test map delete (array)...")
for i in range(2):
ret, err = bpftool("map delete id %d key %s" %
(htab["id"], int2str("I", i)), fail=False)
fail(ret == 0, "removed entry from an array")
fail(err["error"].find("No such file or directory") == -1,
"expected ENOENT, error is '%s'" % (err["error"]))
start_test("Test map remove...")
sim.unset_xdp("offload")
bpftool_map_list_wait(expected=0)
simdev.remove()
simdev = NetdevSimDev()
sim, = simdev.nsims
sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
simdev.remove()
bpftool_map_list_wait(expected=0)
start_test("Test map creation fail path...")
simdev = NetdevSimDev()
sim, = simdev.nsims
sim.dfs["bpf_map_accept"] = "N"
ret, _ = sim.set_xdp(map_obj, "offload", JSON=False, fail=False)
fail(ret == 0,
"netdevsim didn't refuse to create a map with offload disabled")
simdev.remove()
start_test("Test multi-dev ASIC program reuse...")
simdevA = NetdevSimDev()
simA, = simdevA.nsims
simdevB = NetdevSimDev(3)
simB1, simB2, simB3 = simdevB.nsims
sims = (simA, simB1, simB2, simB3)
simB = (simB1, simB2, simB3)
bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA",
dev=simA['ifname'])
progA = bpf_pinned("/sys/fs/bpf/nsimA")
bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB",
dev=simB1['ifname'])
progB = bpf_pinned("/sys/fs/bpf/nsimB")
simA.set_xdp(progA, "offload", JSON=False)
for d in simdevB.nsims:
d.set_xdp(progB, "offload", JSON=False)
start_test("Test multi-dev ASIC cross-dev replace...")
ret, _ = simA.set_xdp(progB, "offload", force=True, JSON=False, fail=False)
fail(ret == 0, "cross-ASIC program allowed")
for d in simdevB.nsims:
ret, _ = d.set_xdp(progA, "offload", force=True, JSON=False, fail=False)
fail(ret == 0, "cross-ASIC program allowed")
start_test("Test multi-dev ASIC cross-dev install...")
for d in sims:
d.unset_xdp("offload")
ret, _, err = simA.set_xdp(progB, "offload", force=True, JSON=False,
fail=False, include_stderr=True)
fail(ret == 0, "cross-ASIC program allowed")
check_extack_nsim(err, "program bound to different dev.", args)
for d in simdevB.nsims:
ret, _, err = d.set_xdp(progA, "offload", force=True, JSON=False,
fail=False, include_stderr=True)
fail(ret == 0, "cross-ASIC program allowed")
check_extack_nsim(err, "program bound to different dev.", args)
start_test("Test multi-dev ASIC cross-dev map reuse...")
mapA = bpftool("prog show %s" % (progA))[1]["map_ids"][0]
mapB = bpftool("prog show %s" % (progB))[1]["map_ids"][0]
ret, _ = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_",
dev=simB3['ifname'],
maps=["idx 0 id %d" % (mapB)],
fail=False)
fail(ret != 0, "couldn't reuse a map on the same ASIC")
rm("/sys/fs/bpf/nsimB_")
ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA_",
dev=simA['ifname'],
maps=["idx 0 id %d" % (mapB)],
fail=False, include_stderr=True)
fail(ret == 0, "could reuse a map on a different ASIC")
fail(err.count("offload device mismatch between prog and map") == 0,
"error message missing for cross-ASIC map")
ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_",
dev=simB1['ifname'],
maps=["idx 0 id %d" % (mapA)],
fail=False, include_stderr=True)
fail(ret == 0, "could reuse a map on a different ASIC")
fail(err.count("offload device mismatch between prog and map") == 0,
"error message missing for cross-ASIC map")
start_test("Test multi-dev ASIC cross-dev destruction...")
bpftool_prog_list_wait(expected=2)
simdevA.remove()
bpftool_prog_list_wait(expected=1)
ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
fail(ifnameB != simB1['ifname'], "program not bound to original device")
simB1.remove()
bpftool_prog_list_wait(expected=1)
start_test("Test multi-dev ASIC cross-dev destruction - move...")
ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
fail(ifnameB not in (simB2['ifname'], simB3['ifname']),
"program not bound to remaining devices")
simB2.remove()
ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
fail(ifnameB != simB3['ifname'], "program not bound to remaining device")
simB3.remove()
simdevB.remove()
bpftool_prog_list_wait(expected=0)
start_test("Test multi-dev ASIC cross-dev destruction - orphaned...")
ret, out = bpftool("prog show %s" % (progB), fail=False)
fail(ret == 0, "got information about orphaned program")
fail("error" not in out, "no error reported for get info on orphaned")
fail(out["error"] != "can't get prog info: No such device",
"wrong error for get info on orphaned")
print("%s: OK" % (os.path.basename(__file__)))
finally:
log("Clean up...", "", level=1)
log_level_inc()
clean_up()
| gpl-2.0 |
chandolia/python-social-auth | social/tests/backends/test_tripit.py | 91 | 4218 | import json
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class TripitOAuth1Test(OAuth1Test):
backend_path = 'social.backends.tripit.TripItOAuth'
user_data_url = 'https://api.tripit.com/v1/get/profile'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_content_type = 'text/xml'
user_data_body = \
'<Response>' \
'<timestamp>1363590451</timestamp>' \
'<num_bytes>1040</num_bytes>' \
'<Profile ref="ignore-me">' \
'<ProfileEmailAddresses>' \
'<ProfileEmailAddress>' \
'<address>foobar@gmail.com</address>' \
'<is_auto_import>false</is_auto_import>' \
'<is_confirmed>true</is_confirmed>' \
'<is_primary>true</is_primary>' \
'<is_auto_inbox_eligible>' \
'true' \
'</is_auto_inbox_eligible>' \
'</ProfileEmailAddress>' \
'</ProfileEmailAddresses>' \
'<is_client>true</is_client>' \
'<is_pro>false</is_pro>' \
'<screen_name>foobar</screen_name>' \
'<public_display_name>Foo Bar</public_display_name>' \
'<profile_url>people/foobar</profile_url>' \
'<home_city>Foo, Barland</home_city>' \
'<activity_feed_url>' \
'https://www.tripit.com/feed/activities/private/' \
'ignore-this/activities.atom' \
'</activity_feed_url>' \
'<alerts_feed_url>' \
'https://www.tripit.com/feed/alerts/private/' \
'ignore-this/alerts.atom' \
'</alerts_feed_url>' \
'<ical_url>' \
'webcal://www.tripit.com/feed/ical/private/' \
'ignore-this/tripit.ics' \
'</ical_url>' \
'</Profile>' \
'</Response>'
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class TripitOAuth1UsernameAlternativesTest(TripitOAuth1Test):
user_data_body = \
'<Response>' \
'<timestamp>1363590451</timestamp>' \
'<num_bytes>1040</num_bytes>' \
'<Profile ref="ignore-me">' \
'<ProfileEmailAddresses>' \
'<ProfileEmailAddress>' \
'<address>foobar@gmail.com</address>' \
'<is_auto_import>false</is_auto_import>' \
'<is_confirmed>true</is_confirmed>' \
'<is_primary>true</is_primary>' \
'<is_auto_inbox_eligible>' \
'true' \
'</is_auto_inbox_eligible>' \
'</ProfileEmailAddress>' \
'</ProfileEmailAddresses>' \
'<is_client>true</is_client>' \
'<is_pro>false</is_pro>' \
'<screen_name>foobar</screen_name>' \
'<public_display_name>Foobar</public_display_name>' \
'<profile_url>people/foobar</profile_url>' \
'<home_city>Foo, Barland</home_city>' \
'<activity_feed_url>' \
'https://www.tripit.com/feed/activities/private/' \
'ignore-this/activities.atom' \
'</activity_feed_url>' \
'<alerts_feed_url>' \
'https://www.tripit.com/feed/alerts/private/' \
'ignore-this/alerts.atom' \
'</alerts_feed_url>' \
'<ical_url>' \
'webcal://www.tripit.com/feed/ical/private/' \
'ignore-this/tripit.ics' \
'</ical_url>' \
'</Profile>' \
'</Response>'
| bsd-3-clause |
csrocha/OpenUpgrade | addons/survey/__openerp__.py | 261 | 2391 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Survey',
'version': '2.0',
'category': 'Marketing',
'description': """
Create beautiful web surveys and visualize answers
==================================================
A survey may have multiple pages, and each page may contain multiple
questions with multiple possible answers. Different users give their own
answers to the questions, and the survey results are built from those
answers. Partners are also sent invitation mails with a personal token
for the survey.
""",
'summary': 'Create surveys, collect answers and print statistics',
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/survey',
'depends': ['email_template', 'mail', 'website', 'marketing'],
'data': [
'security/survey_security.xml',
'security/ir.model.access.csv',
'views/survey_views.xml',
'views/survey_templates.xml',
'views/survey_result.xml',
'wizard/survey_email_compose_message.xml',
'data/survey_stages.xml',
'data/survey_cron.xml'
],
'demo': ['data/survey_demo_user.xml',
'data/survey_demo_feedback.xml',
'data/survey.user_input.csv',
'data/survey.user_input_line.csv'],
'installable': True,
'auto_install': False,
'application': True,
'sequence': 10,
}
| agpl-3.0 |
AlphaSmartDog/DeepLearningNotes | Note-1 RNN-DNC择时/Note-1 初学RqAlpha——PonderLSTM和PonderDNC日频期货的简单应用/sonnet/python/modules/scale_gradient.py | 9 | 2303 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op that scales gradient for backwards pass."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import function
def scale_gradient(net, scale, name="scale_gradient"):
"""Scales gradients for the backwards pass.
This might be used to, for example, allow one part of a model to learn at a
lower rate than the rest.
WARNING: Think carefully about how your optimizer works. If, for example, you
use rmsprop, the gradient is always rescaled (with some additional epsilon)
towards unity. This means `scale_gradient` won't have the effect of
lowering the learning rate.
If `scale` is `0.0`, this op reduces to `tf.stop_gradient`. If `scale`
is `1.0`, this op reduces to `tf.identity`.
Args:
net: A `tf.Tensor`.
scale: The scale factor for the gradient on the backwards pass.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` with the same type as the input tensor.
"""
if scale == 0.0:
return tf.stop_gradient(net, name=name)
elif scale == 1.0:
return tf.identity(net, name=name)
else:
scale_tensor = tf.convert_to_tensor(scale)
@function.Defun(tf.float32, tf.float32,
python_grad_func=lambda op, g: (g * op.inputs[1], None),
func_name="ScaleGradient")
def gradient_scaler(x, unused_scale):
return x
output = gradient_scaler(net, scale_tensor, name=name) # pylint:disable=unexpected-keyword-arg
output.set_shape(net.get_shape())
return output
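# An illustrative usage sketch (the `_demo_scale_gradient` helper and the
# `w_demo` variable name are additions for illustration, not part of this
# module). Built for TF1-style graph mode, matching the code above.
def _demo_scale_gradient():
  """Tiny graph where the gradient flowing into `w` is halved."""
  x = tf.placeholder(tf.float32, shape=[None, 4])
  w = tf.get_variable("w_demo", shape=[4, 2], dtype=tf.float32)
  net = tf.matmul(x, w)
  net = scale_gradient(net, 0.5)  # forward pass unchanged, gradients halved
  loss = tf.reduce_sum(net)
  # d(loss)/dw is 0.5x what it would be without scale_gradient.
  return tf.gradients(loss, [w])[0]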
| mit |
pjdufour/geonode | geonode/layers/urls.py | 2 | 3186 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
from django.conf import settings
from django.views.generic import TemplateView
js_info_dict = {
'packages': ('geonode.layers',),
}
urlpatterns = patterns(
'geonode.layers.views',
url(r'^$',
TemplateView.as_view(template_name='layers/layer_list.html'),
{'facet_type': 'layers', 'is_layer': True},
name='layer_browse'),
url(r'^upload$', 'layer_upload', name='layer_upload'),
url(r'^upload_metadata$', 'layer_metadata_upload', name='layer_metadata_upload'),
url(r'^(?P<layername>[^/]*)$', 'layer_detail', name="layer_detail"),
url(r'^(?P<layername>[^/]*)/metadata$', 'layer_metadata', name="layer_metadata"),
url(r'^(?P<layername>[^/]*)/metadata_advanced$', 'layer_metadata_advanced', name="layer_metadata_advanced"),
url(r'^(?P<layername>[^/]*)/remove$', 'layer_remove', name="layer_remove"),
url(r'^(?P<granule_id>[^/]*)/(?P<layername>[^/]*)/granule_remove$', 'layer_granule_remove',
name="layer_granule_remove"),
url(r'^(?P<layername>[^/]*)/replace$', 'layer_replace', name="layer_replace"),
url(r'^(?P<layername>[^/]*)/thumbnail$', 'layer_thumbnail', name='layer_thumbnail'),
url(r'^(?P<layername>[^/]*)/get$', 'get_layer', name='get_layer'),
url(r'^(?P<layername>[^/]*)/metadata_detail$', 'layer_metadata_detail', name='layer_metadata_detail'),
url(r'^(?P<layername>[^/]*)/metadata_upload$', 'layer_metadata_upload', name='layer_metadata_upload'),
url(r'^(?P<layername>[^/]*)/feature_catalogue$', 'layer_feature_catalogue', name='layer_feature_catalogue'),
# url(r'^api/batch_permissions/?$', 'batch_permissions',
# name='batch_permssions'),
# url(r'^api/batch_delete/?$', 'batch_delete', name='batch_delete'),
)
# -- Deprecated url routes for Geoserver authentication -- remove after GeoNode 2.1
# -- Use /gs/acls, gs/resolve_user/, gs/download instead
if 'geonode.geoserver' in settings.INSTALLED_APPS:
urlpatterns = patterns('geonode.geoserver.views',
url(r'^acls/?$', 'layer_acls', name='layer_acls_dep'),
url(r'^resolve_user/?$', 'resolve_user', name='layer_resolve_user_dep'),
url(r'^download$', 'layer_batch_download', name='layer_batch_download_dep'),
) + urlpatterns
| gpl-3.0 |
mwickert/SP-Comm-Tutorial-using-scikit-dsp-comm | hardware_configure/sigsys.py | 1 | 81778 | """
Signals and Systems Function Module
Copyright (c) March 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
"""
Notes
-----
The primary purpose of this function library is to support the book Signals
and Systems for Dummies. Beyond that it should be useful to anyone who wants
to use Pylab for general signals and systems modeling and simulation. There
is a good collection of digital communication simulation primitives included
in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full import of the module, to avoid the the need to prefix with ssd, is:
>>> from ssd import *
Function Catalog
----------------
"""
from matplotlib import pylab
from matplotlib import mlab
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
def CIC(M,K):
"""
% b = CIC(M,K)
% A functional form implementation of a cascade of integrator comb (CIC)
% filters. Commonly used in multirate signal processing digital
% down-converters and digital up-converters. A true CIC filter requires no
% multiplies, only add and subtract operations. The functional form created
% here is a simple FIR requiring real coefficient multiplies via filter()
% ========================================================================
% M = Effective number of taps per section (typically the decimation
% factor).
% K = The number of CIC sections cascaded (larger K gives the filter a
% wider image rejection bandwidth.
% b = FIR filter coefficients for a simple direct form implementation
% using the filter() function.
% ========================================================================
%
% Mark Wickert November 2007
"""
if K == 1:
b = np.ones(M)
else:
h = np.ones(M)
b = h
for i in range(1,K):
b = signal.convolve(b,h) # cascade by convolving impulse responses
# Make filter have unity gain at DC
return b/np.sum(b)
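# An illustrative usage sketch (the _demo_CIC_response helper is an
# addition, not part of the original library): compare the magnitude
# response of one CIC section against a three-section cascade to see the
# deeper image-rejection nulls obtained with larger K.
def _demo_CIC_response(M=8):
    b1 = CIC(M,1)
    b3 = CIC(M,3)
    w,H1 = signal.freqz(b1,1,1024)
    w,H3 = signal.freqz(b3,1,1024)
    plt.plot(w/np.pi,20*np.log10(np.abs(H1) + 1e-12),label='K = 1')
    plt.plot(w/np.pi,20*np.log10(np.abs(H3) + 1e-12),label='K = 3')
    plt.xlabel('Normalized Frequency (fraction of pi)')
    plt.ylabel('Gain (dB)')
    plt.legend()
    plt.grid()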
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is assumed to be 44.1 kHz.
Parameters
----------
x : ndarray of the input signal samples
GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndarray of output signal samples
Examples
--------
>>> # Test with white noise
>>> w = randn(100000)
>>> y = ten_band_eq_filt(x,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
Fc = 31.25*2**np.arange(10)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
[b,a] = peaking(GdB[k],Fc[k],Q)
B[k,:] = b
A[k,:] = a
#Pass signal x through the cascade of ten filters
y = np.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
"""
fs = 44100.0 # Hz
Fc = 31.25*2**np.arange(10)
NB = len(GdB)
B = np.zeros((NB,3));
A = np.zeros((NB,3));
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = np.logspace(1,np.log10(20e3),1000)
H = np.ones(len(F))*np.complex(1.0,0.0)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*np.log10(abs(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
plt.stem(np.arange(NB),GdB,'b','bs')
#plt.bar(np.arange(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
    A second-order peaking filter having GdB gain at fc and approximately
    0 dB otherwise.
    The filter coefficients returned correspond to a biquadratic system function
containing five parameters.
Parameters
----------
GdB : Lowpass gain in dB
fc : Center frequency in Hz
Q : Filter Q which is inversely proportional to bandwidth
    fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> b,a = peaking(-5.0,500,4)
>>> # Assuming pylab imported
>>> f = logspace(1,5,400)
    >>> w,H = signal.freqz(b,a,2*pi*f/44100)
>>> semilogx(f,20*log10(abs(H)))
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
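# A minimal numerical check (the _check_peaking helper is an addition for
# illustration): the biquad should give approximately GdB of gain at fc
# and roughly 0 dB far from fc.
def _check_peaking(GdB=6.0,fc=1000.0,Q=3.5,fs=44100.0):
    b,a = peaking(GdB,fc,Q,fs)
    w = 2*np.pi*np.array([fc, 20.0])/fs # evaluate at fc and at 20 Hz
    w,H = signal.freqz(b,a,w)
    print('Gain at fc = %6.2f dB' % (20*np.log10(np.abs(H[0]))))
    print('Gain at 20 Hz = %6.2f dB' % (20*np.log10(np.abs(H[1]))))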
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> n = arange(-5,8)
>>> x = ex6_2(n)
>>> stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_CD(Ka,out_type = 'fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
system. The loop amplifier gain is the only variable
that may be changed. The returned system function can
however be changed.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
With the exception of the loop amplifier gain, all
other parameters are hard-coded from Case Study example.
Examples
------
>>> b,a = position_CD(Ka,'fb_approx')
>>> b,a = position_CD(Ka,'fb_exact')
"""
rs = 10/(2*np.pi)
# Load b and a ndarrays with the coefficients
if out_type.lower() == 'open_loop':
b = np.array([Ka*4000*rs])
a = np.array([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = np.array([3.2*Ka*rs])
a = np.array([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = np.array([4000*Ka*rs])
a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
print('out_type must be: open_loop, fb_approx, or fc_exact')
return 1
return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
    for the cruise control Case Study example found in
    the supplementary article. The plant model is obtained by
    linearizing the equations of motion and the controller contains a
    proportional and integral gain term set via the closed-loop parameters
    natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, nominally 0.1
zeta : closed-loop damping factor, nominally 1.0
T : vehicle time constant, nominally 10 s
vcruise : cruise velocity set point, nominally 75 mph
vmax : maximum vehicle velocity, nominally 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Examples
--------
>>> # return the closed-loop system function output/input velocity
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
>>> # return the closed-loop system function loop error/hill disturbance
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
"""
tau = T/2.*vmax/vcruise
g = 9.8
    g *= 3*60**2/5280.      # m/s^2 to mph/s conversion (approximating 1 m as 3 ft)
Kp = T/vmax*(2*zeta*wn-1/tau)
Ki = T/vmax*wn**2
K = Kp*vmax/T
print('wn = ', np.sqrt(K/(Kp/Ki)))
print('zeta = ', (K + 1/tau)/(2*wn))
a = np.array([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = np.array([K, wn**2])
elif tf_mode == 'HE':
b = np.array([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = np.array([g, 0])
else:
print('tf_mode must be: H, HE, HVU, or HED')
return 1
return b, a
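# A short sketch (the _demo_cruise_step helper is an addition for
# illustration) plotting the closed-loop velocity step response using the
# nominal parameter values given in the docstring above.
def _demo_cruise_step():
    b,a = cruise_control(0.1,1.0,10,75,120,'H')
    t,v = signal.step((b,a))    # continuous-time step response of (b,a)
    plt.plot(t,v)
    plt.xlabel('Time (s)')
    plt.ylabel('Normalized Velocity')
    plt.grid()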
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As input the function uses the numerator and denominator
s-domain system function coefficient ndarrays b and a respectively.
Assumed to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndarray.
a : denominator coefficient ndarray.
auto_scale : True
size : [xmin,xmax,ymin,ymax] plot scaling when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = np.array([0.0])
if M > 0:
N_roots = np.roots(b)
D_roots = np.array([0.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5
size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5
size[1] = max(size[1],0.5)
size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5
size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5
plt.plot([size[0],size[1]],[0,0],'k--')
plt.plot([0,0],[size[2],size[3]],'r--')
# Plot labels if multiplicity greater than 1
x_scale = size[1]-size[0]
y_scale = size[3]-size[2]
x_off = 0.03
y_off = 0.01
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = mlab.find(N_mult>1)
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = mlab.find(D_mult>1)
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis(np.array(size))
return M,N
def OS_filter(x,h,N,mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
    >>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
    >>> y = OS_filter(x,h,32)
    >>> # set mode = 1
    >>> y, y_mat = OS_filter(x,h,32,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx]
def OA_filter(x,h,N,mode=0):
"""
Overlap and add transform domain FIR filtering.
This function implements the classical overlap and add method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
    >>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
    >>> y = OA_filter(x,h,32)
    >>> # set mode = 1
    >>> y, y_mat = OA_filter(x,h,32,1)
"""
P = len(h)
L = N - P + 1 # need N >= L + P -1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and add behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:(k+1)*L]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk))
y[k*L:k*L+N] += yk
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[0:Nx], y_mat[:,0:Nx]
else:
return y[0:Nx]
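# A sanity-check sketch (the _check_overlap_methods helper is an addition
# for illustration): both overlap-and-save and overlap-and-add should
# match direct FIR filtering via signal.lfilter to numerical precision.
def _check_overlap_methods():
    n = np.arange(0,100)
    x = np.cos(2*np.pi*0.05*n)
    h = np.ones(10)
    y_direct = signal.lfilter(h,1,x)
    y_os = OS_filter(x,h,32)
    y_oa = OA_filter(x,h,32)
    print('OS max error = %g' % np.max(np.abs(y_os - y_direct)))
    print('OA max error = %g' % np.max(np.abs(y_oa - y_direct)))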
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)):
"""
Lowpass sampling theorem plotting function.
Display the spectrum of a sampled signal after setting the bandwidth,
sampling frequency, maximum display frequency, and spectral shape.
Parameters
----------
fb : spectrum lowpass bandwidth in Hz
fs : sampling frequency in Hz
fmax : plot over [-fmax,fmax]
    N : number of translates, N positive and N negative
    shape : 'tri' or 'line'
fsize : the size of the figure window, default (6,4)
Returns
-------
Nothing : A plot window opens containing the spectrum plot
Examples
--------
>>> # No aliasing as 10 < 25/2
>>> lp_samp(10,25,50,10)
>>> # Aliasing as 15 > 25/2
>>> lp_samp(15,25,50,10)
"""
plt.figure(figsize=fsize)
# define the plot interval
f = np.arange(-fmax,fmax+fmax/200.,fmax/200.)
A = 1.0;
line_ampl = A/2.*np.array([0, 1])
# plot the lowpass spectrum in black
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f,fb))
elif shape.lower() == 'line':
plt.plot([fb, fb],line_ampl,'b', linewidth=2)
plt.plot([-fb, -fb],line_ampl,'b', linewidth=2)
else:
print('shape must be tri or line')
# overlay positive and negative frequency translates
for n in range(N):
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f-(n+1)*fs,fb),'--r')
plt.plot(f,lp_tri(f+(n+1)*fs,fb),'--g')
elif shape.lower() == 'line':
plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
else:
print('shape must be tri or line')
#plt.title('Lowpass Sampling Theorem for a Real Signal: Blk = orig, dotted = translates')
plt.ylabel('Spectrum Magnitude')
plt.xlabel('Frequency in Hz')
plt.axis([-fmax,fmax,0,1])
plt.grid()
def lp_tri(f, fb):
"""
    Triangle spectral shape function used by lp_samp.
    This is a support function for the lowpass spectrum plotting function
    lp_samp().
Parameters
----------
f : ndarray containing frequency samples
fb : the bandwidth as a float constant
Returns
-------
x : ndarray of spectrum samples for a single triangle shape
Examples
--------
>>> x = lp_tri(f, fb)
"""
x = np.zeros(len(f))
for k in range(len(f)):
if abs(f[k]) <= fb:
x[k] = 1 - abs(f[k])/float(fb)
return x
def sinusoidAWGN(x,SNRdB):
"""
Add white Gaussian noise to a single real sinusoid.
Input a single sinusoid to this function and it returns a noisy
sinusoid at a specific SNR value in dB. Sinusoid power is calculated
using np.var.
Parameters
----------
x : Input signal as ndarray consisting of a single sinusoid
SNRdB : SNR in dB for output sinusoid
Returns
-------
y : Noisy sinusoid return vector
Examples
--------
>>> # set the SNR to 10 dB
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.04*n)
>>> y = sinusoidAWGN(x,10.0)
"""
# Estimate signal power
x_pwr = np.var(x)
# Create noise vector
noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x));
return x + noise
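# A quick check (the _check_sinusoidAWGN helper is an addition for
# illustration): for a long record the measured SNR should be close to
# the requested SNRdB.
def _check_sinusoidAWGN(SNRdB=10.0):
    n = np.arange(0,100000)
    x = np.cos(2*np.pi*0.04*n)
    y = sinusoidAWGN(x,SNRdB)
    SNR_meas = 10*np.log10(np.var(x)/np.var(y - x))
    print('Requested %4.1f dB, measured %4.1f dB' % (SNRdB,SNR_meas))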
def simpleQuant(x,Btot,Xmax,Limit):
"""
A simple rounding quantizer for bipolar signals having Btot = B + 1 bits.
This function models a quantizer that employs Btot bits that has one of
three selectable limiting types: saturation, overflow, and none.
The quantizer is bipolar and implements rounding.
Parameters
----------
x : input signal ndarray to be quantized
Btot : total number of bits in the quantizer, e.g. 16
Xmax : quantizer full-scale dynamic range is [-Xmax, Xmax]
    Limit : limiting of the form 'sat', 'over', 'none'
Returns
-------
xq : quantized output ndarray
Notes
-----
    The quantization error can be formed as e = xq - x
Examples
--------
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.211*n)
>>> y = sinusoidAWGN(x,90)
    >>> yq = simpleQuant(y,12,1,'sat')
>>> psd(y,2**10,Fs=1);
>>> psd(yq,2**10,Fs=1)
"""
B = Btot-1
x = x/Xmax
if Limit.lower() == 'over':
xq = (np.mod(np.round(x*2**B)+2**B,2**Btot)-2**B)/2**B
elif Limit.lower() == 'sat':
xq = np.round(x*2**B)+2**B
s1 = mlab.find(xq >= 2**Btot-1)
s2 = mlab.find(xq < 0)
xq[s1] = (2**Btot - 1)*np.ones(len(s1))
xq[s2] = np.zeros(len(s2))
xq = (xq - 2**B)/2**B
elif Limit.lower() == 'none':
xq = np.round(x*2**B)/2**B
else:
print('limit must be the string over, sat, or none')
return xq*Xmax
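# A short experiment (the _demo_simpleQuant helper is an addition for
# illustration): quantizer SNR should improve by roughly 6 dB per added
# bit for a near full-scale sinusoid input.
def _demo_simpleQuant():
    n = np.arange(0,10000)
    x = 0.9*np.cos(2*np.pi*0.211*n)
    for Btot in (8,12,16):
        e = simpleQuant(x,Btot,1.0,'sat') - x
        print('Btot = %2d: SNR = %5.1f dB' % (Btot,10*np.log10(np.var(x)/np.var(e))))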
def prin_alias(f_in,fs):
"""
    Calculate the principal alias frequencies.
    Given an array of input frequencies the function returns an
    array of principal alias frequencies.
Parameters
----------
f_in : ndarray of input frequencies
fs : sampling frequency
Returns
-------
    f_out : ndarray of principal alias frequencies
Examples
--------
>>> # Linear frequency sweep from 0 to 50 Hz
>>> f_in = arange(0,50,0.1)
>>> # Calculate principle alias with fs = 10 Hz
>>> f_out = prin_alias(f_in,10)
"""
return abs(np.rint(f_in/fs)*fs - f_in)
"""
Principal alias via iteration (alternative implementation)
f_out = np.copy(f_in)
for k in range(len(f_out)):
while f_out[k] > fs/2.:
f_out[k] = abs(f_out[k] - fs)
return f_out
"""
def cascade_filters(b1,a1,b2,a2):
"""
Cascade two IIR digital filters into a single (b,a) coefficient set.
To cascade two digital filters (system functions) given their numerator
and denominator coefficients you simply convolve the coefficient arrays.
Parameters
----------
b1 : ndarray of numerator coefficients for filter 1
a1 : ndarray of denominator coefficients for filter 1
b2 : ndarray of numerator coefficients for filter 2
a2 : ndarray of denominator coefficients for filter 2
Returns
-------
b : ndarray of numerator coefficients for the cascade
a : ndarray of denominator coefficients for the cascade
Examples
--------
>>> from scipy import signal
>>> b1,a1 = signal.butter(3, 0.1)
>>> b2,a2 = signal.butter(3, 0.15)
>>> b,a = cascade_filters(b1,a1,b2,a2)
"""
return signal.convolve(b1,b2), signal.convolve(a1,a2)
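# A numerical check (the _check_cascade_filters helper is an addition for
# illustration): the frequency response of the cascade should equal the
# product of the individual responses.
def _check_cascade_filters():
    b1,a1 = signal.butter(3, 0.1)
    b2,a2 = signal.butter(3, 0.15)
    b,a = cascade_filters(b1,a1,b2,a2)
    w,H = signal.freqz(b,a,512)
    w,H1 = signal.freqz(b1,a1,512)
    w,H2 = signal.freqz(b2,a2,512)
    print('max |H - H1*H2| = %g' % np.max(np.abs(H - H1*H2)))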
def soi_snoi_gen(s,SIR_dB,N,fi,fs = 8000):
"""
Add an interfering sinusoidal tone to the input signal at a given SIR_dB.
    The input is the signal of interest (SOI) and a number of sinusoid signals
    not of interest (SNOI) are added to the SOI at a prescribed signal-to-
    interference SIR level in dB.
Parameters
----------
s : ndarray of signal of SOI
SIR_dB : interference level in dB
N : Trim input signal s to length N + 1 samples
    fi : ndarray of interference frequencies in Hz
fs : sampling rate in Hz, default is 8000 Hz
Returns
-------
    r : ndarray of combined signal plus interference of length N+1 samples
Examples
--------
>>> # load a speech ndarray and trim to 5*8000 + 1 samples
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
"""
n = np.arange(0,N+1)
K = len(fi)
si = np.zeros(N+1)
for k in range(K):
si += np.cos(2*np.pi*fi[k]/fs*n);
s = s[:N+1]
Ps = np.var(s)
Psi = np.var(si)
r = s + np.sqrt(Ps/Psi*10**(-SIR_dB/10))*si
return r
def lms_ic(r,M,mu,delta=1):
"""
Least mean square (LMS) interference canceller adaptive filter.
A complete LMS adaptive filter simulation function for the case of
interference cancellation. Used in the digital filtering case study.
Parameters
----------
    r : ndarray of the noisy input signal
    M : FIR Filter length (order M-1)
    mu : LMS step-size
    delta : decorrelation delay between input and FIR filter input
Returns
-------
n : ndarray Index vector
r : ndarray noisy (with interference) input signal
r_hat : ndarray filtered output (NB_hat[n])
e : ndarray error sequence (WB_hat[n])
ao : ndarray final value of weight vector
F : ndarray frequency response axis vector
Ao : ndarray frequency response of filter
Examples
----------
>>> # import a speech signal
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> # add interference at 1kHz and 1.5 kHz and
>>> # truncate to 5 seconds
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
>>> # simulate with a 64 tap FIR and mu = 0.005
>>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005)
"""
N = len(r)-1;
# Form the reference signal y via delay delta
y = signal.lfilter(np.hstack((np.zeros(delta), np.array([1]))),1,r)
# Initialize output vector x_hat to zero
r_hat = np.zeros(N+1)
# Initialize error vector e to zero
e = np.zeros(N+1)
# Initialize weight vector to zero
ao = np.zeros(M+1)
# Initialize filter memory to zero
z = np.zeros(M)
# Initialize a vector for holding ym of length M+1
ym = np.zeros(M+1)
for k in range(N+1):
# Filter one sample at a time
r_hat[k],z = signal.lfilter(ao,np.array([1]),np.array([y[k]]),zi=z)
# Form the error sequence
e[k] = r[k] - r_hat[k]
# Update the weight vector
ao = ao + 2*mu*e[k]*ym
# Update vector used for correlation with e(k)
ym = np.hstack((np.array([y[k]]), ym[:-1]))
# Create filter frequency response
F, Ao = signal.freqz(ao,1,1024)
F/= (2*np.pi)
Ao = 20*np.log10(abs(Ao))
return np.arange(0,N+1), r, r_hat, e, ao, F, Ao
def fir_iir_notch(fi,fs,r=0.95):
"""
Design a second-order FIR or IIR notch filter.
A second-order FIR notch filter is created by placing conjugate
zeros on the unit circle at angle corresponidng to the notch center
frequency. The IIR notch variation places a pair of conjugate poles
at the same angle, but with radius r < 1 (typically 0.9 to 0.95).
Parameters
----------
fi : notch frequency is Hz relative to fs
fs : the sampling frequency in Hz, e.g. 8000
r : pole radius for IIR version, default = 0.95
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
If the pole radius is 0 then an FIR version is created, that is
there are no poles except at z = 0.
Examples
--------
>>> b_FIR, a_FIR = fir_iir_notch(1000,8000,0)
>>> b_IIR, a_IIR = fir_iir_notch(1000,8000)
"""
w0 = 2*np.pi*fi/float(fs)
if r >= 1:
print('Poles on or outside unit circle.')
if r == 0:
a = np.array([1.0])
else:
a = np.array([1, -2*r*np.cos(w0), r**2])
b = np.array([1, -2*np.cos(w0), 1])
return b, a
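# A plotting sketch (the _demo_fir_iir_notch helper is an addition for
# illustration): overlay the FIR (r = 0) and IIR (r = 0.95) notch
# responses for a 1 kHz notch at fs = 8 kHz.
def _demo_fir_iir_notch(fi=1000.,fs=8000.):
    b_FIR,a_FIR = fir_iir_notch(fi,fs,0)
    b_IIR,a_IIR = fir_iir_notch(fi,fs,0.95)
    f = np.arange(10.,fs/2.,10.)
    w = 2*np.pi*f/fs
    w,H_FIR = signal.freqz(b_FIR,a_FIR,w)
    w,H_IIR = signal.freqz(b_IIR,a_IIR,w)
    plt.plot(f,20*np.log10(np.abs(H_FIR) + 1e-12),label='FIR (r = 0)')
    plt.plot(f,20*np.log10(np.abs(H_IIR) + 1e-12),label='IIR (r = 0.95)')
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Gain (dB)')
    plt.legend()
    plt.grid()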
def simple_SA(x,NS,NFFT,fs,NAVG=1,window='boxcar'):
"""
Spectral estimation using windowing and averaging.
    This function implements averaged periodogram spectral estimation
    similar to the pylab psd() function, but more specialized for
    the windowing case study of Chapter 16.
Parameters
----------
x : ndarray containing the input signal
NS : The subrecord length less zero padding, e.g. NS < NFFT
NFFT : FFT length, e.g., 1024 = 2**10
fs : sampling rate in Hz
NAVG : the number of averages, e.g., 1 for deterministic signals
window : hardcoded window 'boxcar' (default) or 'hanning'
Returns
-------
f : ndarray frequency axis in Hz on [0, fs/2]
Sx : ndarray the power spectrum estimate
Notes
-----
The function also prints the maximum number of averages K possible
for the input data record.
Examples
--------
>>> n = arange(0,2048)
>>> x = cos(2*pi*1000/10000*n) + 0.01*cos(2*pi*3000/10000*n)
>>> f, Sx = simple_SA(x,128,512,10000)
>>> f, Sx = simple_SA(x,256,1024,10000,window='hanning')
>>> plot(f, 10*log10(Sx))
"""
Nx = len(x)
K = Nx/NS
print('K = ', K)
if NAVG > K:
print('NAVG exceeds number of available subrecords')
return 0,0
if window.lower() == 'boxcar' or window.lower() == 'rectangle':
w = signal.boxcar(NS)
elif window.lower() == 'hanning':
w = signal.hanning(NS)
xsw = np.zeros((K,NS)) + 1j*np.zeros((K,NS))
for k in range(NAVG):
xsw[k,] = w*x[k*NS:(k+1)*NS]
Sx = np.zeros(NFFT)
for k in range(NAVG):
X = fft.fft(xsw[k,],NFFT)
Sx += abs(X)**2
Sx /= float(NAVG)
Sx /= float(NFFT**2)
if x.dtype != 'complex128':
n = np.arange(NFFT/2)
f = fs*n/float(NFFT)
Sx = Sx[0:NFFT/2]
else:
n = np.arange(NFFT/2)
f = fs*np.hstack((np.arange(-NFFT/2,0),np.arange(NFFT/2)))/float(NFFT)
Sx = np.hstack((Sx[NFFT/2:],Sx[0:NFFT/2]))
return f, Sx
def line_spectra(fk,Xk,mode,sides=2,linetype='b',lwidth=2,floor_dB=-100,fsize=(6,4)):
"""
Plot the Fouier series line spectral given the coefficients.
This function plots two-sided and one-sided line spectra of a periodic
signal given the complex exponential Fourier series coefficients and
the corresponding harmonic frequencies.
Parameters
----------
fk : vector of real sinusoid frequencies
Xk : magnitude and phase at each positive frequency in fk
mode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot,
mode cont : 'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians
sides : 2; 2-sided or 1-sided
linetype : line type per Matplotlib definitions, e.g., 'b';
lwidth : 2; linewidth in points
fsize : optional figure size in inches, default = (6,4) inches
Returns
-------
Nothing : A plot window opens containing the line spectrum plot
Notes
-----
    Since real signals are assumed, the frequencies in fk are zero and/or
    positive numbers, and the supplied Fourier coefficients correspond.
Examples
--------
>>> n = arange(0,25)
>>> # a pulse train with 10 Hz fundamental and 20% duty cycle
>>> fk = n*10
>>> Xk = sinc(n*10*.02)*exp(-1j*2*pi*n*10*.01) # 1j = sqrt(-1)
>>> line_spectra(fk,Xk,'mag')
>>> line_spectra(fk,Xk,'phase')
"""
plt.figure(figsize=fsize)
# Eliminate zero valued coefficients
idx = pylab.find(Xk != 0)
Xk = Xk[idx]
fk = fk[idx]
if mode == 'mag':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, 2.*np.abs(Xk[k])],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), 0, 1.05*max(abs(Xk))])
elif sides == 1:
plt.axis([0, 1.2*max(fk), 0, 1.05*2*max(abs(Xk))])
else:
print('Invalid sides type')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdB':
Xk_dB = 20*np.log10(np.abs(Xk))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
print('Invalid sides type')
plt.ylabel('Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdBn':
Xk_dB = 20*np.log10(np.abs(Xk)/max(np.abs(Xk)))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
print('Invalid sides type')
plt.ylabel('Normalized Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'phase':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, -np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
if sides == 2:
plt.plot([-1.2*max(fk), 1.2*max(fk)], [0, 0],'k')
plt.axis([-1.2*max(fk), 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
elif sides == 1:
plt.plot([0, 1.2*max(fk)], [0, 0],'k')
plt.axis([0, 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
else:
print('Invalid sides type')
plt.ylabel('Phase (rad)')
plt.xlabel('Frequency (Hz)')
else:
print('Invalid mode type')
def fs_coeff(xp,N,f0,one_side=True):
"""
Numerically approximate the Fourier series coefficients given periodic x(t).
The input is assummed to represent one period of the waveform
x(t) that has been uniformly sampled. The number of samples supplied
to represent one period of the waveform sets the sampling rate.
Parameters
----------
xp : ndarray of one period of the waveform x(t)
N : maximum Fourier series coefficient, [0,...,N]
f0 : fundamental frequency used to form fk.
Returns
-------
Xk : ndarray of the coefficients over indices [0,1,...,N]
fk : ndarray of the harmonic frequencies [0, f0,2f0,...,Nf0]
Notes
-----
len(xp) >= 2*N+1 as len(xp) is the fft length.
Examples
--------
>>> t = arange(0,1,1/1024.)
>>> # a 20% duty cycle pulse starting at t = 0
>>> x_rect = rect(t-.1,0.2)
>>> Xk, fk = fs_coeff(x_rect,25,10)
>>> # plot the spectral lines
>>> line_spectra(fk,Xk,'mag')
"""
Nint = len(xp)
if Nint < 2*N+1:
print('Number of samples in xp insufficient for requested N.')
return 0,0
Xp = fft.fft(xp,Nint)/float(Nint)
# To interface with the line_spectra function use one_side mode
if one_side:
Xk = Xp[0:N+1]
fk = f0*np.arange(0,N+1)
else:
Xk = np.hstack((Xp[-N:],Xp[0:N+1]))
fk = f0*np.arange(-N,N+1)
return Xk, fk
def fs_approx(Xk,fk,t):
"""
Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies
Assume the signal is real so coefficients Xk are supplied for nonnegative
    indices. The negative index coefficients are assumed to be complex
conjugates.
Parameters
----------
Xk : ndarray of complex Fourier series coefficients
fk : ndarray of harmonic frequencies in Hz
t : ndarray time axis corresponding to output signal array x_approx
Returns
-------
x_approx : ndarray of periodic waveform approximation over time span t
Examples
--------
>>> t = arange(0,2,.002)
>>> # a 20% duty cycle pulse train
>>> n = arange(0,20,1) # 0 to 19th harmonic
    >>> fk = 1*n # period = 1 s
    >>> # Xk obtained from fs_coeff of one period of the pulse train
    >>> x_approx = fs_approx(Xk,fk,t)
>>> plot(t,x_approx)
"""
x_approx = np.zeros(len(t))
for k,Xkk in enumerate(Xk):
        if fk[k] == 0:
            # DC term: take the real part so the in-place add into the
            # real-valued array x_approx is safe
            x_approx += np.real(Xkk)*np.ones(len(t))
else:
x_approx += 2*np.abs(Xkk)*np.cos(2*np.pi*fk[k]*t+np.angle(Xkk))
return x_approx
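# A round-trip sketch (the _demo_fs_roundtrip helper is an addition for
# illustration): estimate the coefficients of a 20% duty-cycle pulse
# train with fs_coeff, then resynthesize two periods with fs_approx.
# Note rect() is defined later in this module; Python resolves the name
# at call time.
def _demo_fs_roundtrip(N=25):
    t1 = np.arange(0,1,1/1024.)  # one period sampled at 1024 points, f0 = 1 Hz
    xp = rect(t1 - 0.1,0.2)      # pulse of width 0.2 starting at t = 0
    Xk,fk = fs_coeff(xp,N,1.0)
    t = np.arange(0,2,1/1024.)
    x_hat = fs_approx(Xk,fk,t)
    plt.plot(t,x_hat)
    plt.xlabel('Time (s)')
    plt.ylabel('%d-Term Fourier Series Approximation' % N)
    plt.grid()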
def conv_sum(x1,nx1,x2,nx2,extent=('f','f')):
"""
Discrete convolution of x1 and x2 with proper tracking of the output time axis.
    Convolve two discrete-time signals using the SciPy function signal.convolve.
    The time (sequence) axis is managed from input to output. y[n] = x1[n]*x2[n].
Parameters
----------
x1 : ndarray of signal x1 corresponding to nx1
nx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to nx2
nx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ny : ndarray of the corresponding sequence index n
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The default
extents of ('f','f') are used for signals that are active (have support)
on or within n1 and n2 respectively. A right-sided signal such as
a^n*u[n] is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> nx = arange(-5,10)
>>> x = drect(nx,4)
>>> y,ny = conv_sum(x,nx,x,nx)
>>> stem(ny,y)
>>> # Consider a pulse convolved with an exponential ('r' type extent)
>>> h = 0.5**nx*dstep(nx)
>>> y,ny = conv_sum(x,nx,h,nx,('f','r')) # note extents set
>>> stem(ny,y) # expect a pulse charge and discharge sequence
"""
nnx1 = np.arange(0,len(nx1))
nnx2 = np.arange(0,len(nx2))
n1 = nnx1[0]
n2 = nnx1[-1]
n3 = nnx2[0]
n4 = nnx2[-1]
    # Start by finding the valid output support or extent interval to ensure
    # that for non-finite extent signals ambiguous results are not returned.
    # Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n4+1-1)
ny = np.arange(0,len(x1)+len(x2)-1) + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
nny = np.arange(n1+n3,n1+1+n4+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n3+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
nny = np.arange(n2+n3,n2+1+n4+1-1)
ny = nny + nx1[-1]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
nny = np.arange(n1+n4,n2+1+n4+1-1)
        ny = nny + nx1[0]+nx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
nny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
nny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ny = nny + max(nx1[0]+nx2[-1],nx1[-1]+nx2[0])
else:
print('Invalid x1 x2 extents specified or valid extent not found!')
return 0,0
# Finally convolve the sequences
y = signal.convolve(x1, x2)
print('Output support: (%+d, %+d)' % (ny[0],ny[-1]))
return y[nny], ny
def conv_integral(x1,tx1,x2,tx2,extent = ('f','f')):
"""
Continuous-time convolution of x1 and x2 with proper tracking of the output time axis.
    Approximate the convolution integral for the convolution of two
    continuous-time signals using the SciPy function signal.convolve.
    The time axis is managed from input to output. y(t) = x1(t)*x2(t).
Parameters
----------
x1 : ndarray of signal x1 corresponding to tx1
tx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to tx2
tx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ty : ndarray of the corresponding time axis for y
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The time steps used in
x1(t) and x2(t) must match. The default extents of ('f','f') are used for signals
that are active (have support) on or within t1 and t2 respectively. A right-sided
signal such as exp(-a*t)*u(t) is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> tx = arange(-5,10,.01)
>>> x = rect(tx-2,4) # pulse starts at t = 0
>>> y,ty = conv_integral(x,tx,x,tx)
>>> plot(ty,y) # expect a triangle on [0,8]
>>> # Consider a pulse convolved with an exponential ('r' type extent)
>>> h = 4*exp(-4*tx)*step(tx)
>>> y,ty = conv_integral(x,tx,h,tx,extent=('f','r')) # note extents set
>>> plot(ty,y) # expect a pulse charge and discharge waveform
"""
dt = tx1[1] - tx1[0]
nx1 = np.arange(0,len(tx1))
nx2 = np.arange(0,len(tx2))
n1 = nx1[0]
n2 = nx1[-1]
n3 = nx2[0]
n4 = nx2[-1]
    # Start by finding the valid output support or extent interval to ensure
    # that for non-finite extent signals ambiguous results are not returned.
    # Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n4+1-1)
ty = np.arange(0,len(x1)+len(x2)-1)*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
ny = np.arange(n1+n3,n1+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n3+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
ny = np.arange(n2+n3,n2+1+n4+1-1)
ty = ny*dt + tx1[-1]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
ny = np.arange(n1+n4,n2+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
ny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
ny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ty = ny*dt + max(tx1[0]+tx2[-1],tx1[-1]+tx2[0])
else:
print('Invalid x1 x2 extents specified or valid extent not found!')
return 0,0
# Finally convolve the sampled sequences and scale by dt
y = signal.convolve(x1, x2)*dt
print('Output support: (%+2.2f, %+2.2f)' % (ty[0],ty[-1]))
return y[ny], ty
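# A quick numerical check (the _check_conv_integral helper is an addition
# for illustration): a width-4 pulse convolved with itself gives a
# triangle with peak height 4 at t = 4. rect() is defined later in this
# module and is resolved at call time.
def _check_conv_integral():
    tx = np.arange(-5,10,.01)
    x = rect(tx - 2,4)          # pulse active on [0, 4]
    y,ty = conv_integral(x,tx,x,tx)
    print('peak = %4.2f at t = %4.2f (expect ~4 at t = 4)'
          % (np.max(y),ty[np.argmax(y)]))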
def delta_eps(t,eps):
"""
Rectangular pulse approximation to impulse function.
Parameters
----------
t : ndarray of time axis
eps : pulse width
Returns
-------
d : ndarray containing the impulse approximation
Examples
--------
>>> t = arange(-2,2,.001)
>>> d = delta_eps(t,.1)
>>> plot(t,d)
"""
d = np.zeros(len(t))
for k,tt in enumerate(t):
if abs(tt) <= eps/2.:
d[k] = 1/float(eps)
return d
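# A one-line sanity check (the _check_delta_eps helper is an addition for
# illustration): the pulse approximation should integrate to about one.
def _check_delta_eps(eps=0.1):
    t = np.arange(-2,2,.001)
    d = delta_eps(t,eps)
    print('area = %6.4f (expect ~1)' % (np.sum(d)*.001))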
def step(t):
"""
Approximation to step function signal u(t).
In this numerical version of u(t) the step turns on at t = 0.
Parameters
----------
t : ndarray of the time axis
Returns
-------
x : ndarray of the step function signal u(t)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = step(t)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = step(t - 1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tt in enumerate(t):
if tt >= 0:
x[k] = 1.0
return x
def rect(t,tau):
"""
Approximation to the rectangle pulse Pi(t/tau).
In this numerical version of Pi(t/tau) the pulse is active
over -tau/2 <= t <= tau/2.
Parameters
----------
t : ndarray of the time axis
tau : the pulse width
Returns
-------
x : ndarray of the signal Pi(t/tau)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = rect(t,1.0)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = rect(t - 1.0,1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/2.:
x[k] = 0
else:
x[k] = 1
return x
def tri(t,tau):
"""
Approximation to the triangle pulse Lambda(t/tau).
In this numerical version of Lambda(t/tau) the pulse is active
over -tau <= t <= tau.
Parameters
----------
t : ndarray of the time axis
tau : one half the triangle base width
Returns
-------
x : ndarray of the signal Lambda(t/tau)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = tri(t,1.0)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = tri(t - 1.0,1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/1.:
x[k] = 0
else:
x[k] = 1 - np.abs(tk)/tau
return x
def dimpulse(n):
"""
Discrete impulse function delta[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal delta[n]
Examples
--------
>>> n = arange(-5,5)
>>> x = dimpulse(n)
>>> stem(n,x)
>>> # shift the delta left by 2
>>> x = dimpulse(n+2)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn == 0:
x[k] = 1.0
return x
def dstep(n):
"""
Discrete step function u[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal u[n]
Examples
--------
>>> n = arange(-5,5)
>>> x = dstep(n)
>>> stem(n,x)
    >>> # shift the step left by 2
>>> x = dstep(n+2)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0:
x[k] = 1.0
return x
def drect(n,N):
"""
Discrete rectangle function of duration N samples.
The signal is active on the interval 0 <= n <= N-1. Also known
as the rectangular window function, which is available in
scipy.signal.
Parameters
----------
n : ndarray of the time axis
N : the pulse duration
Returns
-------
x : ndarray of the signal
Notes
-----
The discrete rectangle turns on at n = 0, off at n = N-1 and
has duration of exactly N samples.
Examples
--------
>>> n = arange(-5,5)
    >>> x = drect(n,4)
    >>> stem(n,x)
    >>> # shift the pulse left by 2
    >>> x = drect(n+2,4)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0 and nn < N:
x[k] = 1.0
return x
def rc_imp(Ns,alpha,M=6):
"""
A truncated raised cosine pulse used in digital communications.
    The pulse shaping factor 0 < alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> b = rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> stem(n,b)
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n));
a = alpha;
Ns *= 1.0
for i in range(len(n)):
if (1 - 4*(a*n[i]/Ns)**2) == 0:
b[i] = np.pi/4*np.sinc(1/(2.*a))
else:
b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2)
return b
def sqrt_rc_imp(Ns,alpha,M=6):
"""
A truncated square root raised cosine pulse used in digital communications.
    The pulse shaping factor 0 < alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
    when forming a pulse shaped digital communications waveform. When the
    square root raised cosine (SRC) pulse is used to generate the Tx signal
    and is also used at the receiver as a matched filter (receiver FIR
    filter), the received signal is raised cosine shaped, giving zero
    intersymbol interference and optimum removal of additive white
    noise if present at the receiver input.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> b = sqrt_rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> stem(n,b)
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
Ns *= 1.0
a = alpha
for i in range(len(n)):
if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(np.float).eps/2:
b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))
else:
b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2))
b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a))
return b
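# A numerical check (the _check_src_matched helper is an addition for
# illustration): an SRC pulse convolved with itself should match the RC
# pulse, up to truncation error, after normalizing to unit peak.
def _check_src_matched(Ns=10,alpha=0.35,M=6):
    p_src = sqrt_rc_imp(Ns,alpha,M)
    p_rc = rc_imp(Ns,alpha,M)
    p_mf = signal.convolve(p_src,p_src)
    p_mf = p_mf/np.max(np.abs(p_mf))   # unit peak, matching p_rc
    mid = len(p_mf)//2
    err = p_mf[mid-M*Ns:mid+M*Ns+1] - p_rc
    print('max |SRC*SRC - RC| = %g' % np.max(np.abs(err)))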
def PN_gen(N_bits,m=5):
"""
Maximal length sequence signal generator.
    Generates a sequence of 0/1 bits of N_bits duration. The bits themselves
    are obtained from an m-sequence of period 2**m - 1. Available m-sequence
(PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
N_bits : the number of bits to generate
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
PN : ndarray of the generator output over N_bits
Notes
-----
The sequence is periodic having period 2**m - 1 (2^m - 1).
Examples
--------
    >>> # A 15 bit period signal over 50 bits
>>> PN = PN_gen(50,4)
"""
c = m_seq(m)
Q = len(c)
max_periods = int(np.ceil(N_bits/float(Q)))
PN = np.zeros(max_periods*Q)
for k in range(max_periods):
PN[k*Q:(k+1)*Q] = c
PN = np.resize(PN, (1,N_bits))
return PN.flatten()
def m_seq(m):
"""
Generate an m-sequence ndarray using an all-ones initialization.
Available m-sequence (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
c : ndarray of one period of the m-sequence
Notes
-----
The sequence period is 2**m - 1 (2^m - 1).
Examples
--------
>>> c = m_seq(5)
"""
# Load shift register with all ones to start
sr = np.ones(m)
    # m-sequence length is:
Q = 2**m - 1
c = np.zeros(Q)
if m == 2:
taps = np.array([1, 1, 1])
elif m == 3:
taps = np.array([1, 0, 1, 1])
elif m == 4:
taps = np.array([1, 0, 0, 1, 1])
elif m == 5:
taps = np.array([1, 0, 0, 1, 0, 1])
elif m == 6:
taps = np.array([1, 0, 0, 0, 0, 1, 1])
elif m == 7:
taps = np.array([1, 0, 0, 0, 1, 0, 0, 1])
elif m == 8:
taps = np.array([1, 0, 0, 0, 1, 1, 1, 0, 1])
elif m == 9:
taps = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 1])
elif m == 10:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1])
elif m == 11:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1])
elif m == 12:
taps = np.array([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1])
elif m == 16:
taps = np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1])
else:
print('Invalid length specified')
for n in range(Q):
tap_xor = 0
c[n] = sr[-1]
for k in range(1,m):
if taps[k] == 1:
tap_xor = np.bitwise_xor(tap_xor,np.bitwise_xor(int(sr[-1]),int(sr[m-1-k])))
sr[1:] = sr[:-1]
sr[0] = tap_xor
return c
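# A property check (the _check_m_seq helper is an addition for
# illustration): an m-sequence has period 2**m - 1 and, by the balance
# property, contains exactly one more 1 than 0.
def _check_m_seq(m=5):
    c = m_seq(m)
    print('period = %d (expect %d)' % (len(c),2**m - 1))
    ones = int(np.sum(c))
    print('ones = %d, zeros = %d' % (ones,len(c) - ones))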
def BPSK_tx(N_bits,Ns,ach_fc=2.0,ach_lvl_dB=-100,pulse='rect',alpha = 0.25,M=6):
"""
    Generates a binary phase shift keyed (BPSK) transmitter signal with adjacent channel interference.
Generates three BPSK signals with rectangular or square root raised cosine (SRC)
pulse shaping of duration N_bits and Ns samples per bit. The desired signal is
    centered at f = 0, while the adjacent channel signals to the left and right
    are generated at a dB level relative to the desired signal. Used in the
digital communications Case Study supplement.
Parameters
----------
N_bits : the number of bits to simulate
Ns : the number of samples per bit
ach_fc : the frequency offset of the adjacent channel signals (default 2.0)
ach_lvl_dB : the level of the adjacent channel signals in dB (default -100)
    pulse : the pulse shape 'rect' or 'src'
alpha : square root raised cosine pulse shape factor (default = 0.25)
M : square root raised cosine pulse truncation factor (default = 6)
Returns
-------
x : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m)
b : the transmit pulse shape
data0 : the data bits used to form the desired signal; used for error checking
Examples
--------
>>> x,b,data0 = BPSK_tx(1000,10,'src')
"""
x0,b,data0 = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1p,b,data1p = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1m,b,data1m = NRZ_bits(N_bits,Ns,pulse,alpha,M)
n = np.arange(len(x0))
x1p = x1p*np.exp(1j*2*np.pi*ach_fc/float(Ns)*n)
x1m = x1m*np.exp(-1j*2*np.pi*ach_fc/float(Ns)*n)
ach_lvl = 10**(ach_lvl_dB/20.)
return x0 + ach_lvl*(x1p + x1m), b, data0
#def BPSK_rx(r,b,):
def NRZ_bits(N_bits,Ns,pulse='rect',alpha = 0.25,M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping.
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping.
Parameters
----------
N_bits : number of NRZ +/-1 data bits to produce
Ns : the number of samples per bit,
pulse_type : 'rect' , 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
data : ndarray of the underlying data bits
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
This function is used by BPSK_tx in the Case Study article.
Examples
--------
>>> x,b,data = NRZ_bits(100,10)
>>> t = arange(len(x))
>>> plot(t,x)
"""
data = np.random.randint(0,2,N_bits)
x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1))))
    x = x.flatten()
if pulse.lower() == 'rect':
b = np.ones(Ns)
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
    else:
        raise ValueError("pulse type must be 'rect', 'rc', or 'src'")
x = signal.lfilter(b,1,x)
return x,b/float(Ns),data
def NRZ_bits2(data,Ns,pulse='rect',alpha = 0.25,M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping with user data
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping. The data sequence is user supplied.
Parameters
----------
data : ndarray of the data bits as 0/1 values
Ns : the number of samples per bit,
    pulse : 'rect', 'rc', or 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
Examples
--------
    >>> x,b = NRZ_bits2(m_seq(5),10)
>>> t = arange(len(x))
>>> plot(t,x)
"""
N_bits = len(data)
x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1))))
x = x.flatten()
if pulse.lower() == 'rect':
b = np.ones(Ns)
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
    else:
        raise ValueError("pulse type must be 'rect', 'rc', or 'src'")
x = signal.lfilter(b,1,x)
return x,b/float(Ns)
def eye_plot(x,L,S=0):
"""
Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
L : display length in samples (usually two symbols)
S : start index
Returns
-------
Nothing : A plot window opens containing the eye plot
Notes
-----
Increase S to eliminate filter transients.
Examples
--------
>>> # 1000 bits at 10 samples per bit with 'rc' shaping
>>> x,b, data = NRZ_bits(1000,10,'rc')
>>> eye_plot(x,20,60)
"""
plt.figure(figsize=(6,4))
idx = np.arange(0,L+1)
plt.plot(idx,x[S:S+L+1],'b')
k_max = int((len(x) - S)/L)-1
for k in range(1,k_max):
plt.plot(idx,x[S+k*L:S+L+1+k*L],'b')
plt.grid()
plt.xlabel('Time Index - n')
plt.ylabel('Amplitude')
plt.title('Eye Plot')
return 0
def scatter(x,Ns,start):
"""
Sample a baseband digital communications waveform at the symbol spacing.
Parameters
----------
x : ndarray of the input digital comm signal
Ns : number of samples per symbol (bit)
start : the array index to start the sampling
Returns
-------
xI : ndarray of the real part of x following sampling
xQ : ndarray of the imaginary part of x following sampling
Notes
-----
    Normally the signal is complex, so the scatter plot contains
    clusters at points in the complex plane. For a binary signal
such as BPSK, the point centers are nominally +/-1 on the real
axis. Start is used to eliminate transients from the FIR
pulse shaping filters from appearing in the scatter plot.
Examples
--------
>>> x,b, data = NRZ_bits(1000,10,'rc')
>>> # add some noise so points are now scattered about +/-1
>>> y = cpx_AWGN(x,20,10)
>>> yI,yQ = scatter(y,10,60)
>>> plot(yI,yQ,'.')
>>> axis('equal')
"""
xI = np.real(x[start::Ns])
xQ = np.imag(x[start::Ns])
return xI, xQ
def bit_errors(z,data,start,Ns):
"""
A simple bit error counting function.
In its present form this function counts bit errors between
hard decision BPSK bits in +/-1 form and compares them with
0/1 binary data that was transmitted. Timing between the Tx
and Rx data is the responsibility of the user. An enhanced
version of this function, which features automatic synching
will be created in the future.
Parameters
----------
z : ndarray of hard decision BPSK data prior to symbol spaced sampling
data : ndarray of reference bits in 1/0 format
    start : timing reference for the received bit stream
Ns : the number of samples per symbol
Returns
-------
Pe_hat : the estimated probability of a bit error
Notes
-----
    The Tx and Rx data streams are exclusive-or'd and then the bit errors
are summed, and finally divided by the number of bits observed to form an
estimate of the bit error probability. This function needs to be
enhanced to be more useful.
Examples
--------
>>> from scipy import signal
>>> x,b, data = NRZ_bits(1000,10)
>>> # set Eb/N0 to 8 dB
>>> y = cpx_AWGN(x,8,10)
>>> # matched filter the signal
>>> z = signal.lfilter(b,1,y)
>>> # make bit decisions at 10 and Ns multiples thereafter
>>> Pe_hat = bit_errors(z,data,10,10)
"""
Pe_hat = np.sum(data[0:len(z[start::Ns])]^np.int64((np.sign(np.real(z[start::Ns]))+1)/2))/float(len(z[start::Ns]))
return Pe_hat
def cpx_AWGN(x,EsN0,Ns):
"""
Apply white Gaussian noise to a digital communications signal.
This function represents a complex baseband white Gaussian noise
digital communications channel. The input signal array may be real
or complex.
Parameters
----------
x : ndarray noise free complex baseband input signal.
EsNO : set the channel Es/N0 (Eb/N0 for binary) level in dB
Ns : number of samples per symbol (bit)
Returns
-------
y : ndarray x with additive noise added.
Notes
-----
Set the channel energy per symbol-to-noise power spectral
density ratio (Es/N0) in dB.
Examples
--------
>>> x,b, data = NRZ_bits(1000,10)
>>> # set Eb/N0 = 10 dB
>>> y = cpx_AWGN(x,10,10)
"""
w = np.sqrt(Ns*np.var(x)*10**(-EsN0/10.)/2.)*(np.random.randn(len(x)) + 1j*np.random.randn(len(x)))
return x+w
def my_psd(x,NFFT=2**10,Fs=1):
"""
    A local version of matplotlib's PSD function that returns the plot arrays.
    An mlab.psd wrapper function that returns two ndarrays and
    makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> x,b, data = NRZ_bits(10000,10)
>>> Px,f = my_psd(x,2**10,10)
>>> plot(f, 10*log10(Px))
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f
def am_tx(m,a_mod,fc=75e3):
"""
AM transmitter for Case Study of Chapter 17.
Assume input is sampled at 8 Ksps and upsampling
by 24 is performed to arrive at fs_out = 192 Ksps.
Parameters
----------
m : ndarray of the input message signal
a_mod : AM modulation index, between 0 and 1
fc : the carrier frequency in Hz
Returns
-------
x192 : ndarray of the upsampled by 24 and modulated carrier
t192 : ndarray of the upsampled by 24 time axis
m24 : ndarray of the upsampled by 24 message signal
Notes
-----
The sampling rate of the input signal is assumed to be 8 kHz.
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
"""
m24 = interp24(m)
t192 = np.arange(len(m24))/192.0e3
#m24 = np.cos(2*np.pi*2.0e3*t192)
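    # Normalize by the message peak so a_mod directly sets the modulation index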
m_max = np.max(np.abs(m24))
x192 = (1 + a_mod*m24/m_max)*np.cos(2*np.pi*fc*t192)
return x192, t192, m24
def am_rx(x192):
"""
AM envelope detector receiver for the Chapter 17 Case Study
The receiver bandpass filter is not included in this function.
Parameters
----------
x192 : ndarray of the AM signal at sampling rate 192 ksps
Returns
-------
m_rx8 : ndarray of the demodulated message at 8 ksps
t8 : ndarray of the time axis at 8 ksps
m_rx192 : ndarray of the demodulated output at 192 ksps
x_edet192 : ndarray of the envelope detector output at 192 ksps
Notes
-----
The bandpass filter needed at the receiver front-end can be designed
using b_bpf,a_bpf = am_rx_BPF().
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
    >>> m_rx8,t8,m_rx192,x_edet192 = am_rx(x192)
"""
x_edet192 = env_det(x192)
m_rx8 = deci24(x_edet192)
# remove DC offset from the env_det + LPF output
m_rx8 -= np.mean(m_rx8)
t8 = np.arange(len(m_rx8))/8.0e3
"""
For performance testing also filter x_env_det
192e3 using a Butterworth cascade.
The filter cutoff is 5kHz, the message BW.
"""
b192,a192 = signal.butter(5,2*5.0e3/192.0e3)
m_rx192 = signal.lfilter(b192,a192,x_edet192)
m_rx192 = signal.lfilter(b192,a192,m_rx192)
m_rx192 -= np.mean(m_rx192)
return m_rx8,t8,m_rx192,x_edet192
def am_rx_BPF(N_order = 7, ripple_dB = 1, B = 10e3, fs = 192e3):
"""
Bandpass filter design for the AM receiver Case Study of Chapter 17.
Design a 7th-order Chebyshev type 1 bandpass filter to remove/reduce
    adjacent channel interference at the envelope detector input.
Parameters
----------
N_order : the filter order (default = 7)
ripple_dB : the passband ripple in dB (default = 1)
B : the RF bandwidth (default = 10e3)
fs : the sampling frequency
Returns
-------
b_bpf : ndarray of the numerator filter coefficients
a_bpf : ndarray of the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> # Use the default values
>>> b_bpf,a_bpf = am_rx_BPF()
>>> # plot the filter pole-zero plot
>>> zplane(b_bpf,a_bpf)
>>> # plot the frequency response
>>> f = arange(0,192/2.,.1)
>>> w, Hbpf = signal.freqz(b_bpf,a_bpf,2*pi*f/192)
>>> plot(f,20*log10(abs(Hbpf)))
>>> axis([0,192/2.,-80,10])
"""
b_bpf,a_bpf = signal.cheby1(N_order,ripple_dB,2*np.array([75e3-B/2.,75e3+B/2.])/fs,'bandpass')
return b_bpf,a_bpf
def env_det(x):
"""
Ideal envelope detector.
This function retains the positive half cycles of the input signal.
Parameters
----------
    x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Examples
--------
>>> n = arange(0,100)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
>>> y = env_det(x192)
"""
y = np.zeros(len(x))
for k,xx in enumerate(x):
if xx >= 0:
y[k] = xx
return y
def interp24(x):
"""
Interpolate by L = 24 using Butterworth filters.
The interpolation is done using three stages. Upsample by
L = 2 and lowpass filter, upsample by 3 and lowpass filter, then
upsample by L = 4 and lowpass filter. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
track the upsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = interp24(x)
"""
# Stage 1: L = 2
b2,a2 = signal.butter(10,1/2.)
y1 = upsample(x,2)
y1 = signal.lfilter(b2,a2,2*y1)
# Stage 2: L = 3
b3,a3 = signal.butter(10,1/3.)
y2 = upsample(y1,3)
y2 = signal.lfilter(b3,a3,3*y2)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = upsample(y2,4)
y3 = signal.lfilter(b4,a4,4*y3)
return y3
def deci24(x):
"""
Decimate by L = 24 using Butterworth filters.
    The decimation is done using three stages. Lowpass filter and downsample
    by M = 2, lowpass filter and downsample by M = 3, then lowpass filter
    and downsample by M = 4. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
    The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
    track the downsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = deci24(x)
"""
# Stage 1: M = 2
b2,a2 = signal.butter(10,1/2.)
y1 = signal.lfilter(b2,a2,x)
y1 = downsample(y1,2)
# Stage 2: M = 3
b3,a3 = signal.butter(10,1/3.)
y2 = signal.lfilter(b3,a3,y1)
y2 = downsample(y2,3)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = signal.lfilter(b4,a4,y2)
y3 = downsample(y3,4)
return y3
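# Minimal usage sketch: interp24 followed by deci24 raises the rate by 24 and
# lowers it by 24 again, so aside from Butterworth transients and group delay
# the output matches the input rate and length.
def _demo_rate_change_round_trip():
    n = np.arange(0, 200)
    x = np.cos(2*np.pi*0.02*n)
    y = deci24(interp24(x))
    assert len(y) == len(x)
    return y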
def upsample(x,L):
"""
Upsample by factor L
Insert L - 1 zero samples in between each input sample.
Parameters
----------
x : ndarray of input signal values
L : upsample factor
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = upsample(x,3)
"""
N_input = len(x)
y = np.hstack((x.reshape(N_input,1),np.zeros((N_input,L-1))))
y = y.flatten()
return y
def downsample(x,M,p=0):
"""
Downsample by factor M
Keep every Mth sample of the input. The phase of the input samples
kept can be selected.
Parameters
----------
x : ndarray of input signal values
    M : downsample factor
p : phase of decimated value, 0 (default), 1, ..., M-1
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = downsample(x,3)
>>> y = downsample(x,3,1)
"""
x = x[0:int(np.floor(len(x)/M))*M]
x = x.reshape((int(np.floor(len(x)/M)),M))
y = x[:,p]
return y
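# Minimal usage sketch: without filtering, downsampling an upsampled signal at
# the matching phase (p = 0) returns the original samples exactly, while any
# other phase returns the inserted zeros.
def _demo_upsample_downsample():
    x = np.arange(5.0)
    y = upsample(x, 3)
    assert np.array_equal(downsample(y, 3, 0), x)
    assert not np.any(downsample(y, 3, 1))
    return y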
def unique_cpx_roots(rlist,tol = 0.001):
"""
    Find the unique roots in rlist, within tolerance tol, along with their
    multiplicities. The average of the root values is used when multiplicity
    is greater than one.
Mark Wickert October 2016
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
        for m in range(N_uniq):
            if abs(rlist[k]-uniq[m]) <= tol:
                mult[m] += 1
                uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
                break
        else:
            # Only append a new unique root when no existing root matched
            uniq = np.hstack((uniq,rlist[k]))
            mult = np.hstack((mult,[1]))
return np.array(uniq), np.array(mult)
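# Minimal usage sketch: the double root of (z - 0.5)**2 = z**2 - z + 0.25 is
# reported once with multiplicity 2 under the default tolerance.
def _demo_unique_cpx_roots():
    uniq, mult = unique_cpx_roots(np.roots([1, -1.0, 0.25]))
    assert len(uniq) == 1
    assert mult[0] == 2
    return uniq, mult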
def zplane(b,a,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Create an z-plane pole-zero plot.
Create an z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
    auto_scale : bool (default True)
    size : plot radius maximum when auto_scale = False
    detect_mult : bool, detect and label repeated roots (default True)
    tol : tolerance used when detecting repeated roots (default 0.001)
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
    The difficulty is setting the tolerance for this detection. Currently it
    is set at 1e-3 via the tol keyword, which is passed to the helper
    function unique_cpx_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> zplane(b,a)
>>> # Here the plot is generated using manual scaling
>>> zplane(b,a,False,1.5)
"""
M = len(b) - 1
N = len(a) - 1
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
#N_roots = np.array([1.0])
if M > 0:
N_roots = np.roots(b)
#D_roots = np.array([1.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
if detect_mult == True:
N_uniq, N_mult = unique_cpx_roots(N_roots,tol=tol)
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = mlab.find(N_mult>1)
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
else:
plt.plot(np.real(N_roots),np.imag(N_roots),'ko',mfc='None',ms=8)
if N > 0:
if detect_mult == True:
D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = mlab.find(D_mult>1)
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
else:
plt.plot(np.real(D_roots),np.imag(D_roots),'kx',ms=8)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N
def rect_conv(n,N_len):
"""
The theoretical result of convolving two rectangle sequences.
The result is a triangle. The solution is
based on pure analysis. Simply coded as opposed
to efficiently coded.
Parameters
----------
n : ndarray of time axis
N_len : rectangle pulse duration
Returns
-------
    y : ndarray of the output signal
Examples
--------
>>> n = arange(-5,20)
>>> y = rect_conv(n,6)
"""
y = np.zeros(len(n))
for k in range(len(n)):
if n[k] >= 0 and n[k] < N_len-1:
y[k] = n[k] + 1
elif n[k] >= N_len-1 and n[k] <= 2*N_len-2:
y[k] = 2*N_len-1-n[k]
return y
def biquad2(w_num, r_num, w_den, r_den):
"""
A biquadratic filter in terms of conjugate pole and zero pairs.
Parameters
----------
w_num : zero frequency (angle) in rad/sample
r_num : conjugate zeros radius
w_den : pole frequency (angle) in rad/sample
r_den : conjugate poles radius; less than 1 for stability
Returns
-------
b : ndarray of numerator coefficients
a : ndarray of denominator coefficients
Examples
--------
    >>> b,a = biquad2(pi/4., 1, pi/4., 0.95)
"""
b = np.array([1, -2*r_num*np.cos(w_num), r_num**2])
a = np.array([1, -2*r_den*np.cos(w_den), r_den**2])
return b, a
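# Minimal usage sketch (assumes scipy.signal imported as signal, as used
# throughout this module): zeros on the unit circle at w_num = pi/2 give a
# perfect notch at that frequency, regardless of the pole placement.
def _demo_biquad2():
    b, a = biquad2(np.pi/2., 1.0, np.pi/2., 0.9)
    w, H = signal.freqz(b, a, worN=[np.pi/2.])
    assert abs(H[0]) < 1e-10
    return b, a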
def plot_na(x,y,mode='stem'):
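    """
    Plot data with both plot axes hidden, as a stem (default) or line plot.
    A small helper for producing unannotated figures; the x- and y-axis
    tick marks and labels are suppressed before the figure is shown.
    """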
pylab.figure(figsize=(5,2))
frame1 = pylab.gca()
if mode.lower() == 'stem':
pylab.stem(x,y)
else:
pylab.plot(x,y)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
pylab.show()
def from_wav(filename):
"""
Read a wave file.
A wrapper function for scipy.io.wavfile.read
that also includes int16 to float [-1,1] scaling.
Parameters
----------
filename : file name string
Returns
-------
fs : sampling frequency in Hz
x : ndarray of normalized to 1 signal samples
Examples
--------
>>> fs,x = from_wav('test_file.wav')
"""
fs, x = wavfile.read(filename)
return fs, x/32767.
def to_wav(filename,rate,x):
"""
Write a wave file.
A wrapper function for scipy.io.wavfile.write
that also includes int16 scaling and conversion.
Assume input x is [-1,1] values.
Parameters
----------
filename : file name string
rate : sampling frequency in Hz
Returns
-------
Nothing : writes only the *.wav file
Examples
--------
>>> to_wav('test_file.wav', 8000, x)
"""
x16 = np.int16(x*32767)
wavfile.write(filename, rate, x16)
if __name__ == '__main__':
b = CIC(10,1)
print(b)
"""
x = np.random.randn(10)
print(x)
b = signal.remez(16,[0,.1,.2,.5], [1,0], [1,1], 1)
w,H = signal.freqz(b,[1],512)
plot(w,20*log10(abs(H)))
figure(figsize=(6,4))
#plot(arange(0,len(b)),b)
y = signal.lfilter(b, [1], x,)
print(y)
zplane([1,1,1,1,1],[1,-.8],1.25)
"""
| bsd-2-clause |
R4stl1n/allianceauth | allianceauth/templatetags/admin_status.py | 5 | 5004 | import requests
import logging
import amqp.exceptions
import semantic_version as semver
from django import template
from django.conf import settings
from django.core.cache import cache
from celery.app import app_or_default
from allianceauth import __version__
register = template.Library()
TAG_CACHE_TIME = 10800 # 3 hours
NOTIFICATION_CACHE_TIME = 300 # 5 minutes
logger = logging.getLogger(__name__)
def get_github_tags():
request = requests.get('https://api.github.com/repos/allianceauth/allianceauth/releases')
request.raise_for_status()
return request.json()
def get_github_notification_issues():
# notification
request = requests.get(
'https://api.github.com/repos/allianceauth/allianceauth/issues?labels=announcement&state=all')
request.raise_for_status()
return request.json()
@register.inclusion_tag('allianceauth/admin-status/overview.html', takes_context=True)
def status_overview(context):
response = {
'notifications': list(),
'latest_major': True,
'latest_minor': True,
'latest_patch': True,
'current_version': __version__,
'task_queue_length': -1,
}
response.update(get_notifications())
response.update(get_version_info())
response.update({'task_queue_length': get_celery_queue_length()})
return response
def get_celery_queue_length():
try:
app = app_or_default(None)
with app.connection_or_acquire() as conn:
return conn.default_channel.queue_declare(
queue=getattr(settings, 'CELERY_DEFAULT_QUEUE', 'celery'), passive=True).message_count
except amqp.exceptions.ChannelError:
# Queue doesn't exist, probably empty
return 0
except Exception:
logger.exception("Failed to get celery queue length")
return -1
def get_notifications():
response = {
'notifications': list(),
}
try:
notifications = cache.get_or_set('github_notification_issues', get_github_notification_issues,
NOTIFICATION_CACHE_TIME)
# Limit notifications to those posted by repo owners and members
response['notifications'] += [n for n in notifications if n['author_association'] in ['OWNER', 'MEMBER']][:5]
except requests.RequestException:
logger.exception('Error while getting github notifications')
return response
def get_version_info():
response = {
'latest_major': True,
'latest_minor': True,
'latest_patch': True,
'current_version': __version__,
}
try:
tags = cache.get_or_set('github_release_tags', get_github_tags, TAG_CACHE_TIME)
current_ver = semver.Version.coerce(__version__)
# Set them all to the current version to start
# If the server has only earlier or the same version
# then this will become the major/minor/patch versions
latest_major = current_ver
latest_minor = current_ver
latest_patch = current_ver
response.update({
'latest_major_version': str(latest_major),
'latest_minor_version': str(latest_minor),
'latest_patch_version': str(latest_patch),
})
for tag in tags:
tag_name = tag.get('tag_name')
if tag_name[0] == 'v':
                # Strip 'v' off front of version if it exists
tag_name = tag_name[1:]
try:
tag_ver = semver.Version.coerce(tag_name)
except ValueError:
tag_ver = semver.Version('0.0.0', partial=True)
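            # Only tags newer than the running version can change the flags below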
if tag_ver > current_ver:
if latest_major is None or tag_ver > latest_major:
latest_major = tag_ver
response['latest_major_version'] = tag_name
response['latest_major_url'] = tag['html_url']
if tag_ver.major > current_ver.major:
response['latest_major'] = False
elif tag_ver.major == current_ver.major:
if latest_minor is None or tag_ver > latest_minor:
latest_minor = tag_ver
response['latest_minor_version'] = tag_name
response['latest_minor_url'] = tag['html_url']
if tag_ver.minor > current_ver.minor:
response['latest_minor'] = False
elif tag_ver.minor == current_ver.minor:
if latest_patch is None or tag_ver > latest_patch:
latest_patch = tag_ver
response['latest_patch_version'] = tag_name
response['latest_patch_url'] = tag['html_url']
if tag_ver.patch > current_ver.patch:
response['latest_patch'] = False
except requests.RequestException:
logger.exception('Error while getting github release tags')
return response
| gpl-2.0 |
vishdha/erpnext | erpnext/accounts/report/purchase_register/purchase_register.py | 19 | 9186 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import msgprint, _
def execute(filters=None):
return _execute(filters)
def _execute(filters=None, additional_table_columns=None, additional_query_columns=None):
if not filters: filters = {}
invoice_list = get_invoices(filters, additional_query_columns)
columns, expense_accounts, tax_accounts = get_columns(invoice_list, additional_table_columns)
if not invoice_list:
msgprint(_("No record found"))
return columns, invoice_list
invoice_expense_map = get_invoice_expense_map(invoice_list)
invoice_expense_map, invoice_tax_map = get_invoice_tax_map(invoice_list,
invoice_expense_map, expense_accounts)
invoice_po_pr_map = get_invoice_po_pr_map(invoice_list)
suppliers = list(set([d.supplier for d in invoice_list]))
supplier_details = get_supplier_details(suppliers)
company_currency = frappe.db.get_value("Company", filters.company, "default_currency")
data = []
for inv in invoice_list:
# invoice details
purchase_order = list(set(invoice_po_pr_map.get(inv.name, {}).get("purchase_order", [])))
purchase_receipt = list(set(invoice_po_pr_map.get(inv.name, {}).get("purchase_receipt", [])))
project = list(set(invoice_po_pr_map.get(inv.name, {}).get("project", [])))
row = [inv.name, inv.posting_date, inv.supplier, inv.supplier_name]
if additional_query_columns:
for col in additional_query_columns:
row.append(inv.get(col))
row += [
supplier_details.get(inv.supplier), # supplier_type
inv.credit_to, inv.mode_of_payment, ", ".join(project),
inv.bill_no, inv.bill_date, inv.remarks,
", ".join(purchase_order), ", ".join(purchase_receipt), company_currency
]
# map expense values
base_net_total = 0
for expense_acc in expense_accounts:
expense_amount = flt(invoice_expense_map.get(inv.name, {}).get(expense_acc))
base_net_total += expense_amount
row.append(expense_amount)
# net total
row.append(base_net_total or inv.base_net_total)
# tax account
total_tax = 0
for tax_acc in tax_accounts:
if tax_acc not in expense_accounts:
tax_amount = flt(invoice_tax_map.get(inv.name, {}).get(tax_acc))
total_tax += tax_amount
row.append(tax_amount)
# total tax, grand total, outstanding amount & rounded total
row += [total_tax, inv.base_grand_total, flt(inv.base_grand_total, 2), inv.outstanding_amount]
data.append(row)
return columns, data
def get_columns(invoice_list, additional_table_columns):
"""return columns based on filters"""
columns = [
_("Invoice") + ":Link/Purchase Invoice:120", _("Posting Date") + ":Date:80",
_("Supplier Id") + "::120", _("Supplier Name") + "::120"]
if additional_table_columns:
columns += additional_table_columns
columns += [
_("Supplier Type") + ":Link/Supplier Type:120", _("Payable Account") + ":Link/Account:120",
_("Mode of Payment") + ":Link/Mode of Payment:80", _("Project") + ":Link/Project:80",
_("Bill No") + "::120", _("Bill Date") + ":Date:80", _("Remarks") + "::150",
_("Purchase Order") + ":Link/Purchase Order:100",
_("Purchase Receipt") + ":Link/Purchase Receipt:100",
{
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Data",
"width": 80
}
]
expense_accounts = tax_accounts = expense_columns = tax_columns = []
if invoice_list:
expense_accounts = frappe.db.sql_list("""select distinct expense_account
from `tabPurchase Invoice Item` where docstatus = 1
and (expense_account is not null and expense_account != '')
and parent in (%s) order by expense_account""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]))
tax_accounts = frappe.db.sql_list("""select distinct account_head
from `tabPurchase Taxes and Charges` where parenttype = 'Purchase Invoice'
and docstatus = 1 and (account_head is not null and account_head != '')
and category in ('Total', 'Valuation and Total')
and parent in (%s) order by account_head""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]))
expense_columns = [(account + ":Currency/currency:120") for account in expense_accounts]
for account in tax_accounts:
if account not in expense_accounts:
tax_columns.append(account + ":Currency/currency:120")
columns = columns + expense_columns + [_("Net Total") + ":Currency/currency:120"] + tax_columns + \
[_("Total Tax") + ":Currency/currency:120", _("Grand Total") + ":Currency/currency:120",
_("Rounded Total") + ":Currency/currency:120", _("Outstanding Amount") + ":Currency/currency:120"]
return columns, expense_accounts, tax_accounts
def get_conditions(filters):
conditions = ""
if filters.get("company"): conditions += " and company=%(company)s"
if filters.get("supplier"): conditions += " and supplier = %(supplier)s"
if filters.get("from_date"): conditions += " and posting_date>=%(from_date)s"
if filters.get("to_date"): conditions += " and posting_date<=%(to_date)s"
if filters.get("mode_of_payment"): conditions += " and ifnull(mode_of_payment, '') = %(mode_of_payment)s"
return conditions
def get_invoices(filters, additional_query_columns):
if additional_query_columns:
additional_query_columns = ', ' + ', '.join(additional_query_columns)
conditions = get_conditions(filters)
return frappe.db.sql("""
select
name, posting_date, credit_to, supplier, supplier_name, bill_no, bill_date,
remarks, base_net_total, base_grand_total, outstanding_amount,
mode_of_payment {0}
from `tabPurchase Invoice`
where docstatus = 1 %s
order by posting_date desc, name desc""".format(additional_query_columns or '') % conditions, filters, as_dict=1)
def get_invoice_expense_map(invoice_list):
expense_details = frappe.db.sql("""
select parent, expense_account, sum(base_net_amount) as amount
from `tabPurchase Invoice Item`
where parent in (%s)
group by parent, expense_account
""" % ', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_expense_map = {}
for d in expense_details:
invoice_expense_map.setdefault(d.parent, frappe._dict()).setdefault(d.expense_account, [])
invoice_expense_map[d.parent][d.expense_account] = flt(d.amount)
return invoice_expense_map
def get_invoice_tax_map(invoice_list, invoice_expense_map, expense_accounts):
tax_details = frappe.db.sql("""
select parent, account_head, case add_deduct_tax when "Add" then sum(base_tax_amount_after_discount_amount)
else sum(base_tax_amount_after_discount_amount) * -1 end as tax_amount
from `tabPurchase Taxes and Charges`
where parent in (%s) and category in ('Total', 'Valuation and Total')
group by parent, account_head, add_deduct_tax
""" % ', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_tax_map = {}
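	# Taxes charged directly to an expense account are merged into the
	# expense map below so the amount is not double counted as a tax column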
for d in tax_details:
if d.account_head in expense_accounts:
			if d.account_head in invoice_expense_map[d.parent]:
invoice_expense_map[d.parent][d.account_head] += flt(d.tax_amount)
else:
invoice_expense_map[d.parent][d.account_head] = flt(d.tax_amount)
else:
invoice_tax_map.setdefault(d.parent, frappe._dict()).setdefault(d.account_head, [])
invoice_tax_map[d.parent][d.account_head] = flt(d.tax_amount)
return invoice_expense_map, invoice_tax_map
def get_invoice_po_pr_map(invoice_list):
pi_items = frappe.db.sql("""
select parent, purchase_order, purchase_receipt, po_detail, project
from `tabPurchase Invoice Item`
where parent in (%s) and (ifnull(purchase_order, '') != '' or ifnull(purchase_receipt, '') != '')
""" % ', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_po_pr_map = {}
for d in pi_items:
if d.purchase_order:
invoice_po_pr_map.setdefault(d.parent, frappe._dict()).setdefault(
"purchase_order", []).append(d.purchase_order)
pr_list = None
if d.purchase_receipt:
pr_list = [d.purchase_receipt]
elif d.po_detail:
pr_list = frappe.db.sql_list("""select distinct parent from `tabPurchase Receipt Item`
where docstatus=1 and purchase_order_item=%s""", d.po_detail)
if pr_list:
invoice_po_pr_map.setdefault(d.parent, frappe._dict()).setdefault("purchase_receipt", pr_list)
if d.project:
invoice_po_pr_map.setdefault(d.parent, frappe._dict()).setdefault(
"project", []).append(d.project)
return invoice_po_pr_map
def get_account_details(invoice_list):
account_map = {}
accounts = list(set([inv.credit_to for inv in invoice_list]))
for acc in frappe.db.sql("""select name, parent_account from tabAccount
where name in (%s)""" % ", ".join(["%s"]*len(accounts)), tuple(accounts), as_dict=1):
account_map[acc.name] = acc.parent_account
return account_map
def get_supplier_details(suppliers):
supplier_details = {}
for supp in frappe.db.sql("""select name, supplier_type from `tabSupplier`
where name in (%s)""" % ", ".join(["%s"]*len(suppliers)), tuple(suppliers), as_dict=1):
supplier_details.setdefault(supp.name, supp.supplier_type)
return supplier_details
| gpl-3.0 |
disqus/django-old | django/contrib/formtools/wizard/storage/cookie.py | 5 | 1058 | from django.core.exceptions import SuspiciousOperation
from django.core.signing import BadSignature
from django.utils import simplejson as json
from django.contrib.formtools.wizard import storage
class CookieStorage(storage.BaseStorage):
encoder = json.JSONEncoder(separators=(',', ':'))
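    # Compact separators keep the serialized JSON small, since the entire
    # wizard state must fit within the browser's per-cookie size limit.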
def __init__(self, *args, **kwargs):
super(CookieStorage, self).__init__(*args, **kwargs)
self.data = self.load_data()
if self.data is None:
self.init_data()
def load_data(self):
try:
data = self.request.get_signed_cookie(self.prefix)
except KeyError:
data = None
except BadSignature:
raise SuspiciousOperation('FormWizard cookie manipulated')
if data is None:
return None
return json.loads(data, cls=json.JSONDecoder)
def update_response(self, response):
if self.data:
response.set_signed_cookie(self.prefix, self.encoder.encode(self.data))
else:
response.delete_cookie(self.prefix)
| bsd-3-clause |
cnobile2012/django-pam | django_pam/auth/tests/base_test.py | 1 | 3836 | # -*- coding: utf-8 -*-
#
# django_pam/auth/tests/base_test.py
#
import os
import sys
import json
import types
import getpass
import six
from io import open
from collections import OrderedDict
from django.conf import settings
from django.test import TestCase
from django.utils.translation import ugettext
class BaseDjangoPAM(TestCase):
_CONFIG = '.django_pam'
def __init__(self, name):
super(BaseDjangoPAM, self).__init__(name)
def _prompt(self, need_email=False):
home = os.path.join(settings.BASE_DIR, '..', self._CONFIG)
fields = ('username', 'password', 'email',)
lines = {}
if os.path.exists(home):
with open(home, 'rb') as file:
for idx, line in enumerate(file):
lines[fields[idx]] = line.decode('utf-8').strip()
username = lines.get('username')
password = lines.get('password')
email = lines.get('email')
else:
temp_username = getpass.getuser()
sys.stderr.write("Username ({}): ".format(temp_username))
username = six.moves.input() # Prompt goes to stdout which is off.
if not username:
username = temp_username
password = getpass.getpass()
if need_email:
sys.stderr.write("Email: ")
email = six.moves.input() # Prompt goes to stdout which is off.
else:
email = None
return username, password, email
def _clean_data(self, data):
if data is not None:
if isinstance(data, (list, tuple,)):
data = self.__clean_value(data)
else:
for key in data:
data[key] = self.__clean_value(data.get(key))
return data
def __clean_value(self, value):
if isinstance(value, (list, tuple,)):
value = [self.__clean_value(item) for item in value]
elif isinstance(value, (dict, OrderedDict,)):
for key in value:
value[key] = self.__clean_value(value.get(key))
        elif (isinstance(value, six.integer_types + (bool, type)) or
              value is None):
pass
else:
value = ugettext(value)
return value
def _has_error(self, response):
result = False
if hasattr(response, 'context_data'):
if response.context_data.get('form').errors:
result = True
return result
def _test_errors(self, response, tests={}):
if hasattr(response, 'context_data'):
errors = dict(response.context_data.get('form').errors)
for key, value in tests.items():
err_msg = errors.pop(key, None)
self.assertTrue(err_msg, "Could not find key: {}".format(key))
err_msg = err_msg.as_text()
msg = "For key '{}' value '{}' not found in '{}'".format(
key, value, err_msg)
self.assertTrue(value in err_msg, msg)
elif hasattr(response, 'content'):
errors = json.loads(response.content.decode('utf-8'))
for key, value in tests.items():
err_msg = errors.pop(key, None)
self.assertTrue(err_msg, "Could not find key: {}".format(key))
msg = "For key '{}' value '{}' not found in '{}'".format(
key, value, err_msg)
if isinstance(err_msg, (list, tuple)):
err_msg = err_msg[0]
self.assertTrue(value in err_msg, msg)
else:
msg = "No context_data"
self.assertTrue(False, msg)
msg = "Unaccounted for errors: {}".format(errors)
self.assertFalse(len(errors) != 0 and True or False, msg)
| mit |
wnesl/gnuradio-IA | gnuradio-core/src/lib/gengen/generate_common.py | 17 | 3125 | #!/usr/bin/env python
#
# Copyright 2004,2006,2007,2008,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from build_utils import expand_template, standard_dict
from build_utils_codes import *
import re
# sources and sinks
ss_signatures = ['s', 'i', 'f', 'c']
ss_roots = [
'gr_vector_source_X',
'gr_vector_sink_X',
'gr_noise_source_X',
'gr_sig_source_X',
'gr_probe_signal_X',
'gr_probe_signal_vX'
]
# regular blocks
reg_signatures = ['ss', 'ii', 'ff', 'cc']
reg_roots = [
'gr_add_const_XX',
'gr_sub_XX',
'gr_divide_XX',
'gr_mute_XX',
'gr_add_const_vXX',
'gr_multiply_const_vXX',
'gr_integrate_XX',
'gr_moving_average_XX',
]
# other blocks
others = (
('gr_chunks_to_symbols_XX', ('bf', 'bc', 'sf', 'sc', 'if', 'ic')),
('gr_unpacked_to_packed_XX', ('bb','ss','ii')),
('gr_packed_to_unpacked_XX', ('bb','ss','ii')),
('gr_xor_XX', ('bb','ss','ii')),
('gr_and_XX', ('bb','ss','ii')),
('gr_and_const_XX', ('bb','ss','ii')),
('gr_or_XX', ('bb','ss','ii')),
('gr_not_XX', ('bb','ss','ii')),
('gr_sample_and_hold_XX', ('bb','ss','ii','ff')),
('gr_argmax_XX', ('fs','is','ss')),
('gr_max_XX', ('ff','ii','ss')),
('gr_peak_detector_XX', ('fb','ib','sb')),
('gr_multiply_XX', ('ss','ii')),
('gr_multiply_const_XX', ('ss','ii')),
('gr_add_XX', ('ss','cc','ii'))
)
def expand_h_cc_i (root, sig):
# root looks like 'gr_vector_sink_X'
name = re.sub ('X+', sig, root)
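    # e.g. root 'gr_add_const_XX' with sig 'ff' expands to 'gr_add_const_ff'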
d = standard_dict (name, sig)
expand_template (d, root + '.h.t')
expand_template (d, root + '.cc.t')
expand_template (d, root + '.i.t')
def generate ():
expand_h_cc_i ('gr_add_const_XX', 'sf') # for MC4020
expand_h_cc_i ('gr_vector_sink_X', 'b')
expand_h_cc_i ('gr_vector_source_X', 'b')
expand_h_cc_i ('gr_probe_signal_X', 'b')
expand_h_cc_i ('gr_probe_signal_vX', 'b')
for r in ss_roots:
for s in ss_signatures:
expand_h_cc_i (r, s)
for r in reg_roots :
for s in reg_signatures:
expand_h_cc_i (r, s)
for root, sigs in others:
for s in sigs:
expand_h_cc_i (root, s)
if __name__ == '__main__':
generate ()
| gpl-3.0 |
F5Networks/f5-ansible-modules | ansible_collections/f5networks/f5_modules/plugins/modules/bigip_device_auth_radius_server.py | 1 | 14733 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_device_auth_radius_server
short_description: Manages the RADIUS server configuration of the device
description:
- Manages a device's RADIUS server configuration.
- Used in tandem with the C(bigip_device_auth_radius) module.
version_added: "1.3.0"
options:
name:
description:
- Specifies the name of the RADIUS server to manage.
type: str
required: True
description:
description:
- The description of the RADIUS server.
type: str
ip:
description:
- The IP address of the server.
- This parameter is mandatory when creating a new resource.
type: str
port:
description:
- The port of the server.
- Valid range of values is between C(0) and C(65535) inclusive.
type: int
secret:
description:
      - Specifies the secret used for accessing the RADIUS server.
- This parameter is mandatory when creating a new resource.
type: str
timeout:
description:
- Specifies the timeout value in seconds.
- Valid range of values is between C(1) and C(60) inclusive.
type: int
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures the RADIUS server exists.
- When C(state) is C(absent), ensures the RADIUS server is removed.
type: str
choices:
- present
- absent
default: present
update_secret:
description:
- C(always) will update passwords if the C(secret) is specified.
- C(on_create) will only set the password for newly created servers.
type: str
choices:
- always
- on_create
default: always
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a RADIUS server configuration
bigip_device_auth_radius_server:
name: "ansible_test"
ip: "1.1.1.1"
port: 1812
secret: "secret"
timeout: 5
update_secret: on_create
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Update RADIUS server configuration
bigip_device_auth_radius_server:
name: "ansible_test"
ip: "10.10.10.1"
description: "this is a test"
port: 1813
timeout: 10
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove RADIUS server configuration
bigip_device_auth_radius_server:
name: "ansible_test"
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
ip:
description: IP address of the RADIUS Server.
returned: changed
type: str
sample: 1.1.1.1
port:
description: RADIUS service port.
returned: changed
type: int
sample: 1812
timeout:
description: Timeout value.
returned: changed
type: int
sample: 3
description:
description: User defined description of the RADIUS server.
returned: changed
type: str
sample: "this is my server"
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec
)
from ..module_utils.compare import cmp_str_with_none
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'server': 'ip',
}
api_attributes = [
'secret',
'server',
'port',
'timeout',
'description',
]
returnables = [
'ip',
'port',
'timeout',
'secret',
'description',
]
updatables = [
'secret',
'ip',
'port',
'timeout',
'description',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def timeout(self):
if self._values['timeout'] is None:
return None
        if self._values['timeout'] < 1 or self._values['timeout'] > 60:
raise F5ModuleError(
"Timeout value must be between 1 and 60."
)
return self._values['timeout']
@property
def ip(self):
if self._values['ip'] is None:
return None
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
if 0 <= self._values['port'] <= 65535:
return self._values['port']
raise F5ModuleError(
"Valid ports must be in range 0 - 65535"
)
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
class Changes(Parameters):
def to_return(self):
result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
returnables = [
'ip',
'port',
'timeout',
'description',
]
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def secret(self):
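        # Only report a secret change when the user allows updates ('always');
        # with 'on_create' an existing server keeps its current secret.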
if self.want.secret != self.have.secret:
if self.want.update_secret == 'always':
result = self.want.secret
return result
@property
def description(self):
return cmp_str_with_none(self.want.description, self.have.description)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/auth/radius-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/auth/radius-server/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/auth/radius-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/radius-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status in [200, 201]:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/radius-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True
),
description=dict(),
ip=dict(),
port=dict(
type='int'
),
timeout=dict(
type='int'
),
secret=dict(
no_log=True,
),
update_secret=dict(
default='always',
choices=['always', 'on_create']
),
state=dict(
default='present',
choices=['absent', 'present']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| mit |
nwjs/nw-gyp | gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
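
Building on the example above, files can be added through the group
hierarchy (the path shown here is hypothetical):

  file_ref = group.AddOrGetFileByPath('source/main.cc', True)

With the second (hierarchical) argument True, intermediate PBXGroups
("source" in this case) are created as needed, and the returned
PBXFileReference can be wrapped in a PBXBuildFile for use in a build phase.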
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
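  """Converts Xcode-style $(VAR) references to shell-style ${VAR} references.

  Illustrative example (SRCROOT is just a sample variable name):
    ConvertVariablesToShellSyntax('$(SRCROOT)/foo')  # returns '${SRCROOT}/foo'
  """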
return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
             default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
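
             As an illustration only, a hypothetical subclass might declare
             its schema like this:

               _schema = XCObject._schema.copy()
               _schema.update({
                 # [is_list, property_type, is_strong, is_required]
                 'name':     [0, str,      0, 1],
                 'children': [1, XCObject, 1, 0],
               })
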
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
    _hashables: A list of additional hashable values used by ComputeIDs when
                constructing this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
        project file structure is built. At that point, ComputeIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError('Strong dict for key ' + key + ' in ' + \
self.__class__.__name__)
else:
that._properties[key] = value.copy()
else:
raise TypeError('Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__)
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's exceedingly less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
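      # Illustrative collision avoided: without the length prefix, hashing
      # 'ab' then 'c' would feed the same byte stream as hashing 'a' then
      # 'bc'; the '>i' length header keeps the two sequences distinct.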
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
# Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
      # 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
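      # Worked example of the fold above (digest values hypothetical): a
      # 20-byte SHA-1 digest unpacks to five 32-bit ints d0..d4, and the
      # resulting ID is '%08X%08X%08X' % (d0 ^ d3, d1 ^ d4, d2).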
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError(
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name()))
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
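    # Illustrative: _EncodeComment('end */ here') returns
    # '/* end (*)/ here */'.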
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
    # Use quotation marks when any character outside of the range A-Z, a-z,
    # 0-9, $ (dollar sign), . (period), / (slash), and _ (underscore) is
    # present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
# in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
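    #
    # Illustrative applications of these rules:
    #   'Source/main.c'  ->  Source/main.c   (matches _unquoted, no quoting)
    #   'two words'      ->  "two words"     (space forces quoting)
    #   'a___b'          ->  "a___b"         (_quoted: three underscores)
    #   'say "hi"'       ->  "say \"hi\""    (quotation marks escaped)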
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError(property + ' not in ' + self.__class__.__name__)
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError(
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__)
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__)
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__)
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError("Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__)
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError(key + ' not in ' + self.__class__.__name__)
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
if not isinstance(value, property_type):
raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__)
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
    a/b/f1 and the groups a and b are collapsed into a single group a/b, f1
    will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
    other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
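    # Illustrative walk (names hypothetical): a file reference with path 'f1'
    # inside a group with path 'b' inside a group with path 'a' yields
    # 'a/b/f1'; if an ancestor's sourceTree is 'SDKROOT', the walk stops
    # early and yields a path like '$(SDKROOT)/...'.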
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
# It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
    # To add entropy, the hashable is enhanced with the names of all of its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
    # To avoid a circular reference, the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError('Found multiple children with path ' + child_path)
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError('Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path))
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
    # etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
def SortGroup(self):
self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x, y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'kext': 'wrapper.kext',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'swift': 'sourcecode.swift',
'ttf': 'file',
'xcassets': 'folder.assetcatalog',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
        'xcdatamodeld': 'wrapper.xcdatamodeld',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
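      # Illustrative outcomes (paths hypothetical): a path of 'foo/bar.cc'
      # yields lastKnownFileType 'sourcecode.cpp.cpp', while 'rules.gyp'
      # yields explicitFileType 'sourcecode' via prop_map.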
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError(name)
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
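
    For example (GCC_OPTIMIZATION_LEVEL is just a sample setting name), after
    SetBuildSetting('GCC_OPTIMIZATION_LEVEL', '0') is called on this list,
    HasBuildSetting('GCC_OPTIMIZATION_LEVEL') returns 1.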
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError('Variant values for ' + key)
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError(
self.__class__.__name__ + ' must implement FileGroup')
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError('Found multiple build files with path ' + path)
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
    if path is not None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError('Found multiple build files for ' + \
xcfilelikeelement.Name())
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile(r'^\$\((.*)\)(/(.*)|)$')
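  # Illustrative matches (the example paths are hypothetical):
  #   '$(BUILT_PRODUCTS_DIR)/Plugins' -> group 1 'BUILT_PRODUCTS_DIR',
  #                                      group 3 'Plugins'
  #   '$(BUILT_PRODUCTS_DIR)'         -> group 1 'BUILT_PRODUCTS_DIR',
  #                                      group 3 None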
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_FRAMEWORKS_DIR': 10, # Frameworks Directory
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
    # : 6, # Executables
# : 7, # Resources
# : 15, # Java Resources
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError('Can\'t use path %s in a %s' % \
(path, self.__class__.__name__))
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
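  # Illustrative shapes of the two proxy kinds (hypothetical values):
  #   same-project target proxy:   proxyType 1, containerPortal = this file's
  #       PBXProject, remoteGlobalIDString = the XCTarget.
  #   cross-project product proxy: proxyType 2, containerPortal = a
  #       PBXFileReference to the other .xcodeproj, remoteGlobalIDString = the
  #       remote PBXFileReference for the product.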
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject, the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.application.watchapp': ['wrapper.application',
'', '.app'],
'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.app-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
'com.apple.product-type.kernel-extension': ['wrapper.kext',
'', '.kext'],
}
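  # How the table is consumed in __init__ below (illustrative; 'my_app' is a
  # hypothetical productName): productType
  # 'com.apple.product-type.library.dynamic' yields explicitFileType
  # 'compiled.mach-o.dylib' and path 'libmy_app.dylib'
  # (prefix + product_name + suffix).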
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
          # Extension override.  If it's a wrapper (bundle), set
          # WRAPPER_EXTENSION; otherwise EXECUTABLE_EXTENSION is set.
suffix = '.' + force_extension
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project file,
        # but phases like PBXSourcesBuildPhase may only be present singly, and
        # this function is only meant to look up such singly-present phases.
        # Loop over the entire list of phases and assert if more than one of
        # the desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
        if posixpath.isabs(projectDirPath):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
      # Xcode seems to sort this list case-insensitively.
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists. Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
targets = other_pbxproject.GetProperty('targets')
if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
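      # Note: extending _hashables with a string adds each character as a
      # separate hashable, which is still enough to uniquify this group's ID.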
dir_path = project_ref._properties['path']
product_group._hashables.extend(dir_path)
return [product_group, project_ref]
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
# The value of inherit_unique_symroot decides, if a configuration is assumed
# to inherit a unique 'SYMROOT' attribute from its parent, if it doesn't
# define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
for s in self._DefinedSymroots(target):
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
# Returns all values for the 'SYMROOT' attribute defined in all
# configurations for this target. If any configuration doesn't define the
# 'SYMROOT' attribute, None is added to the returned set. If all
# configurations don't define the 'SYMROOT' attribute, an empty set is
# returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
    # Returns True if the given 'SYMROOT' value is considered unique for a
    # target. A value is considered unique if the Xcode macro '$SRCROOT'
    # appears in it in any form.
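    # For instance (hypothetical build settings): '$SRCROOT/build' and
    # '$(SRCROOT)/../out' both count as unique; a shared literal such as
    # '/tmp/build' does not.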
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItem to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 46],
'rootObject': [0, PBXProject, 1, 1],
})
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
# loop do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
| mit |
xxd3vin/spp-sdk | opt/Python27/Lib/test/test_abc.py | 3 | 8024 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Unit tests for abc.py."""
import unittest, weakref
from test import test_support
import abc
from inspect import isabstract
class TestABC(unittest.TestCase):
def test_abstractmethod_basics(self):
@abc.abstractmethod
def foo(self): pass
self.assertTrue(foo.__isabstractmethod__)
def bar(self): pass
self.assertFalse(hasattr(bar, "__isabstractmethod__"))
def test_abstractproperty_basics(self):
@abc.abstractproperty
def foo(self): pass
self.assertTrue(foo.__isabstractmethod__)
def bar(self): pass
self.assertFalse(hasattr(bar, "__isabstractmethod__"))
class C:
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def foo(self): return 3
class D(C):
@property
def foo(self): return super(D, self).foo
self.assertEqual(D().foo, 3)
def test_abstractmethod_integration(self):
for abstractthing in [abc.abstractmethod, abc.abstractproperty]:
class C:
__metaclass__ = abc.ABCMeta
@abstractthing
def foo(self): pass # abstract
def bar(self): pass # concrete
self.assertEqual(C.__abstractmethods__, set(["foo"]))
self.assertRaises(TypeError, C) # because foo is abstract
self.assertTrue(isabstract(C))
class D(C):
def bar(self): pass # concrete override of concrete
self.assertEqual(D.__abstractmethods__, set(["foo"]))
self.assertRaises(TypeError, D) # because foo is still abstract
self.assertTrue(isabstract(D))
class E(D):
def foo(self): pass
self.assertEqual(E.__abstractmethods__, set())
E() # now foo is concrete, too
self.assertFalse(isabstract(E))
class F(E):
@abstractthing
def bar(self): pass # abstract override of concrete
self.assertEqual(F.__abstractmethods__, set(["bar"]))
self.assertRaises(TypeError, F) # because bar is abstract now
self.assertTrue(isabstract(F))
def test_subclass_oldstyle_class(self):
class A:
__metaclass__ = abc.ABCMeta
class OldstyleClass:
pass
self.assertFalse(issubclass(OldstyleClass, A))
self.assertFalse(issubclass(A, OldstyleClass))
def test_type_has_no_abstractmethods(self):
# type pretends not to have __abstractmethods__.
self.assertRaises(AttributeError, getattr, type, "__abstractmethods__")
class meta(type):
pass
self.assertRaises(AttributeError, getattr, meta, "__abstractmethods__")
def test_isinstance_class(self):
class A:
__metaclass__ = abc.ABCMeta
class OldstyleClass:
pass
self.assertFalse(isinstance(OldstyleClass, A))
self.assertTrue(isinstance(OldstyleClass, type(OldstyleClass)))
self.assertFalse(isinstance(A, OldstyleClass))
# This raises a recursion depth error, but is low-priority:
# self.assertTrue(isinstance(A, abc.ABCMeta))
def test_registration_basics(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
self.assertNotIsInstance(b, A)
self.assertNotIsInstance(b, (A,))
A.register(B)
self.assertTrue(issubclass(B, A))
self.assertTrue(issubclass(B, (A,)))
self.assertIsInstance(b, A)
self.assertIsInstance(b, (A,))
class C(B):
pass
c = C()
self.assertTrue(issubclass(C, A))
self.assertTrue(issubclass(C, (A,)))
self.assertIsInstance(c, A)
self.assertIsInstance(c, (A,))
def test_isinstance_invalidation(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(isinstance(b, A))
self.assertFalse(isinstance(b, (A,)))
A.register(B)
self.assertTrue(isinstance(b, A))
self.assertTrue(isinstance(b, (A,)))
def test_registration_builtins(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(int)
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
self.assertTrue(issubclass(int, A))
self.assertTrue(issubclass(int, (A,)))
class B(A):
pass
B.register(basestring)
self.assertIsInstance("", A)
self.assertIsInstance("", (A,))
self.assertTrue(issubclass(str, A))
self.assertTrue(issubclass(str, (A,)))
def test_registration_edge_cases(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(A) # should pass silently
class A1(A):
pass
self.assertRaises(RuntimeError, A1.register, A) # cycles not allowed
class B(object):
pass
A1.register(B) # ok
A1.register(B) # should pass silently
class C(A):
pass
A.register(C) # should pass silently
self.assertRaises(RuntimeError, C.register, A) # cycles not allowed
C.register(B) # ok
def test_register_non_class(self):
class A(object):
__metaclass__ = abc.ABCMeta
self.assertRaisesRegexp(TypeError, "Can only register classes",
A.register, 4)
def test_registration_transitiveness(self):
class A:
__metaclass__ = abc.ABCMeta
self.assertTrue(issubclass(A, A))
self.assertTrue(issubclass(A, (A,)))
class B:
__metaclass__ = abc.ABCMeta
self.assertFalse(issubclass(A, B))
self.assertFalse(issubclass(A, (B,)))
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
class C:
__metaclass__ = abc.ABCMeta
A.register(B)
class B1(B):
pass
self.assertTrue(issubclass(B1, A))
self.assertTrue(issubclass(B1, (A,)))
class C1(C):
pass
B1.register(C1)
self.assertFalse(issubclass(C, B))
self.assertFalse(issubclass(C, (B,)))
self.assertFalse(issubclass(C, B1))
self.assertFalse(issubclass(C, (B1,)))
self.assertTrue(issubclass(C1, A))
self.assertTrue(issubclass(C1, (A,)))
self.assertTrue(issubclass(C1, B))
self.assertTrue(issubclass(C1, (B,)))
self.assertTrue(issubclass(C1, B1))
self.assertTrue(issubclass(C1, (B1,)))
C1.register(int)
class MyInt(int):
pass
self.assertTrue(issubclass(MyInt, A))
self.assertTrue(issubclass(MyInt, (A,)))
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
def test_all_new_methods_are_called(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
counter = 0
def __new__(cls):
B.counter += 1
return super(B, cls).__new__(cls)
class C(A, B):
pass
self.assertEqual(B.counter, 0)
C()
self.assertEqual(B.counter, 1)
def test_cache_leak(self):
# See issue #2521.
class A(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def f(self):
pass
class C(A):
def f(self):
A.f(self)
r = weakref.ref(C)
# Trigger cache.
C().f()
del C
test_support.gc_collect()
self.assertEqual(r(), None)
def test_main():
test_support.run_unittest(TestABC)
if __name__ == "__main__":
unittest.main()
| mit |
mateor/pdroid | android-2.3.4_r1/tags/1.23/build/tools/merge-event-log-tags.py | 12 | 5397 | #!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: merge-event-log-tags.py [-o output_file] [input_files...]
Merge together zero or more event-logs-tags files to produce a single
output file, stripped of comments. Checks that no tag numbers conflict
and fails if they do.
-h to display this usage message and exit.
"""
import cStringIO
import getopt
import md5
import struct
import sys
import event_log_tags
errors = []
warnings = []
output_file = None
pre_merged_file = None
# Tags with a tag number of ? are assigned a tag in the range
# [ASSIGN_START, ASSIGN_LIMIT).
ASSIGN_START = 900000
ASSIGN_LIMIT = 1000000
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:m:")
except getopt.GetoptError, err:
print str(err)
print __doc__
sys.exit(2)
for o, a in opts:
if o == "-h":
print __doc__
sys.exit(2)
elif o == "-o":
output_file = a
elif o == "-m":
pre_merged_file = a
else:
print >> sys.stderr, "unhandled option %s" % (o,)
sys.exit(1)
# Restrictions on tags:
#
# Tag names must be unique. (If the tag number and description are
# also the same, a warning is issued instead of an error.)
#
# Explicit tag numbers must be unique. (If the tag name is also the
# same, no error is issued because the above rule will issue a
# warning or error.)
by_tagname = {}
by_tagnum = {}
pre_merged_tags = {}
if pre_merged_file:
for t in event_log_tags.TagFile(pre_merged_file).tags:
pre_merged_tags[t.tagname] = t
for fn in args:
tagfile = event_log_tags.TagFile(fn)
for t in tagfile.tags:
tagnum = t.tagnum
tagname = t.tagname
description = t.description
if t.tagname in by_tagname:
orig = by_tagname[t.tagname]
# Allow an explicit tag number to define an implicit tag number
if orig.tagnum is None:
orig.tagnum = t.tagnum
elif t.tagnum is None:
t.tagnum = orig.tagnum
if (t.tagnum == orig.tagnum and
t.description == orig.description):
# if the name and description are identical, issue a warning
# instead of failing (to make it easier to move tags between
# projects without breaking the build).
tagfile.AddWarning("tag \"%s\" (%s) duplicated in %s:%d" %
(t.tagname, t.tagnum, orig.filename, orig.linenum),
linenum=t.linenum)
else:
tagfile.AddError(
"tag name \"%s\" used by conflicting tag %s from %s:%d" %
(t.tagname, orig.tagnum, orig.filename, orig.linenum),
linenum=t.linenum)
continue
if t.tagnum is not None and t.tagnum in by_tagnum:
orig = by_tagnum[t.tagnum]
if t.tagname != orig.tagname:
tagfile.AddError(
"tag number %d used by conflicting tag \"%s\" from %s:%d" %
(t.tagnum, orig.tagname, orig.filename, orig.linenum),
linenum=t.linenum)
continue
by_tagname[t.tagname] = t
if t.tagnum is not None:
by_tagnum[t.tagnum] = t
errors.extend(tagfile.errors)
warnings.extend(tagfile.warnings)
if errors:
for fn, ln, msg in errors:
print >> sys.stderr, "%s:%d: error: %s" % (fn, ln, msg)
sys.exit(1)
if warnings:
for fn, ln, msg in warnings:
print >> sys.stderr, "%s:%d: warning: %s" % (fn, ln, msg)
# Python's hash function (a) isn't great and (b) varies between
# versions of python. Using md5 is overkill here but is the same from
# platform to platform and speed shouldn't matter in practice.
def hashname(name):
  d = md5.md5(name).digest()[:4]
return struct.unpack("!I", d)[0]
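# Illustrative behavior (hedged; the concrete value for any given name is
# whatever the md5 digest yields, but it is stable across runs and Python
# versions): hashname("foo") unpacks the first four digest bytes as a
# big-endian unsigned 32-bit int, which the loop below folds into
# [ASSIGN_START, ASSIGN_LIMIT).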
# Assign a tag number to all the entries that say they want one
# assigned. We do this based on a hash of the tag name so that the
# numbers should stay relatively stable as tags are added.
# If we were provided pre-merged tags (w/ the -m option), then don't
# ever try to allocate one, just fail if we don't have a number
for name, t in sorted(by_tagname.iteritems()):
if t.tagnum is None:
if pre_merged_tags:
try:
        t.tagnum = pre_merged_tags[t.tagname].tagnum
except KeyError:
print >> sys.stderr, ("Error: Tag number not defined for tag `%s'."
+" Have you done a full build?") % t.tagname
sys.exit(1)
else:
while True:
x = (hashname(name) % (ASSIGN_LIMIT - ASSIGN_START - 1)) + ASSIGN_START
if x not in by_tagnum:
t.tagnum = x
by_tagnum[x] = t
break
name = "_" + name
# by_tagnum should be complete now; we've assigned numbers to all tags.
buffer = cStringIO.StringIO()
for n, t in sorted(by_tagnum.iteritems()):
if t.description:
buffer.write("%d %s %s\n" % (t.tagnum, t.tagname, t.description))
else:
buffer.write("%d %s\n" % (t.tagnum, t.tagname))
event_log_tags.WriteOutput(output_file, buffer)
| gpl-3.0 |
ptemplier/ansible | lib/ansible/modules/cloud/ovirt/ovirt_affinity_label_facts.py | 71 | 5541 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_label_facts
short_description: Retrieve facts about one or more oVirt/RHV affinity labels
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV affinity labels."
notes:
- "This module creates a new top-level C(ovirt_affinity_labels) fact, which
contains a list of affinity labels."
options:
name:
description:
- "Name of the affinity labels which should be listed."
vm:
description:
- "Name of the VM, which affinity labels should be listed."
host:
description:
- "Name of the host, which affinity labels should be listed."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all affinity labels, which names start with C(label):
- ovirt_affinity_label_facts:
name: label*
- debug:
var: affinity_labels
# Gather facts about all affinity labels, which are assigned to VMs
# which names start with C(postgres):
- ovirt_affinity_label_facts:
vm: postgres*
- debug:
var: affinity_labels
# Gather facts about all affinity labels, which are assigned to hosts
# which names start with C(west):
- ovirt_affinity_label_facts:
host: west*
- debug:
var: affinity_labels
# Gather facts about all affinity labels, which are assigned to hosts
# which names start with C(west) or VMs which names start with C(postgres):
- ovirt_affinity_label_facts:
host: west*
vm: postgres*
- debug:
var: affinity_labels
'''
RETURN = '''
ovirt_affinity_labels:
description: "List of dictionaries describing the affinity labels. Affinity labels attribues are mapped to dictionary keys,
all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
name=dict(default=None),
host=dict(default=None),
vm=dict(default=None),
)
module = AnsibleModule(argument_spec)
if module._name == 'ovirt_affinity_labels_facts':
module.deprecate("The 'ovirt_affinity_labels_facts' module is being renamed 'ovirt_affinity_label_facts'", version=2.8)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
affinity_labels_service = connection.system_service().affinity_labels_service()
labels = []
all_labels = affinity_labels_service.list()
if module.params['name']:
labels.extend([
l for l in all_labels
if fnmatch.fnmatch(l.name, module.params['name'])
])
if module.params['host']:
hosts_service = connection.system_service().hosts_service()
labels.extend([
label
for label in all_labels
for host in connection.follow_link(label.hosts)
if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
])
if module.params['vm']:
vms_service = connection.system_service().vms_service()
labels.extend([
label
for label in all_labels
for vm in connection.follow_link(label.vms)
if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
])
if not (module.params['vm'] or module.params['host'] or module.params['name']):
labels = all_labels
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_affinity_labels=[
get_dict_of_struct(
struct=l,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for l in labels
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 |
tysonholub/twilio-python | tests/integration/api/v2010/account/sip/test_credential_list.py | 1 | 9645 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class CredentialListTestCase(IntegrationTestCase):
def test_list_request(self):
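        # Mock a 500 so the client raises, then verify the shape of the
        # request that was sent. The same pattern recurs in the other
        # *_request tests below.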
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/CredentialLists.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"credential_lists": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 11 Sep 2013 17:51:38 -0000",
"date_updated": "Wed, 11 Sep 2013 17:51:38 -0000",
"friendly_name": "Low Rises",
"sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"subresource_uris": {
"credentials": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json"
},
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
],
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists.json?PageSize=50&Page=0",
"next_page_uri": null,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists.json?PageSize=50&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"credential_lists": [],
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists.json?PageSize=50&Page=0",
"next_page_uri": null,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists.json?PageSize=50&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists.create(friendly_name="friendly_name")
values = {'FriendlyName': "friendly_name", }
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/CredentialLists.json',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 11 Sep 2013 17:51:38 -0000",
"date_updated": "Wed, 11 Sep 2013 17:51:38 -0000",
"friendly_name": "Low Rises",
"sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"subresource_uris": {
"credentials": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json"
},
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists.create(friendly_name="friendly_name")
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/CredentialLists/CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 11 Sep 2013 17:51:38 -0000",
"date_updated": "Wed, 11 Sep 2013 17:51:38 -0000",
"friendly_name": "Low Rises",
"sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"subresource_uris": {
"credentials": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json"
},
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(friendly_name="friendly_name")
values = {'FriendlyName': "friendly_name", }
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/CredentialLists/CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
data=values,
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Wed, 11 Sep 2013 17:51:38 -0000",
"date_updated": "Wed, 11 Sep 2013 17:51:38 -0000",
"friendly_name": "Low Rises",
"sid": "CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"subresource_uris": {
"credentials": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Credentials.json"
},
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP/CredentialLists/CLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(friendly_name="friendly_name")
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SIP/CredentialLists/CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sip \
.credential_lists(sid="CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
| mit |
ndawe/root_numpy | root_numpy/_tree.py | 3 | 23233 | import warnings
from glob import glob
import numpy as np
from .extern.six import string_types
from . import _librootnumpy
__all__ = [
'root2array',
'root2rec',
'list_trees',
'list_branches',
'list_structures',
'list_directories',
'tree2array',
'tree2rec',
'array2tree',
'array2root',
]
def _glob(filenames):
"""Glob a filename or list of filenames but always return the original
string if the glob didn't match anything so URLs for remote file access
are not clobbered.
"""
if isinstance(filenames, string_types):
filenames = [filenames]
matches = []
for name in filenames:
matched_names = glob(name)
if not matched_names:
# use the original string
matches.append(name)
else:
matches.extend(matched_names)
return matches
def list_trees(filename):
"""Get list of the tree names in a ROOT file.
Parameters
----------
filename : str
Path to ROOT file.
Returns
-------
trees : list
List of tree names
"""
return _librootnumpy.list_trees(filename)
def list_branches(filename, treename=None):
"""Get a list of the branch names of a tree in a ROOT file.
Parameters
----------
filename : str
Path to ROOT file.
treename : str, optional (default=None)
Name of tree in the ROOT file.
(optional if the ROOT file has only one tree).
Returns
-------
branches : list
List of branch names
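    Examples
    --------
    A minimal sketch (assuming 'test.root' contains a tree named 'tree';
    the file and tree names are placeholders):
    >>> from root_numpy import list_branches
    >>> list_branches('test.root', treename='tree')   # doctest: +SKIP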
"""
return _librootnumpy.list_branches(filename, treename)
def list_directories(filename):
"""Get a list of the directories in a ROOT file.
Parameters
----------
filename : str
Path to ROOT file.
Returns
-------
directories : list
List of directory names.
"""
return _librootnumpy.list_directories(filename)
def list_structures(filename, treename=None):
"""Get a dictionary mapping branch names to leaf structures.
.. warning:: ``list_structures`` is deprecated and will be removed in
release 5.0.0.
Parameters
----------
filename : str
Path to ROOT file.
treename : str, optional (default=None)
Name of tree in the ROOT file
(optional if the ROOT file has only one tree).
Returns
-------
structures : OrderedDict
An ordered dictionary mapping branch names to leaf structures.
"""
warnings.warn("list_structures is deprecated and will be "
"removed in 5.0.0.", DeprecationWarning)
return _librootnumpy.list_structures(filename, treename)
def root2array(filenames,
treename=None,
branches=None,
selection=None,
object_selection=None,
start=None,
stop=None,
step=None,
include_weight=False,
weight_name='weight',
cache_size=-1,
warn_missing_tree=False):
"""Convert trees in ROOT files into a numpy structured array.
Refer to the documentation of :func:`tree2array`.
Parameters
----------
filenames : str or list
ROOT file name pattern or list of patterns. Wildcarding is supported by
Python globbing.
treename : str, optional (default=None)
Name of the tree to convert (optional if each file contains exactly one
tree).
branches : list of strings and tuples or a string or tuple, optional (default=None)
List of branches and expressions to include as columns of the array or
a single branch or expression in which case a nonstructured array is
returned. If None then include all branches that can be converted.
Branches or expressions that result in variable-length subarrays can be
truncated at a fixed length by using the tuple ``(branch_or_expression,
fill_value, length)`` or converted into a single value with
``(branch_or_expression, fill_value)`` where ``length==1`` is implied.
``fill_value`` is used when the original array is shorter than
``length``. This truncation is after any object selection performed
with the ``object_selection`` argument.
selection : str, optional (default=None)
Only include entries fulfilling this condition. If the condition
evaluates to multiple values per tree entry (e.g. conditions on array
branches) then an entry will be included if the condition evaluates to
true for at least one array element.
object_selection : dict, optional (default=None)
A dictionary mapping selection strings to branch names or lists of
branch names. Only array elements passing the selection strings will be
included in the output array per entry in the tree. The branches
specified must be variable-length array-type branches and the length of
the selection and branches it acts on must match for each tree entry.
For example ``object_selection={'a > 0': ['a', 'b']}`` will include all
elements of 'a' and corresponding elements of 'b' where 'a > 0' for
each tree entry. 'a' and 'b' must have the same length in every tree
entry.
start, stop, step: int, optional (default=None)
The meaning of the ``start``, ``stop`` and ``step`` parameters is the
same as for Python slices. If a range is supplied (by setting some of
the ``start``, ``stop`` or ``step`` parameters), only the entries in
that range and fulfilling the ``selection`` condition (if defined) are
used.
include_weight : bool, optional (default=False)
Include a column containing the tree weight ``TTree::GetWeight()``.
Note that this will be the same value for all entries unless the tree
is actually a TChain containing multiple trees with different weights.
weight_name : str, optional (default='weight')
The field name for the weight column if ``include_weight=True``.
cache_size : int, optional (default=-1)
Set the size (in bytes) of the TTreeCache used while reading a TTree. A
value of -1 uses ROOT's default cache size. A value of 0 disables the
cache.
warn_missing_tree : bool, optional (default=False)
If True, then warn when a tree is missing from an input file instead of
raising an IOError.
Notes
-----
* Refer to the :ref:`type conversion table <conversion_table>`.
See Also
--------
tree2array
array2tree
array2root
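    Examples
    --------
    A minimal sketch (assuming 'test.root' contains a single tree with
    branches 'a' and 'b'; all names here are placeholders):
    >>> from root_numpy import root2array
    >>> arr = root2array('test.root')                        # doctest: +SKIP
    >>> arr = root2array('test.root', treename='tree',       # doctest: +SKIP
    ...                  branches=['a', 'b'], selection='a > 0')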
"""
filenames = _glob(filenames)
if not filenames:
raise ValueError("specify at least one filename")
if treename is None:
trees = list_trees(filenames[0])
if len(trees) > 1:
raise ValueError(
"treename must be specified if the file "
"contains more than one tree")
elif not trees:
raise IOError(
"no trees present in {0}".format(filenames[0]))
treename = trees[0]
if isinstance(branches, string_types):
# single branch selected
flatten = branches
branches = [branches]
elif isinstance(branches, tuple):
if len(branches) not in (2, 3):
raise ValueError(
"invalid branch tuple: {0}. "
"A branch tuple must contain two elements "
"(branch_name, fill_value) or three elements "
"(branch_name, fill_value, length) "
"to yield a single value or truncate, respectively".format(branches))
flatten = branches[0]
branches = [branches]
else:
flatten = False
arr = _librootnumpy.root2array_fromfile(
filenames, treename, branches,
selection, object_selection,
start, stop, step,
include_weight,
weight_name,
cache_size,
warn_missing_tree)
if flatten:
# select single column
return arr[flatten]
return arr
def root2rec(filenames,
treename=None,
branches=None,
selection=None,
object_selection=None,
start=None,
stop=None,
step=None,
include_weight=False,
weight_name='weight',
cache_size=-1,
warn_missing_tree=False): # pragma: no cover
"""View the result of :func:`root2array` as a record array.
.. warning:: ``root2rec`` is deprecated and will be removed in
release 5.0.0. Instead use ``root2array(...).view(np.recarray)``.
Notes
-----
* This is equivalent to::
root2array(filenames, treename, branches).view(np.recarray)
* Refer to the :ref:`type conversion table <conversion_table>`.
See Also
--------
root2array
"""
warnings.warn("root2rec is deprecated and will be removed in 5.0.0. "
"Instead use root2array(...).view(np.recarray)",
DeprecationWarning)
return root2array(filenames, treename,
branches, selection, object_selection,
start, stop, step,
include_weight,
weight_name,
cache_size,
warn_missing_tree).view(np.recarray)
def tree2array(tree,
branches=None,
selection=None,
object_selection=None,
start=None,
stop=None,
step=None,
include_weight=False,
weight_name='weight',
cache_size=-1):
"""Convert a tree into a numpy structured array.
Convert branches of strings and basic types such as bool, int, float,
double, etc. as well as variable-length and fixed-length multidimensional
arrays and 1D or 2D vectors of basic types and strings. ``tree2array`` can
also create columns in the output array that are expressions involving the
TTree branches (i.e. ``'vect.Pt() / 1000'``) similar to ``TTree::Draw()``.
See the notes below for important details.
Parameters
----------
tree : ROOT TTree instance
The ROOT TTree to convert into an array.
branches : list of strings and tuples or a string or tuple, optional (default=None)
List of branches and expressions to include as columns of the array or
a single branch or expression in which case a nonstructured array is
returned. If None then include all branches that can be converted.
Branches or expressions that result in variable-length subarrays can be
truncated at a fixed length by using the tuple ``(branch_or_expression,
fill_value, length)`` or converted into a single value with
``(branch_or_expression, fill_value)`` where ``length==1`` is implied.
``fill_value`` is used when the original array is shorter than
``length``. This truncation is after any object selection performed
with the ``object_selection`` argument.
selection : str, optional (default=None)
Only include entries fulfilling this condition. If the condition
evaluates to multiple values per tree entry (e.g. conditions on array
branches) then an entry will be included if the condition evaluates to
true for at least one array element.
object_selection : dict, optional (default=None)
A dictionary mapping selection strings to branch names or lists of
branch names. Only array elements passing the selection strings will be
included in the output array per entry in the tree. The branches
specified must be variable-length array-type branches and the length of
the selection and branches it acts on must match for each tree entry.
For example ``object_selection={'a > 0': ['a', 'b']}`` will include all
elements of 'a' and corresponding elements of 'b' where 'a > 0' for
each tree entry. 'a' and 'b' must have the same length in every tree
entry.
start, stop, step: int, optional (default=None)
The meaning of the ``start``, ``stop`` and ``step`` parameters is the
same as for Python slices. If a range is supplied (by setting some of
the ``start``, ``stop`` or ``step`` parameters), only the entries in
that range and fulfilling the ``selection`` condition (if defined) are
used.
include_weight : bool, optional (default=False)
Include a column containing the tree weight ``TTree::GetWeight()``.
Note that this will be the same value for all entries unless the tree
is actually a TChain containing multiple trees with different weights.
weight_name : str, optional (default='weight')
The field name for the weight column if ``include_weight=True``.
cache_size : int, optional (default=-1)
Set the size (in bytes) of the TTreeCache used while reading a TTree. A
value of -1 uses ROOT's default cache size. A value of 0 disables the
cache.
Notes
-----
Types are converted according to the following table:
.. _conversion_table:
======================== ===============================
ROOT NumPy
======================== ===============================
``Bool_t`` ``np.bool``
``Char_t`` ``np.int8``
``UChar_t`` ``np.uint8``
``Short_t`` ``np.int16``
``UShort_t`` ``np.uint16``
``Int_t`` ``np.int32``
``UInt_t`` ``np.uint32``
``Float_t`` ``np.float32``
``Double_t`` ``np.float64``
``Long64_t`` ``np.int64``
``ULong64_t`` ``np.uint64``
``<type>[2][3]...`` ``(<nptype>, (2, 3, ...))``
``<type>[nx][2]...`` ``np.object``
``string`` ``np.object``
``vector<t>`` ``np.object``
``vector<vector<t> >`` ``np.object``
======================== ===============================
* Variable-length arrays (such as ``x[nx][2]``) and vectors (such as
``vector<int>``) are converted to NumPy arrays of the corresponding
types.
* Fixed-length arrays are converted to fixed-length NumPy array fields.
**Branches with different lengths:**
Note that when converting trees that have branches of different lengths
into numpy arrays, the shorter branches will be extended to match the
length of the longest branch by repeating their last values. If all
requested branches are shorter than the longest branch in the tree, this
will result in a "read failure" since beyond the end of the longest
requested branch no additional bytes will be read from the file and
root_numpy is unable to distinguish this from other ROOT errors that result
in no bytes being read. In this case, explicitly set the ``stop`` argument
to the length of the longest requested branch.
See Also
--------
root2array
array2root
array2tree
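    Examples
    --------
    A minimal sketch (assuming ``tree`` is an existing ROOT TTree with
    branches 'a' and 'b'; the branch names are placeholders):
    >>> from root_numpy import tree2array
    >>> arr = tree2array(tree, branches=['a', 'b'],          # doctest: +SKIP
    ...                  selection='a > 0', start=0, stop=1000)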
"""
import ROOT
if not isinstance(tree, ROOT.TTree):
raise TypeError("tree must be a ROOT.TTree")
cobj = ROOT.AsCObject(tree)
if isinstance(branches, string_types):
# single branch selected
flatten = branches
branches = [branches]
elif isinstance(branches, tuple):
if len(branches) not in (2, 3):
raise ValueError(
"invalid branch tuple: {0}. "
"A branch tuple must contain two elements "
"(branch_name, fill_value) or three elements "
"(branch_name, fill_value, length) "
"to yield a single value or truncate, respectively".format(branches))
flatten = branches[0]
branches = [branches]
else:
flatten = False
arr = _librootnumpy.root2array_fromtree(
cobj, branches, selection, object_selection,
start, stop, step,
include_weight,
weight_name,
cache_size)
if flatten:
# select single column
return arr[flatten]
return arr
def tree2rec(tree,
branches=None,
selection=None,
object_selection=None,
start=None,
stop=None,
step=None,
include_weight=False,
weight_name='weight',
cache_size=-1): # pragma: no cover
"""View the result of :func:`tree2array` as a record array.
.. warning:: ``tree2rec`` is deprecated and will be removed in
release 5.0.0. Instead use ``tree2array(...).view(np.recarray)``.
Notes
-----
* This is equivalent to::
tree2array(treename, branches).view(np.recarray)
* Refer to the :ref:`type conversion table <conversion_table>`.
See Also
--------
tree2array
"""
warnings.warn("tree2rec is deprecated and will be removed in 5.0.0. "
"Instead use tree2array(...).view(np.recarray)",
DeprecationWarning)
return tree2array(tree,
branches=branches,
selection=selection,
object_selection=object_selection,
start=start,
stop=stop,
step=step,
include_weight=include_weight,
weight_name=weight_name,
cache_size=cache_size).view(np.recarray)
def array2tree(arr, name='tree', tree=None):
"""Convert a numpy structured array into a ROOT TTree.
Fields of basic types, strings, and fixed-size subarrays of basic types are
supported. ``np.object`` and ``np.float16`` are currently not supported.
Parameters
----------
arr : array
A numpy structured array
name : str (optional, default='tree')
Name of the created ROOT TTree if ``tree`` is None.
tree : ROOT TTree (optional, default=None)
An existing ROOT TTree to be extended by the numpy array. Any branch
with the same name as a field in the numpy array will be extended as
long as the types are compatible, otherwise a TypeError is raised. New
branches will be created and filled for all new fields.
Returns
-------
root_tree : a ROOT TTree
Notes
-----
When using the ``tree`` argument to extend and/or add new branches to an
existing tree, note that it is possible to create branches of different
lengths. This will result in a warning from ROOT when root_numpy calls the
tree's ``SetEntries()`` method. Beyond that, the tree should still be
usable. While it might not be generally recommended to create branches with
differing lengths, this behaviour could be required in certain situations.
root_numpy makes no attempt to prevent such behaviour as this would be more
strict than ROOT itself. Also see the note about converting trees that have
branches of different lengths into numpy arrays in the documentation of
:func:`tree2array`.
See Also
--------
array2root
root2array
tree2array
Examples
--------
Convert a numpy array into a tree:
>>> from root_numpy import array2tree
>>> import numpy as np
>>>
>>> a = np.array([(1, 2.5, 3.4),
... (4, 5, 6.8)],
... dtype=[('a', np.int32),
... ('b', np.float32),
... ('c', np.float64)])
>>> tree = array2tree(a)
>>> tree.Scan()
************************************************
* Row * a * b * c *
************************************************
* 0 * 1 * 2.5 * 3.4 *
* 1 * 4 * 5 * 6.8 *
************************************************
Add new branches to an existing tree (continuing from the example above):
>>> b = np.array([(4, 10),
... (3, 5)],
... dtype=[('d', np.int32),
... ('e', np.int32)])
>>> array2tree(b, tree=tree)
<ROOT.TTree object ("tree") at 0x1449970>
>>> tree.Scan()
************************************************************************
* Row * a * b * c * d * e *
************************************************************************
* 0 * 1 * 2.5 * 3.4 * 4 * 10 *
* 1 * 4 * 5 * 6.8 * 3 * 5 *
************************************************************************
"""
import ROOT
if tree is not None:
if not isinstance(tree, ROOT.TTree):
raise TypeError("tree must be a ROOT.TTree")
incobj = ROOT.AsCObject(tree)
else:
incobj = None
cobj = _librootnumpy.array2tree_toCObj(arr, name=name, tree=incobj)
return ROOT.BindObject(cobj, 'TTree')
def array2root(arr, filename, treename='tree', mode='update'):
"""Convert a numpy array into a ROOT TTree and save it in a ROOT TFile.
Fields of basic types, strings, and fixed-size subarrays of basic types are
supported. ``np.object`` and ``np.float16`` are currently not supported.
Parameters
----------
arr : array
A numpy structured array
filename : str
Name of the output ROOT TFile. A new file will be created if it doesn't
already exist.
treename : str (optional, default='tree')
Name of the ROOT TTree that will be created. If a TTree with the same
name already exists in the TFile, it will be extended as documented in
:func:`array2tree`.
mode : str (optional, default='update')
Mode used to open the ROOT TFile ('update' or 'recreate').
See Also
--------
array2tree
tree2array
root2array
Examples
--------
>>> from root_numpy import array2root, root2array
>>> import numpy as np
>>>
>>> a = np.array([(1, 2.5, 3.4),
... (4, 5, 6.8)],
... dtype=[('a', np.int32),
... ('b', np.float32),
... ('c', np.float64)])
>>> array2root(a, 'test.root', mode='recreate')
>>> root2array('test.root')
array([(1, 2.5, 3.4), (4, 5.0, 6.8)],
dtype=[('a', '<i4'), ('b', '<f4'), ('c', '<f8')])
>>>
>>> a = np.array(['', 'a', 'ab', 'abc', 'xyz', ''],
... dtype=[('string', 'S3')])
>>> array2root(a, 'test.root', mode='recreate')
>>> root2array('test.root')
array([('',), ('a',), ('ab',), ('abc',), ('xyz',), ('',)],
dtype=[('string', 'S3')])
>>>
>>> a = np.array([([1, 2, 3],),
... ([4, 5, 6],)],
... dtype=[('array', np.int32, (3,))])
>>> array2root(a, 'test.root', mode='recreate')
>>> root2array('test.root')
array([([1, 2, 3],), ([4, 5, 6],)],
dtype=[('array', '<i4', (3,))])
"""
_librootnumpy.array2root(arr, filename, treename, mode)
| bsd-3-clause |
AOSP-S4-KK/platform_external_chromium_org | chrome/common/extensions/docs/server2/schema_util_test.py | 36 | 4110 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from copy import deepcopy
from schema_util import RemoveNoDocs, DetectInlineableTypes, InlineDocs
class SchemaUtilTest(unittest.TestCase):
def testRemoveNoDocs(self):
expected_nodoc = [
{
'name': 'B',
'list': [
{
'name': 'B2'
}
]
},
{
'name': 'D',
'nodoc': False
},
{
'name': 'E',
'items1': [
{
'name': 'E1',
'items': [
{
'name': 'E1.3'
}
]
},
{
'name': 'E2'
}
]
}
]
nodoc_data = [
{
'name': 'A',
'nodoc': True
},
{
'name': 'B',
'list': [
{
'name': 'B1',
'nodoc': True
},
{
'name': 'B2'
},
{
'name': 'B3',
'nodoc': True
}
]
},
{
'name': 'C',
'nodoc': True
},
{
'name': 'D',
'nodoc': False
},
{
'name': 'E',
'dict': {
'name': 'Ed',
'nodoc': True
},
'items1': [
{
'name': 'E1',
'items': [
{
'name': 'E1.1',
'nodoc': True
},
{
'name': 'E1.2',
'nodoc': True
},
{
'name': 'E1.3'
}
]
},
{
'name': 'E2'
},
{
'name': 'E3',
'nodoc': True
}
]
}
]
RemoveNoDocs(nodoc_data)
self.assertEquals(expected_nodoc, nodoc_data)
def testInlineDocs(self):
schema = {
'namespace': 'storage',
'properties': {
'key2': {
'description': 'second key',
'$ref': 'Key'
},
'key1': {
'description': 'first key',
'$ref': 'Key'
}
},
'types': [
{
'inline_doc': True,
'type': 'string',
'id': 'Key', # Should be inlined into both properties and be removed
# from types.
'description': 'This is a key.', # This description should disappear.
'marker': True # This should appear three times in the output.
},
{
'items': {
'$ref': 'Key'
},
'type': 'array',
'id': 'KeyList',
'description': 'A list of keys'
}
]
}
expected_schema = {
'namespace': 'storage',
'properties': {
'key2': {
'marker': True,
'type': 'string',
'description': 'second key'
},
'key1': {
'marker': True,
'type': 'string',
'description': 'first key'
}
},
'types': [
{
'items': {
'marker': True,
'type': 'string'
},
'type': 'array',
'id': 'KeyList',
'description': 'A list of keys'
}
]
}
inlined_schema = deepcopy(schema)
InlineDocs(inlined_schema)
self.assertEqual(expected_schema, inlined_schema)
def testDetectInline(self):
schema = {
'types': [
{
'id': 'Key',
'items': {
'$ref': 'Value'
}
},
{
'id': 'Value',
'marker': True
}
]
}
expected_schema = {
'types': [
{
'id': 'Key',
'items': {
'marker': True,
}
}
]
}
DetectInlineableTypes(schema)
InlineDocs(schema)
self.assertEqual(expected_schema, schema)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
pizzathief/numpy | numpy/core/tests/test_ufunc.py | 4 | 81118 | from __future__ import division, absolute_import, print_function
import warnings
import itertools
import pytest
import numpy as np
import numpy.core._umath_tests as umt
import numpy.linalg._umath_linalg as uml
import numpy.core._operand_flag_tests as opflag_tests
import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
assert_allclose,
)
from numpy.compat import pickle
class TestUfuncKwargs(object):
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
assert_raises(TypeError, np.add, 1, 2, subokx=False)
assert_raises(TypeError, np.add, 1, 2, wherex=[True])
def test_sig_signature(self):
assert_raises(ValueError, np.add, 1, 2, sig='ii->i',
signature='ii->i')
def test_sig_dtype(self):
assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i',
dtype=int)
assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
dtype=int)
def test_extobj_refcount(self):
# Should not segfault with USE_DEBUG.
assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
class TestUfuncGenericLoops(object):
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
    It is difficult to ensure that each of these loops is entered from the
    Python level, as the special-cased loops are a moving target and the
    corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
"""
np_dtypes = [
(np.single, np.single), (np.single, np.double),
(np.csingle, np.csingle), (np.csingle, np.cdouble),
(np.double, np.double), (np.longdouble, np.longdouble),
(np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
def f2(x, y):
return x**y
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs, xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
# class to use in testing object method loops
class foo(object):
def conjugate(self):
return np.bool_(1)
def logical_xor(self, obj):
return np.bool_(1)
def test_unary_PyUFunc_O_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.abs(x) == 1))
def test_unary_PyUFunc_O_O_method(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.conjugate(x) == True))
def test_binary_PyUFunc_OO_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.add(x, x) == 2))
def test_binary_PyUFunc_OO_O_method(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_binary_PyUFunc_On_Om_method(self, foo=foo):
x = np.full((10, 2, 3), foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
class TestUfunc(object):
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(pickle.dumps(np.sin,
protocol=proto)) is np.sin)
# Check that ufunc not defined in the top level numpy namespace
# such as numpy.core._rational_tests.test_add can also be pickled
res = pickle.loads(pickle.dumps(_rational_tests.test_add,
protocol=proto))
assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
def test_reduceat_shifting_sum(self):
L = 6
x = np.arange(L)
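        # Interleaved indices [0, 2, 1, 3, 2, 4, 3, 5]: the even output slots
        # of reduceat sum the sliding windows x[i:i+2], giving [1, 3, 5, 7].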
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
def test_all_ufunc(self):
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O
n 1 absolute nums + O complex -> real
n 1 negative nums + O
n 1 sign nums + O -> int
n 1 invert bool + ints + O flts raise an error
n 1 degrees real + M cmplx raise an error
n 1 radians real + M cmplx raise an error
n 1 arccos flts + M
n 1 arccosh flts + M
n 1 arcsin flts + M
n 1 arcsinh flts + M
n 1 arctan flts + M
n 1 arctanh flts + M
n 1 cos flts + M
n 1 sin flts + M
n 1 tan flts + M
n 1 cosh flts + M
n 1 sinh flts + M
n 1 tanh flts + M
n 1 exp flts + M
n 1 expm1 flts + M
n 1 log flts + M
n 1 log10 flts + M
n 1 log1p flts + M
n 1 sqrt flts + M real x < 0 raises error
n 1 ceil real + M
n 1 trunc real + M
n 1 floor real + M
n 1 fabs real + M
n 1 rint flts + M
n 1 isnan flts -> bool
n 1 isinf flts -> bool
n 1 isfinite flts -> bool
n 1 signbit real -> bool
n 1 modf real -> (frac, int)
n 1 logical_not bool + nums + M -> bool
n 2 left_shift ints + O flts raise an error
n 2 right_shift ints + O flts raise an error
n 2 add bool + nums + O boolean + is ||
n 2 subtract bool + nums + O boolean - is ^
n 2 multiply bool + nums + O boolean * is &
n 2 divide nums + O
n 2 floor_divide nums + O
n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
n 2 fmod nums + M
n 2 power nums + O
n 2 greater bool + nums + O -> bool
n 2 greater_equal bool + nums + O -> bool
n 2 less bool + nums + O -> bool
n 2 less_equal bool + nums + O -> bool
n 2 equal bool + nums + O -> bool
n 2 not_equal bool + nums + O -> bool
n 2 logical_and bool + nums + M -> bool
n 2 logical_or bool + nums + M -> bool
n 2 logical_xor bool + nums + M -> bool
n 2 maximum bool + nums + O
n 2 minimum bool + nums + O
n 2 bitwise_and bool + ints + O flts raise an error
n 2 bitwise_or bool + ints + O flts raise an error
n 2 bitwise_xor bool + ints + O flts raise an error
n 2 arctan2 real + M
n 2 remainder ints + real + O
n 2 hypot real + M
===== ==== ============= =============== ========================
Types other than those listed will be accepted, but they are cast to
the smallest compatible type for which the function is defined. The
casting rules are:
bool -> int8 -> float32
ints -> double
"""
pass
# from include/numpy/ufuncobject.h
size_inferred = 2
can_ignore = 4
def test_signature0(self):
# the arguments to test_signature are: nin, nout, core_signature
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i),(i)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 1, 0))
assert_equal(ixs, (0, 0))
assert_equal(flags, (self.size_inferred,))
assert_equal(sizes, (-1,))
def test_signature1(self):
# empty core signature; treat as plain ufunc (with trivial core)
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(),()->()")
assert_equal(enabled, 0)
assert_equal(num_dims, (0, 0, 0))
assert_equal(ixs, ())
assert_equal(flags, ())
assert_equal(sizes, ())
def test_signature2(self):
# more complicated names for variables
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i1,i2),(J_1)->(_kAB)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 1))
assert_equal(ixs, (0, 1, 2, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature3(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, u"(i1, i12), (J_1)->(i12, i2)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 2))
assert_equal(ixs, (0, 1, 2, 1, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature4(self):
# matrix_multiply signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n,k),(k,m)->(n,m)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred,)*3)
assert_equal(sizes, (-1, -1, -1))
def test_signature5(self):
# matmul signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n?,k),(k,m?)->(n?,m?)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred | self.can_ignore,
self.size_inferred,
self.size_inferred | self.can_ignore))
assert_equal(sizes, (-1, -1, -1))
def test_signature6(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
1, 1, "(3)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 0))
assert_equal(ixs, (0,))
assert_equal(flags, (0,))
assert_equal(sizes, (3,))
def test_signature7(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3),(03,3),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (0, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature8(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3?),(3?,3?),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature_failure_extra_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "((i)),(i)->()")
def test_signature_failure_mismatching_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),)i(->()")
def test_signature_failure_signature_missing_input_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),->()")
def test_signature_failure_signature_missing_output_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 2, "(i),(i)->()")
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
def test_forced_sig(self):
a = 0.5*np.arange(3, dtype='f8')
assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
casting='unsafe'), [0, 0, 1])
b = np.zeros((3,), dtype='f8')
np.add(a, 0.5, out=b)
assert_equal(b, [0.5, 1, 1.5])
b[:] = 0
np.add(a, 0.5, sig='i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
def test_true_divide(self):
a = np.array(10)
b = np.array(20)
tgt = np.array(0.5)
for tc in 'bhilqBHILQefdgFDG':
dt = np.dtype(tc)
aa = a.astype(dt)
bb = b.astype(dt)
# Check result value and dtype.
for x, y in itertools.product([aa, -aa], [bb, -bb]):
# Check with no output type specified
if tc in 'FDG':
tgt = complex(x)/complex(y)
else:
tgt = float(x)/float(y)
res = np.true_divide(x, y)
rtol = max(np.finfo(res).resolution, 1e-15)
assert_allclose(res, tgt, rtol=rtol)
if tc in 'bhilqBHILQ':
assert_(res.dtype.name == 'float64')
else:
assert_(res.dtype.name == dt.name )
# Check with output type specified. This also checks for the
# incorrect casts in issue gh-3484 because the unary '-' does
# not change types, even for unsigned types, Hence casts in the
# ufunc from signed to unsigned and vice versa will lead to
# errors in the values.
for tcout in 'bhilqBHILQ':
dtout = np.dtype(tcout)
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
for tcout in 'efdg':
dtout = np.dtype(tcout)
if tc in 'FDG':
# Casting complex to float is not allowed
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
else:
tgt = float(x)/float(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
atol = max(np.finfo(dtout).tiny, 3e-308)
# Some test values result in invalid for float16.
with np.errstate(invalid='ignore'):
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res) and tcout == 'e':
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
for tcout in 'FDG':
dtout = np.dtype(tcout)
tgt = complex(x)/complex(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
atol = max(np.finfo(dtout).tiny, 3e-308)
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res):
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
# Check booleans
a = np.ones((), dtype=np.bool_)
res = np.true_divide(a, a)
assert_(res == 1.0)
assert_(res.dtype.name == 'float64')
res = np.true_divide(~a, a)
assert_(res == 0.0)
assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
# warning if sum overflows, which it does in float16
overflow = not np.isfinite(tgt)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_almost_equal(np.sum(d), tgt)
assert_equal(len(w), 1 * overflow)
assert_almost_equal(np.sum(d[::-1]), tgt)
assert_equal(len(w), 2 * overflow)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
assert_almost_equal(np.sum(d[1::2]), 250.)
assert_almost_equal(np.sum(d[::3]), 167.)
assert_almost_equal(np.sum(d[1::3]), 167.)
assert_almost_equal(np.sum(d[::-2]), 250.)
assert_almost_equal(np.sum(d[-1::-2]), 250.)
assert_almost_equal(np.sum(d[::-3]), 167.)
assert_almost_equal(np.sum(d[-1::-3]), 167.)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.)
def test_sum_complex(self):
for dt in (np.complex64, np.complex128, np.clongdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt) + 1j
assert_almost_equal(np.sum(d[::2]), 250. + 250j)
assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
assert_almost_equal(np.sum(d[::3]), 167. + 167j)
assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_almost_equal(d, 2. + 2j)
def test_sum_initial(self):
# Integer, single axis
assert_equal(np.sum([3], initial=2), 5)
# Floating point
assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
# Multiple non-adjacent axes
assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
[12, 12, 12])
def test_sum_where(self):
# More extensive tests done in test_reduction_with_where.
assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.)
assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5.,
where=[True, False]), [9., 5.])
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
a = np.arange(6)
assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
def test_broadcast(self):
msg = "broadcast"
a = np.arange(4).reshape((2, 1, 2))
b = np.arange(4).reshape((1, 2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
# Broadcast in core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
assert_raises(ValueError, umt.inner1d, a, b)
# Extend core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.array(7)
assert_raises(ValueError, umt.inner1d, a, b)
# Broadcast should fail
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
assert_raises(ValueError, umt.inner1d, a, b)
# Writing to a broadcasted array with overlap should warn, gh-2705
a = np.arange(2)
b = np.arange(4).reshape((2, 2))
u, v = np.broadcast_arrays(a, b)
assert_equal(u.strides[0], 0)
x = u + v
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
u += v
assert_equal(len(w), 1)
assert_(x[0,0] != u[0, 0])
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "type cast on one argument"
a = np.arange(6).reshape((2, 3))
b = a + 0.1
assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
err_msg=msg)
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def test_incontiguous_array(self):
msg = "incontiguous memory layout of array"
x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
a = x[:, 0,:, 0,:, 0]
b = x[:, 1,:, 1,:, 1]
a[0, 0, 0] = -1
msg2 = "make sure it references to the original array"
assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
x = np.arange(24).reshape(2, 3, 4)
a = x.T
b = x.T
a[0, 0, 0] = -1
assert_equal(x[0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
def test_output_argument(self):
msg = "output argument"
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(4).reshape((2, 1, 2)) + 1
c = np.zeros((2, 3), dtype='int')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with type cast"
c = np.zeros((2, 3), dtype='int16')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with incontiguous layout"
c = np.zeros((2, 3, 4), dtype='int16')
umt.inner1d(a, b, c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_axes_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
# basic tests on inputs (outputs tested below with matrix_multiply).
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
# default
c = inner1d(a, b, axes=[(-1,), (-1,), ()])
assert_array_equal(c, (a * b).sum(-1))
# integers ok for single axis.
c = inner1d(a, b, axes=[-1, -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# mix fine
c = inner1d(a, b, axes=[(-1,), -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# can omit last axis.
c = inner1d(a, b, axes=[-1, -1])
assert_array_equal(c, (a * b).sum(-1))
# can pass in other types of integer (with __index__ protocol)
c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
assert_array_equal(c, (a * b).sum(-1))
# swap some axes
c = inner1d(a, b, axes=[0, 0])
assert_array_equal(c, (a * b).sum(0))
c = inner1d(a, b, axes=[0, 2])
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
# Check errors for improperly constructed axes arguments.
# should have list.
assert_raises(TypeError, inner1d, a, b, axes=-1)
# needs enough elements
assert_raises(ValueError, inner1d, a, b, axes=[-1])
# should pass in indices.
assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
# cannot pass an index unless there is only one dimension
# (output is wrong in this case)
assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
# or pass in generally the wrong number of axes
assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
# axes need to have same length.
assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
# matrix_multiply signature: '(m,n),(n,p)->(m,p)'
mm = umt.matrix_multiply
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(8).reshape((2, 2, 2, 1)) + 1
# Sanity check.
c = mm(a, b)
assert_array_equal(c, np.matmul(a, b))
# Default axes.
c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
assert_array_equal(c, np.matmul(a, b))
# Default with explicit axes.
c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
assert_array_equal(c, np.matmul(a, b))
# swap some axes.
c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
b.transpose(0, 3, 1, 2)))
# Default with output array.
c = np.empty((2, 2, 3, 1))
d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b))
# Transposed output array
c = np.empty((1, 2, 2, 3))
d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
# Check errors for improperly constructed axes arguments.
# wrong argument
assert_raises(TypeError, mm, a, b, axis=1)
# axes should be list
assert_raises(TypeError, mm, a, b, axes=1)
assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
# list needs to have right length
assert_raises(ValueError, mm, a, b, axes=[])
assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
# list should contain tuples for multiple axes
assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1])
assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
assert_raises(TypeError,
mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
assert_raises(TypeError,
mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
# tuples should not have duplicated values
assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
# arrays should have enough axes.
z = np.zeros((2, 2))
assert_raises(ValueError, mm, z, z[0])
assert_raises(ValueError, mm, z, z, out=z[:, 0])
assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
# Regular ufuncs should not accept axes.
assert_raises(TypeError, np.add, 1., 1., axes=[0])
# should be able to deal with bad unrelated kwargs.
assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
def test_axis_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, axis=-1)
assert_array_equal(c, (a * b).sum(-1))
out = np.zeros_like(c)
d = inner1d(a, b, axis=-1, out=out)
assert_(d is out)
assert_array_equal(d, c)
c = inner1d(a, b, axis=0)
assert_array_equal(c, (a * b).sum(0))
# Sanity checks on innerwt and cumsum.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, axis=0),
np.sum(a * b * w, axis=0))
assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
out = np.empty_like(a)
b = umt.cumsum(a, out=out, axis=0)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=0))
b = umt.cumsum(a, out=out, axis=1)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=-1))
# Check errors.
# Cannot pass in both axis and axes.
assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
# Not an integer.
assert_raises(TypeError, inner1d, a, b, axis=[0])
# more than 1 core dimensions.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, axis=1)
# Output wrong size in axis.
out = np.empty((1, 2, 3), dtype=a.dtype)
assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
# Regular ufuncs should not accept axis.
assert_raises(TypeError, np.add, 1., 1., axis=0)
def test_keepdims_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
out = np.zeros_like(c)
d = inner1d(a, b, keepdims=True, out=out)
assert_(d is out)
assert_array_equal(d, c)
# Now combined with axis and axes.
c = inner1d(a, b, axis=-1, keepdims=False)
assert_array_equal(c, (a * b).sum(-1, keepdims=False))
c = inner1d(a, b, axis=-1, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = inner1d(a, b, axis=0, keepdims=False)
assert_array_equal(c, (a * b).sum(0, keepdims=False))
c = inner1d(a, b, axis=0, keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = inner1d(a, b, axes=[0, 0], keepdims=False)
assert_array_equal(c, (a * b).sum(0))
c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = inner1d(a, b, axes=[0, 2], keepdims=False)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
c = inner1d(a, b, axes=[0, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
# Hardly useful, but should work.
c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
.sum(1, keepdims=True))
# Check with two core dimensions.
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected = uml.det(a)
c = uml.det(a, keepdims=False)
assert_array_equal(c, expected)
c = uml.det(a, keepdims=True)
assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected_s, expected_l = uml.slogdet(a)
cs, cl = uml.slogdet(a, keepdims=False)
assert_array_equal(cs, expected_s)
assert_array_equal(cl, expected_l)
cs, cl = uml.slogdet(a, keepdims=True)
assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
# Sanity check on innerwt.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
np.sum(a * b * w, axis=-1, keepdims=True))
assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
np.sum(a * b * w, axis=0, keepdims=True))
# Check errors.
# Not a boolean
assert_raises(TypeError, inner1d, a, b, keepdims='true')
# More than 1 core dimension, and core output dimensions.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, keepdims=True)
assert_raises(TypeError, mm, a, b, keepdims=False)
# Regular ufuncs should not accept keepdims.
assert_raises(TypeError, np.add, 1., 1., keepdims=False)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_cross1d(self):
"""Test with fixed-sized signature."""
a = np.eye(3)
assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
out = np.zeros((3, 3))
result = umt.cross1d(a[0], a, out)
assert_(result is out)
assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
def test_can_ignore_signature(self):
# Comparing the effects of ? in signature:
# matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
# matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
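# A standalone sketch of the same "?" semantics using the public
# np.matmul, which shares the (m?,n),(n,p?)->(m?,p?) signature
# (assumption: plain NumPy, independent of the umt test module):
import numpy as np
assert np.matmul(np.ones((2, 3)), np.ones(3)).shape == (2,)   # p? removed
assert np.matmul(np.ones(3), np.ones((3, 2))).shape == (2,)   # m? removed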
mat = np.arange(12).reshape((2, 3, 2))
single_vec = np.arange(2)
col_vec = single_vec[:, np.newaxis]
col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
# matrix @ single column vector with proper dimension
mm_col_vec = umt.matrix_multiply(mat, col_vec)
# matmul does the same thing
matmul_col_vec = umt.matmul(mat, col_vec)
assert_array_equal(matmul_col_vec, mm_col_vec)
# matrix @ vector without dimension making it a column vector.
# matrix multiply fails -> missing core dim.
assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
# matmul mimicker passes, and returns a vector.
matmul_col = umt.matmul(mat, single_vec)
assert_array_equal(matmul_col, mm_col_vec.squeeze())
# Now with a column array: same as for column vector,
# broadcasting sensibly.
mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
matmul_col_vec = umt.matmul(mat, col_vec_array)
assert_array_equal(matmul_col_vec, mm_col_vec)
# As above, but for row vector
single_vec = np.arange(3)
row_vec = single_vec[np.newaxis, :]
row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
# row vector @ matrix
mm_row_vec = umt.matrix_multiply(row_vec, mat)
matmul_row_vec = umt.matmul(row_vec, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# single row vector @ matrix
assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
matmul_row = umt.matmul(single_vec, mat)
assert_array_equal(matmul_row, mm_row_vec.squeeze())
# row vector array @ matrix
mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
matmul_row_vec = umt.matmul(row_vec_array, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# Now for vector combinations
# row vector @ column vector
col_vec = row_vec.T
col_vec_array = row_vec_array.swapaxes(-2, -1)
mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
matmul_row_col_vec = umt.matmul(row_vec, col_vec)
assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
# single row vector @ single col vector
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
matmul_row_col = umt.matmul(single_vec, single_vec)
assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
# row vector array @ column vector array
mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
assert_array_equal(matmul_row_col_array, mm_row_col_array)
# Finally, check that things are *not* squeezed if one gives an
# output.
out = np.zeros_like(mm_row_col_array)
out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
out[:] = 0
out = umt.matmul(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
# And check one cannot put missing dimensions back.
out = np.zeros_like(mm_row_col_vec)
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
out)
# But fine for matmul, since it is just a broadcast.
out = umt.matmul(single_vec, single_vec, out)
assert_array_equal(out, mm_row_col_vec.squeeze())
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
def test_matrix_multiply_umath_empty(self):
res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
assert_array_equal(res, np.zeros((0, 0)))
res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
assert_array_equal(res, np.zeros((10, 10)))
def compare_matrix_multiply_results(self, tp):
d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
msg = "matrix multiply on type %s" % d1.dtype.name
def permute_n(n):
if n == 1:
return ([0],)
ret = ()
base = permute_n(n-1)
for perm in base:
for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
ret += (new,)
return ret
def slice_n(n):
if n == 0:
return ((),)
ret = ()
base = slice_n(n-1)
for sl in base:
ret += (sl+(slice(None),),)
ret += (sl+(slice(0, 1),),)
return ret
def broadcastable(s1, s2):
return s1 == s2 or s1 == 1 or s2 == 1
permute_3 = permute_n(3)
slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
ref = True
for p1 in permute_3:
for p2 in permute_3:
for s1 in slice_3:
for s2 in slice_3:
a1 = d1.transpose(p1)[s1]
a2 = d2.transpose(p2)[s2]
ref = ref and a1.base is not None
ref = ref and a2.base is not None
if (a1.shape[-1] == a2.shape[-2] and
broadcastable(a1.shape[0], a2.shape[0])):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
a1[..., np.newaxis,:], axis=-1),
err_msg=msg + ' %s %s' % (str(a1.shape),
str(a2.shape)))
assert_equal(ref, True, err_msg="reference check")
def test_euclidean_pdist(self):
a = np.arange(12, dtype=float).reshape(4, 3)
out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
umt.euclidean_pdist(a, out)
b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
b = b[~np.tri(a.shape[0], dtype=bool)]
assert_almost_equal(out, b)
# An output array is required to determine p with signature (n,d)->(p)
assert_raises(ValueError, umt.euclidean_pdist, a)
def test_cumsum(self):
a = np.arange(10)
result = umt.cumsum(a)
assert_array_equal(result, a.cumsum())
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_comparison(self):
class HasComparisons(object):
def __eq__(self, other):
return '=='
arr0d = np.array(HasComparisons())
assert_equal(arr0d == arr0d, True)
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
arr1d = np.array([HasComparisons()])
assert_equal(arr1d == arr1d, np.array([True]))
assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
assert_equal(np.array([[1], [2, 3]], dtype=object)
.sum(initial=[0], where=[False, True]), [0, 2, 3])
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
arr = np.ones(4, dtype=object)
arr[:] = [[1] for i in range(4)]
# Twice reproduced also for tuples:
np.add.accumulate(arr, out=arr)
np.add.accumulate(arr, out=arr)
assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]]))
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
np.add.accumulate(arr, out=arr, axis=-1)
np.add.accumulate(arr, out=arr, axis=-1)
assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]]))
def test_object_array_reduceat_inplace(self):
# Checks that in-place reduceats work, see also gh-7465
arr = np.empty(4, dtype=object)
arr[:] = [[1] for i in range(4)]
out = np.empty(4, dtype=object)
out[:] = [[1] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr)
np.add.reduceat(arr, np.arange(4), out=arr)
assert_array_equal(arr, out)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
out = np.ones((2, 4), dtype=object)
out[0, :] = [[2] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
def test_axis_out_of_bounds(self):
a = np.array([False, False])
assert_raises(np.AxisError, a.all, axis=1)
a = np.array([False, False])
assert_raises(np.AxisError, a.all, axis=-2)
a = np.array([False, False])
assert_raises(np.AxisError, a.any, axis=1)
a = np.array([False, False])
assert_raises(np.AxisError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
# even for scalars
assert_equal(np.sum(3, axis=0), 3)
assert_equal(np.prod(3.5, axis=0), 3.5)
assert_equal(np.any(True, axis=0), True)
assert_equal(np.all(False, axis=0), False)
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
# Check scalar behaviour for ufuncs without an identity
assert_equal(np.power.reduce(3), 3)
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
# check if scalars/0-d arrays get cast
assert_(type(np.any(0, axis=0)) is np.bool_)
# assert that 0-d arrays get wrapped
class MyArray(np.ndarray):
pass
a = np.array(1).view(MyArray)
assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
def test_where_param_buffer_output(self):
# This test previously required masking features in the nditer;
# it now exercises where= together with casting on the output.
# With casting on output
a = np.ones(10, np.int64)
b = np.ones(10, np.int64)
c = 1.5 * np.ones(10, np.float64)
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def test_where_param_alloc(self):
# With casting and allocated output
a = np.array([1], dtype=np.int64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
# No casting and allocated output
a = np.array([1], dtype=np.float64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
def check_identityless_reduction(self, a):
# np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
a[1, 0, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
assert_equal(np.minimum.reduce(a, axis=0),
[[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 1, 1, 1], [0, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 1, 1], [0, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 1, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 0, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 0, 1] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 0, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[0, 1, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
def test_identityless_reduction_corder(self):
a = np.empty((2, 3, 4), order='C')
self.check_identityless_reduction(a)
def test_identityless_reduction_forder(self):
a = np.empty((2, 3, 4), order='F')
self.check_identityless_reduction(a)
def test_identityless_reduction_otherorder(self):
a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig(self):
a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig_unaligned(self):
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_initial_reduction(self):
# np.minimum.reduce is an identityless reduction
# For cases like np.maximum(np.abs(...), initial=0)
# More generally, a supremum over non-negative numbers.
assert_equal(np.maximum.reduce([], initial=0), 0)
# For cases like reduction of an empty array over the reals.
assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
# Random tests
assert_equal(np.minimum.reduce([5], initial=4), 4)
assert_equal(np.maximum.reduce([4], initial=5), 5)
assert_equal(np.maximum.reduce([5], initial=4), 5)
assert_equal(np.minimum.reduce([4], initial=5), 4)
# Check initial=None raises ValueError for both types of ufunc reductions
assert_raises(ValueError, np.minimum.reduce, [], initial=None)
assert_raises(ValueError, np.add.reduce, [], initial=None)
# Check that np._NoValue gives default behavior.
assert_equal(np.add.reduce([], initial=np._NoValue), 0)
# Check that initial kwarg behaves as intended for dtype=object
a = np.array([10], dtype=object)
res = np.add.reduce(a, initial=5)
assert_equal(res, 15)
@pytest.mark.parametrize('axis', (0, 1, None))
@pytest.mark.parametrize('where', (np.array([False, True, True]),
np.array([[True], [False], [True]]),
np.array([[True, False, False],
[False, True, False],
[False, True, True]])))
def test_reduction_with_where(self, axis, where):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.zeros_like(a)
np.positive(a, out=a_check, where=where)
res = np.add.reduce(a, axis=axis, where=where)
check = a_check.sum(axis)
assert_equal(res, check)
# Check we do not overwrite elements of a internally.
assert_array_equal(a, a_copy)
@pytest.mark.parametrize(('axis', 'where'),
((0, np.array([True, False, True])),
(1, [True, True, False]),
(None, True)))
@pytest.mark.parametrize('initial', (-np.inf, 5.))
def test_reduction_with_where_and_initial(self, axis, where, initial):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.full(a.shape, -np.inf)
np.positive(a, out=a_check, where=where)
res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
check = a_check.max(axis, initial=initial)
assert_equal(res, check)
def test_reduction_where_initial_needed(self):
a = np.arange(9.).reshape(3, 3)
m = [False, True, False]
assert_raises(ValueError, np.maximum.reduce, a, where=m)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
# If we have an n x m array and do a reduction with axis=1, then we are
# doing n reductions, and each reduction takes an m-element array. For
# a reduction operation without an identity:
# n > 0, m > 0: fine
# n = 0, m > 0: fine, doing 0 reductions of m-element arrays
# n > 0, m = 0: can't reduce a 0-element array, ValueError
# n = 0, m = 0: can't reduce a 0-element array, ValueError (for
# consistency with the above case)
# This test doesn't actually look at return values; it just checks that
# we get an error in exactly those cases where we expect one, and
# assumes the calculations themselves are done correctly.
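# A minimal standalone sketch of the rule above (plain NumPy, independent
# of the ok/err/t helpers defined next):
import numpy as np
np.add.reduce(np.zeros((0, 3)), axis=1)        # identity exists: always fine
np.maximum.reduce(np.zeros((0, 3)), axis=1)    # 0 reductions of 3-element rows
try:
    np.maximum.reduce(np.zeros((3, 0)), axis=1)  # 3 reductions of 0-element rows
except ValueError:
    pass  # no identity, so an empty reduction cannot produce a value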
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
n // 2, n // 2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In versions >= 1.10, 'same_kind' is the
# default, and an exception is raised instead of a warning
# when 'same_kind' is not satisfied.
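# The same policy can be queried directly; a small sketch (plain NumPy):
import numpy as np
assert not np.can_cast(np.float64, np.int64, casting='same_kind')
assert np.can_cast(np.float64, np.int64, casting='unsafe')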
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_raises(TypeError, np.add, a, 1.1, out=a)
def add_inplace(a, b):
a += b
assert_raises(TypeError, add_inplace, a, 1.1)
# Make sure that explicitly overriding the exception is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [2, 3, 4])
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=_rational_tests.rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = _rational_tests.test_add(a, b, c)
target = np.array([0, 2, 4], dtype=_rational_tests.rational)
assert_equal(result, target)
# no output type should raise TypeError
with assert_raises(TypeError):
_rational_tests.test_add(a, b)
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
b = np.arange(9, dtype='l').reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy.core._struct_ufunc_tests as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
assert_raises(RuntimeError, struct_ufunc.register_fail)
def test_custom_ufunc(self):
a = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
b = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
result = _rational_tests.test_add_rationals(a, b)
expected = np.array(
[_rational_tests.rational(1),
_rational_tests.rational(2, 3),
_rational_tests.rational(1, 2)],
dtype=_rational_tests.rational)
assert_equal(result, expected)
def test_custom_ufunc_forced_sig(self):
# gh-9351 - looking for a non-first userloop would previously hang
with assert_raises(TypeError):
np.multiply(_rational_tests.rational(1), 1,
signature=(_rational_tests.rational, int, None))
def test_custom_array_like(self):
class MyThing(object):
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > self.ndim:
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
def test_inplace_fancy_indexing(self):
a = np.arange(10)
np.add.at(a, [2, 5, 2], 1)
assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
a = np.arange(10)
b = np.array([100, 100, 100])
np.add.at(a, [2, 5, 2], b)
assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, (slice(None), [1, 2, 1]), b)
assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
assert_equal(a,
[[[0, 401, 202],
[3, 404, 205],
[6, 407, 208]],
[[9, 410, 211],
[12, 413, 214],
[15, 416, 217]],
[[18, 419, 220],
[21, 422, 223],
[24, 425, 226]]])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, ([1, 2, 1], slice(None)), b)
assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[203, 404, 605],
[106, 207, 308]],
[[9, 10, 11],
[212, 413, 614],
[115, 216, 317]],
[[18, 19, 20],
[221, 422, 623],
[124, 225, 326]]])
a = np.arange(9).reshape(3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (0, [1, 2, 1]), b)
assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]],
[[209, 410, 611],
[12, 13, 14],
[15, 16, 17]],
[[118, 219, 320],
[21, 22, 23],
[24, 25, 26]]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), slice(None)), b)
assert_equal(a,
[[[100, 201, 302],
[103, 204, 305],
[106, 207, 308]],
[[109, 210, 311],
[112, 213, 314],
[115, 216, 317]],
[[118, 219, 320],
[121, 222, 323],
[124, 225, 326]]])
a = np.arange(10)
np.negative.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
# Test 0-dim array
a = np.array(0)
np.add.at(a, (), 1)
assert_equal(a, 1)
assert_raises(IndexError, np.add.at, a, 0, 1)
assert_raises(IndexError, np.add.at, a, [], 1)
# Test mixed dtypes
a = np.arange(10)
np.power.at(a, [1, 2, 3, 2], 3.5)
assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
# Test boolean indexing and boolean ufuncs
a = np.arange(10)
index = a % 2 == 0
np.equal.at(a, index, [0, 2, 4, 6, 8])
assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
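# (np.equal.at writes its boolean results back into `a`, cast to the
# integer dtype, which is why every even slot above collapses to 1.)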
# Test unary operator
a = np.arange(10, dtype='u4')
np.invert.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
# Test empty subspace
orig = np.arange(4)
a = orig[:, None][:, 0:0]
np.add.at(a, [0, 1], 3)
assert_array_equal(orig, np.arange(4))
# Test with swapped byte order
index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
np.add.at(values, index, 3)
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
values = np.array(['a', 1], dtype=object)
assert_raises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=object))
# Test multiple output ufuncs raise error, gh-5665
assert_raises(ValueError, np.modf.at, np.arange(10), [1])
def test_reduce_arguments(self):
f = np.add.reduce
d = np.ones((5,2), dtype=int)
o = np.ones((2,), dtype=d.dtype)
r = o * 5
assert_equal(f(d), r)
# a, axis=0, dtype=None, out=None, keepdims=False
assert_equal(f(d, axis=0), r)
assert_equal(f(d, 0), r)
assert_equal(f(d, 0, dtype=None), r)
assert_equal(f(d, 0, dtype='i'), r)
assert_equal(f(d, 0, 'i'), r)
assert_equal(f(d, 0, None), r)
assert_equal(f(d, 0, None, out=None), r)
assert_equal(f(d, 0, None, out=o), r)
assert_equal(f(d, 0, None, o), r)
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
assert_equal(f(d, 0, None, None, False, 0), r)
assert_equal(f(d, 0, None, None, False, initial=0), r)
assert_equal(f(d, 0, None, None, False, 0, True), r)
assert_equal(f(d, 0, None, None, False, 0, where=True), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0,
where=True), r)
# too little
assert_raises(TypeError, f)
# too much
assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
assert_raises(TypeError, f, d, axis="invalid", dtype=None,
keepdims=True)
# invalid dtype
assert_raises(TypeError, f, d, 0, "invalid")
assert_raises(TypeError, f, d, dtype="invalid")
assert_raises(TypeError, f, d, dtype="invalid", out=None)
# invalid out
assert_raises(TypeError, f, d, 0, None, "invalid")
assert_raises(TypeError, f, d, out="invalid")
assert_raises(TypeError, f, d, out="invalid", dtype=None)
# keepdims boolean, no invalid value
# assert_raises(TypeError, f, d, 0, None, None, "invalid")
# assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
# invalid mix
assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
out=None)
# invalid keyword
assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
assert_raises(TypeError, f, d, invalid=0)
assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
out=None)
assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
out=None, invalid=0)
assert_raises(TypeError, f, d, axis=0, dtype=None,
out=None, invalid=0)
def test_structured_equal(self):
# https://github.com/numpy/numpy/issues/4855
class MyA(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*(input.view(np.ndarray)
for input in inputs), **kwargs)
a = np.arange(12.).reshape(4,3)
ra = a.view(dtype=('f8,f8,f8')).squeeze()
mra = ra.view(MyA)
target = np.array([ True, False, False, False], dtype=bool)
assert_equal(np.all(target == (mra == ra[0])), True)
def test_scalar_equal(self):
# Scalar comparisons should always work, without deprecation warnings,
# even when the ufunc fails.
a = np.array(0.)
b = np.array('a')
assert_(a != b)
assert_(b != a)
assert_(not (a == b))
assert_(not (b == a))
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.logical_and, np.logical_or, np.logical_xor, np.maximum,
np.minimum, np.mod,
np.greater, np.greater_equal, np.less, np.less_equal,
np.equal, np.not_equal]
a = np.array('1')
b = 1
c = np.array([1., 2.])
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
assert_raises(TypeError, f, c, a)
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
#
# gh-8036
x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
y = y_base[::2,:]
y_base_copy = y_base.copy()
r0 = np.add.reduce(x, out=y.copy(), axis=2)
r1 = np.add.reduce(x, out=y, axis=2)
# The results should match, and y_base shouldn't get clobbered
assert_equal(r0, r1)
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
def test_no_doc_string(self):
# gh-9337
assert_('\n' not in umt.inner1d_no_doc.__doc__)
def test_invalid_args(self):
# gh-7961
exc = pytest.raises(TypeError, np.sqrt, None)
# minimally check the exception text
assert exc.match('loop of ufunc does not support')
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_finite(self, nat):
try:
assert not np.isfinite(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_nan(self, nat):
try:
assert np.isnan(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_inf(self, nat):
try:
assert not np.isinf(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_types(ufunc):
'''
Check for all ufuncs that the correct type is returned. Avoid
object and boolean types since many operations are not defined
for them.
Choose the shape so that even dot and matmul will succeed.
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if 'O' in typ or '?' in typ:
continue
inp, out = typ.split('->')
args = [np.ones((3, 3), t) for t in inp]
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res = ufunc(*args)
if isinstance(res, tuple):
outs = tuple(out)
assert len(res) == len(outs)
for r, t in zip(res, outs):
assert r.dtype == np.dtype(t)
else:
assert res.dtype == np.dtype(out)
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_noncontiguous(ufunc):
'''
Check that contiguous and non-contiguous calls to ufuncs
have the same results for values in range(1, 7)
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if any(set('O?mM') & set(typ)):
# bool, object, datetime are too irregular for this simple test
continue
inp, out = typ.split('->')
args_c = [np.empty(6, t) for t in inp]
args_n = [np.empty(18, t)[::3] for t in inp]
for a in args_c:
a.flat = range(1,7)
for a in args_n:
a.flat = range(1,7)
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res_c = ufunc(*args_c)
res_n = ufunc(*args_n)
if len(out) == 1:
res_c = (res_c,)
res_n = (res_n,)
for c_ar, n_ar in zip(res_c, res_n):
dt = c_ar.dtype
if np.issubdtype(dt, np.floating):
# for floating-point results allow a small tolerance in comparisons
# since different algorithms (libm vs. intrinsics) can be used
# for different input strides
res_eps = np.finfo(dt).eps
tol = 2*res_eps
assert_allclose(res_c, res_n, atol=tol, rtol=tol)
else:
assert_equal(c_ar, n_ar)
| bsd-3-clause |
rs/flask-assets | docs/conf.py | 1 | 6691 | # -*- coding: utf-8 -*-
#
# Flask-Assets documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 6 14:01:08 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Assets'
copyright = u'2010, Michael Elsdörfer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'github_fork': 'miracle2k/flask-assets', 'index_logo': False}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Assetsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-Assets.tex', u'Flask-Assets Documentation',
u'Michael Elsdörfer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'webassets': ('http://elsdoerfer.name/docs/webassets/', None),
} | bsd-2-clause |
yamahata/neutron | neutron/plugins/vmware/extensions/networkgw.py | 4 | 8687 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from abc import abstractmethod
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.api.v2 import resource_helper
from neutron.plugins.vmware.common.utils import NetworkTypes
GATEWAY_RESOURCE_NAME = "network_gateway"
DEVICE_RESOURCE_NAME = "gateway_device"
# Use dash for alias and collection name
EXT_ALIAS = GATEWAY_RESOURCE_NAME.replace('_', '-')
NETWORK_GATEWAYS = "%ss" % EXT_ALIAS
GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-')
DEVICE_ID_ATTR = 'id'
IFACE_NAME_ATTR = 'interface_name'
# Attribute Map for Network Gateway Resource
# TODO(salvatore-orlando): add admin state as other neutron resources
RESOURCE_ATTRIBUTE_MAP = {
NETWORK_GATEWAYS: {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'default': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'devices': {'allow_post': True, 'allow_put': False,
'validate': {'type:device_list': None},
'is_visible': True},
'ports': {'allow_post': False, 'allow_put': False,
'default': [],
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True}
},
GATEWAY_DEVICES: {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'client_certificate': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'connector_type': {'allow_post': True, 'allow_put': True,
'validate': {'type:connector_type': None},
'is_visible': True},
'connector_ip': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_address': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
}
}
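# Illustrative payload the maps above validate (hypothetical names/UUIDs;
# a sketch, not captured API traffic):
# POST /v2.0/network-gateways
# {"network_gateway": {"name": "gw1",
#                      "tenant_id": "<tenant-uuid>",
#                      "devices": [{"id": "<device-uuid>",
#                                   "interface_name": "breth0"}]}}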
def _validate_device_list(data, valid_values=None):
"""Validate the list of service definitions."""
if not data:
# Devices must be provided
msg = _("Cannot create a gateway with an empty device list")
return msg
try:
for device in data:
key_specs = {DEVICE_ID_ATTR:
{'type:regex': attributes.UUID_PATTERN,
'required': True},
IFACE_NAME_ATTR:
{'type:string': None,
'required': False}}
err_msg = attributes._validate_dict(
device, key_specs=key_specs)
if err_msg:
return err_msg
unexpected_keys = [key for key in device if key not in key_specs]
if unexpected_keys:
err_msg = (_("Unexpected keys found in device description:%s")
% ",".join(unexpected_keys))
return err_msg
except TypeError:
return (_("%s: provided data are not iterable") %
_validate_device_list.__name__)
def _validate_connector_type(data, valid_values=None):
if not data:
# A connector type is compulsory
msg = _("A connector type is required to create a gateway device")
return msg
connector_types = (valid_values if valid_values else
[NetworkTypes.GRE,
NetworkTypes.STT,
NetworkTypes.BRIDGE,
'ipsec%s' % NetworkTypes.GRE,
'ipsec%s' % NetworkTypes.STT])
if data not in connector_types:
msg = _("Unknown connector type: %s") % data
return msg
nw_gw_quota_opts = [
cfg.IntOpt('quota_network_gateway',
default=5,
help=_('Number of network gateways allowed per tenant, '
'-1 for unlimited'))
]
cfg.CONF.register_opts(nw_gw_quota_opts, 'QUOTAS')
attributes.validators['type:device_list'] = _validate_device_list
attributes.validators['type:connector_type'] = _validate_connector_type
class Networkgw(object):
"""API extension for Layer-2 Gateway support.
The Layer-2 gateway feature allows for connecting neutron networks
with external networks at the layer-2 level. No assumption is made on
the location of the external network, which might not even be directly
reachable from the hosts where the VMs are deployed.
This is achieved by instantiating 'network gateways', and then connecting
Neutron networks to them.
"""
@classmethod
def get_name(cls):
return "Network Gateway"
@classmethod
def get_alias(cls):
return EXT_ALIAS
@classmethod
def get_description(cls):
return "Connects Neutron networks with external networks at layer 2."
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/network-gateway/api/v1.0"
@classmethod
def get_updated(cls):
return "2014-01-01T00:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
member_actions = {
GATEWAY_RESOURCE_NAME.replace('_', '-'): {
'connect_network': 'PUT',
'disconnect_network': 'PUT'}}
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
None,
action_map=member_actions,
register_quota=True,
translate_name=True)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class NetworkGatewayPluginBase(object):
@abstractmethod
def create_network_gateway(self, context, network_gateway):
pass
@abstractmethod
def update_network_gateway(self, context, id, network_gateway):
pass
@abstractmethod
def get_network_gateway(self, context, id, fields=None):
pass
@abstractmethod
def delete_network_gateway(self, context, id):
pass
@abstractmethod
def get_network_gateways(self, context, filters=None, fields=None):
pass
@abstractmethod
def connect_network(self, context, network_gateway_id,
network_mapping_info):
pass
@abstractmethod
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
pass
@abstractmethod
def create_gateway_device(self, context, gateway_device):
pass
@abstractmethod
def update_gateway_device(self, context, id, gateway_device):
pass
@abstractmethod
def delete_gateway_device(self, context, id):
pass
@abstractmethod
def get_gateway_device(self, context, id, fields=None):
pass
@abstractmethod
def get_gateway_devices(self, context, filters=None, fields=None):
pass
| apache-2.0 |
fishilico/selinux-refpolicy-patched | bin/find_missing_usr_fc.py | 1 | 4231 | #!/usr/bin/env python
"""
Arch Linux only uses the /usr/bin folder; /bin, /sbin and /usr/sbin are
symlinks to it. This program reads all .fc files and finds /bin, /sbin and
/usr/sbin definitions which don't have a matching /usr/bin definition.
The same applies to the /usr/lib and /lib folders.
"""
import re
import os
import os.path
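# Example of what this script flags (hypothetical .fc entries): given
#   /bin/ping  --  gen_context(system_u:object_r:ping_exec_t,s0)
# with no matching /usr/bin/ping entry, analyze_fc_file() below reports a
# "missing policy (from bin)" line for /usr/bin/ping.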
def analyze_fc_file(filename):
"""Analyze a .fc file"""
definitions = {'bin': {}, 'sbin': {}, 'usr/bin': {}, 'usr/sbin': {},
'lib': {}, 'usr/lib': {}}
# Read file
ifdef_level = 0
with open(filename, 'r') as f:
for line in f:
# Skip optional blocks
if line.startswith('ifdef(') or line.startswith('ifndef('):
ifdef_level += 1
elif line.startswith('\')'):
ifdef_level -= 1
if ifdef_level > 0:
continue
# /bin, /sbin, /usr/bin and /usr/sbin
matches = re.match(r'^/(?P<dir>(usr/)?s?bin)/(?P<path>\S+)\s+(?P<ftype>-.)?\s+(?P<context>\S+)', line.strip())
if matches is not None:
d, p, t, c = matches.group('dir', 'path', 'ftype', 'context')
# Ignore /bin/.*, /sbin/.*, /usr/bin/.* and /usr/sbin/.* definitions
if p != '.*':
definitions[d][p] = (t, c)
continue
# /lib, /usr/lib
matches = re.match(r'^/(?P<dir>(usr/)?lib)/(?P<path>\S+)\s+(?P<ftype>-.)?\s+(?P<context>\S+)', line.strip())
if matches is not None:
d, p, t, c = matches.group('dir', 'path', 'ftype', 'context')
# Ignore /lib/.* and /usr/lib/.* definitions
# and /lib/ld-[^/]*\.so(\.[^/]*)* and /lib/security/pam_poldi\.so
if p not in ('.*', 'ld-[^/]*\\.so(\\.[^/]*)*', 'security/pam_poldi\\.so'):
definitions[d][p] = (t, c)
continue
# /usr/s?bin and /usr/(s)?bin
matches = re.match(r'^/(usr/(s|\(s\))\?bin)/(?P<path>\S+)\s+(?P<ftype>-.)?\s+(?P<context>\S+)', line.strip())
if matches is not None:
p, t, c = matches.group('path', 'ftype', 'context')
definitions['usr/bin'][p] = (t, c)
definitions['usr/sbin'][p] = (t, c)
continue
if ifdef_level != 0:
print("{}: invalid ifdef level at end of file, {}".format(filename, ifdef_level))
return
# Check binary folder
bindef = definitions['usr/bin']
# Quirk to be able to treat refpolicy before changing /usr/sbin to /usr/s?bin
bindef.update(definitions['usr/sbin'])
for bindir in 'bin', 'sbin', 'usr/sbin':
for path, data in sorted(definitions[bindir].items()):
ftype, context = data
data2 = bindef.get(path)
if data2 is None:
print("{}: missing policy (from {}): /usr/bin/{} {} {}"
.format(filename, bindir, path, ftype or '', context))
elif data != data2:
ftype2, context2 = data2
print("{}: mismatching policy: <{}/{} {} {}> vs. </usr/bin/{} {} {}>"
.format(filename, bindir, path, ftype or '', context, path, ftype2 or '', context2))
# Check library folder
libdef = definitions['usr/lib']
for path, data in sorted(definitions['lib'].items()):
ftype, context = data
data2 = libdef.get(path)
if data2 is None:
print("{}: missing policy (from lib): /usr/lib/{} {} {}".format(filename, path, ftype or '', context))
elif data != data2:
ftype2, context2 = data2
print("{}: mismatching policy: </lib/{} {} {}> vs. </usr/lib/{} {} {}>"
.format(filename, path, ftype or '', context, path, ftype2 or '', context2))
def analyze_all_fc(dirname):
"""Analyze all .fc files in the specified directory"""
for dirpath, _, files in os.walk(dirname):
for filename in files:
if filename.endswith('.fc'):
analyze_fc_file(os.path.join(dirpath, filename))
if __name__ == '__main__':
BASE_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
analyze_all_fc(os.path.join(BASE_DIR, 'policy'))
| gpl-2.0 |
jbetten/DataStructures | test/lib/googletest/googletest/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
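# Illustrative usage sketch (an assumption, not executed here): running shard
# 1 of 3 is roughly equivalent to invoking the binary with
# GTEST_TOTAL_SHARDS=3 and GTEST_SHARD_INDEX=1 set in its environment:
#   tests_run, exit_code = RunWithSharding(3, 1, [])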
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
ljgabc/lfs | usr/lib/python2.7/idlelib/configSectionNameDialog.py | 150 | 3720 | """
Dialog that allows user to specify a new config file section name.
Used to get new highlight theme and keybinding set names.
"""
from Tkinter import *
import tkMessageBox
class GetCfgSectionNameDialog(Toplevel):
def __init__(self,parent,title,message,usedNames):
"""
message - string, informational message to display
usedNames - list, list of names already in use for validity check
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE,width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.message=message
self.usedNames=usedNames
self.result=''
self.CreateWidgets()
self.withdraw() #hide while setting geometry
self.update_idletasks()
#needs to be done here so that the winfo_reqwidth is valid
self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
self.geometry("+%d+%d" %
((parent.winfo_rootx()+((parent.winfo_width()/2)
-(self.winfo_reqwidth()/2)),
parent.winfo_rooty()+((parent.winfo_height()/2)
-(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
self.deiconify() #geometry set, unhide
self.wait_window()
def CreateWidgets(self):
self.name=StringVar(self)
self.fontSize=StringVar(self)
self.frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
self.frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
self.messageInfo=Message(self.frameMain,anchor=W,justify=LEFT,padx=5,pady=5,
text=self.message)#,aspect=200)
entryName=Entry(self.frameMain,textvariable=self.name,width=30)
entryName.focus_set()
self.messageInfo.pack(padx=5,pady=5)#,expand=TRUE,fill=BOTH)
entryName.pack(padx=5,pady=5)
frameButtons=Frame(self)
frameButtons.pack(side=BOTTOM,fill=X)
self.buttonOk = Button(frameButtons,text='Ok',
width=8,command=self.Ok)
self.buttonOk.grid(row=0,column=0,padx=5,pady=5)
self.buttonCancel = Button(frameButtons,text='Cancel',
width=8,command=self.Cancel)
self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
def NameOk(self):
#simple validity check for a sensible
#ConfigParser file section name
nameOk=1
name=self.name.get()
        name = name.strip()
if not name: #no name specified
tkMessageBox.showerror(title='Name Error',
message='No name specified.', parent=self)
nameOk=0
elif len(name)>30: #name too long
tkMessageBox.showerror(title='Name Error',
message='Name too long. It should be no more than '+
'30 characters.', parent=self)
nameOk=0
elif name in self.usedNames:
tkMessageBox.showerror(title='Name Error',
message='This name is already in use.', parent=self)
nameOk=0
return nameOk
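    # Illustrative examples (assumptions, not executed): '' and any name over
    # 30 characters fail NameOk above; 'my custom theme' passes provided it
    # is not already present in usedNames.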
def Ok(self, event=None):
if self.NameOk():
self.result=self.name.get().strip()
self.destroy()
def Cancel(self, event=None):
self.result=''
self.destroy()
if __name__ == '__main__':
#test the dialog
root=Tk()
def run():
        dlg=GetCfgSectionNameDialog(root,'Get Name',
                'The information here should need to be word wrapped. Test.',
                [])
print dlg.result
Button(root,text='Dialog',command=run).pack()
root.mainloop()
| gpl-2.0 |
zhujzhuo/Sahara | sahara/service/validations/edp/base.py | 8 | 2688 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cluster creation related checks"""
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
conductor = c.API
data_source_type = {
"type": "string",
"enum": ["swift", "hdfs", "maprfs"]
}
job_configs = {
"type": "object",
"properties": {
"configs": {
"type": "simple_config",
},
"params": {
"type": "simple_config",
},
"args": {
"type": "array",
"items": {
"type": "string"
}
}
},
"additionalProperties": False,
}
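# Illustrative example (an assumption, not part of the module) of a value
# that would validate against the job_configs schema above:
#   {
#       "configs": {"mapred.reduce.tasks": "1"},
#       "params": {"INPUT": "swift://container/object"},
#       "args": ["first", "second"]
#   }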
def check_data_source_unique_name(name):
if name in [ds.name for ds in conductor.data_source_get_all(
context.ctx())]:
raise ex.NameAlreadyExistsException(_("Data source with name '%s' "
"already exists") % name)
def check_data_source_exists(data_source_id):
if not conductor.data_source_get(context.ctx(), data_source_id):
raise ex.InvalidReferenceException(
_("DataSource with id '%s' doesn't exist") % data_source_id)
def check_job_unique_name(name):
if name in [j.name for j in conductor.job_get_all(context.ctx())]:
raise ex.NameAlreadyExistsException(_("Job with name '%s' "
"already exists") % name)
def check_job_binary_internal_exists(jbi_id):
if not conductor.job_binary_internal_get(context.ctx(), jbi_id):
raise ex.InvalidReferenceException(
_("JobBinaryInternal with id '%s' doesn't exist") % jbi_id)
def check_data_sources_are_different(data_source_1_id, data_source_2_id):
ds1 = conductor.data_source_get(context.ctx(), data_source_1_id)
ds2 = conductor.data_source_get(context.ctx(), data_source_2_id)
if ds1.type == ds2.type and ds1.url == ds2.url:
raise ex.InvalidDataException(_('Provided input and output '
'DataSources reference the same '
'location: %s') % ds1.url)
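# Illustrative sketch (an assumption, not executed): two data source ids that
# both resolve to type 'hdfs' and url 'hdfs://host/path' would make
# check_data_sources_are_different raise InvalidDataException.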
| apache-2.0 |
jayvdb/coala | coalib/bearlib/languages/documentation/DocBaseClass.py | 14 | 3590 | from coalib.bearlib.languages.documentation.DocstyleDefinition import (
DocstyleDefinition)
from coalib.results.Diff import Diff
from coalib.results.TextRange import TextRange
from coalib.bearlib.languages.documentation.DocumentationExtraction import (
extract_documentation_with_markers)
class DocBaseClass:
"""
    DocBaseClass holds important functions that extract, parse and
    generate diffs for documentation. All bears that process
    documentation should inherit from this.
"""
@staticmethod
def extract(content, language, docstyle):
"""
Extracts all documentation texts inside the given source-code-string
using the coala docstyle definition files.
The documentation texts are sorted by their order appearing in
``content``.
For more information about how documentation comments are
identified and extracted, see DocstyleDefinition.doctypes enumeration.
:param content: The source-code-string where to extract
documentation from. Needs to be a list
or tuple where each string item is a
single line(including ending whitespaces
like ``\\n``).
:param language: The programming language used.
:param docstyle: The documentation style/tool used
(e.g. doxygen).
:raises FileNotFoundError: Raised when the docstyle definition file
was not found.
:raises KeyError: Raised when the given language is not
defined in given docstyle.
:raises ValueError: Raised when a docstyle definition setting
has an invalid format.
:return: An iterator returning instances of
DocumentationComment or MalformedComment
found in the content.
"""
docstyle_definition = DocstyleDefinition.load(language, docstyle)
return extract_documentation_with_markers(
content, docstyle_definition)
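    # Illustrative usage sketch (assumes a 'python'/'default' docstyle
    # definition file is available; the snippet is not executed here):
    #   content = ['def foo():\n', '    """Docs."""\n', '    pass\n']
    #   for doc in DocBaseClass.extract(content, 'python', 'default'):
    #       print(doc)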
@staticmethod
def generate_diff(file, doc_comment, new_comment):
"""
Generates diff between the original doc_comment and its fix
new_comment which are instances of DocumentationComment.
:param doc_comment:
Original instance of DocumentationComment.
:param new_comment:
Fixed instance of DocumentationComment.
:return:
Diff instance.
"""
diff = Diff(file)
# We need to update old comment positions, as `assemble()`
# prepends indentation for first line.
old_range = TextRange.from_values(
doc_comment.range.start.line,
1,
doc_comment.range.end.line,
doc_comment.range.end.column)
# Clearing cached assemble() so a fresh one is fetched.
new_comment.assemble.cache_clear()
diff.replace(old_range, new_comment.assemble())
return diff
def process_documentation(self, *args, **kwargs):
"""
Checks and handles the fixing part of documentation.
:return:
A tuple of processed documentation and warning_desc.
"""
raise NotImplementedError('This function has to be implemented for a '
'documentation bear.')
| agpl-3.0 |
fablab-lannion/CMStools | FabLab/languages/fr-ca.py | 163 | 8203 | # coding: utf8
{
'!langcode!': 'fr-ca',
'!langname!': 'Français (Canadien)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%s %%{row} deleted': '%s rangées supprimées',
'%s %%{row} updated': '%s rangées mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'about': 'à propos',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'Administrative Interface': 'Administrative Interface',
'Administrative interface': "Interface d'administration",
'Ajax Recipes': 'Recettes Ajax',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Contrôleur',
'Copyright': "Droit d'auteur",
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s select',
'db': 'db',
'DB Model': 'Modèle DB',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement ',
'Description': 'Descriptif',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'Courriel',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'entrer un entier compris entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'faq',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Group %(group_id)s created': '%(group_id)s groupe créé',
'Group ID': 'Groupe ID',
'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s",
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Importer/Exporter',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Internal State': 'État interne',
'Introduction': 'Présentation',
'Invalid email': 'Courriel invalide',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Key': 'Key',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'layouts',
'Live chat': 'Clavardage en direct',
'Live Chat': 'Live Chat',
'Logged in': 'Connecté',
'login': 'connectez-vous',
'Login': 'Connectez-vous',
'logout': 'déconnectez-vous',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu modèle',
'My Sites': 'My Sites',
'Name': 'Nom',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'password': 'mot de passe',
'Password': 'Mot de passe',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'please input your password again': "S'il vous plaît entrer votre mot de passe",
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous 100 rows': '100 lignes précédentes',
'profile': 'profile',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Examples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID d'enregistrement",
'Record id': "id d'enregistrement",
'Record Updated': 'Record Updated',
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration key': "Clé d'enregistrement",
'Registration successful': 'Inscription réussie',
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Semantic': 'Sémantique',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'état',
'Statistics': 'Statistics',
'Stylesheet': 'Feuille de style',
'submit': 'submit',
'Submit': 'Soumettre',
'Support': 'Soutien',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "query" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Twitter': 'Twitter',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT pour construire des requêtes plus complexes.',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User ID': 'ID utilisateur',
'User Voice': 'User Voice',
'value already in database or empty': 'valeur déjà dans la base ou vide',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenu',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'You are successfully running web2py': 'Vous roulez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You visited the url %s': "Vous avez visité l'URL %s",
}
| gpl-2.0 |
lavajumper/sexcoin | test/functional/fundrawtransaction.py | 3 | 32125 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid viacoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
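        # (82 hex chars = 41 bytes: 4 version + 1 vin count + 32 txid + 4 vout
        # index; "0100" overwrites the 0x00 script-length byte with length 0x01
        # plus a single 0x00 script byte, yielding the "00" scriptSig asserted
        # below)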
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
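        # output[i] is the non-change vout amount of tx i and change[i] its
        # change amount; changepos is 0 or 1 here, so 1 - changepos indexes
        # the other output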
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
        # output 0 takes at least as much of the fee as outputs 2 and 3, and no more than 2 satoshis more
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| mit |
t794104/ansible | test/units/modules/network/f5/test_bigip_lx_package.py | 38 | 3742 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_lx_package import Parameters
from library.modules.bigip_lx_package import ModuleManager
from library.modules.bigip_lx_package import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_lx_package import Parameters
from ansible.modules.network.f5.bigip_lx_package import ArgumentSpec
from ansible.modules.network.f5.bigip_lx_package import ModuleManager
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
package='MyApp-0.1.0-0001.noarch.rpm',
state='present'
)
p = Parameters(params=args)
assert p.package == 'MyApp-0.1.0-0001.noarch.rpm'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
try:
self.p1 = patch('library.modules.bigip_lx_package.tmos_version')
self.m1 = self.p1.start()
self.m1.return_value = '12.1.3'
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigip_lx_package.tmos_version')
self.m1 = self.p1.start()
self.m1.return_value = '12.1.3'
def tearDown(self):
self.patcher1.stop()
def test_create_lx_package(self, *args):
package_name = os.path.join(fixture_path, 'MyApp-0.1.0-0001.noarch.rpm')
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
package=package_name,
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
required_if=self.spec.required_if
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.upload_to_device = Mock(return_value=True)
mm.enable_iapplx_on_device = Mock(return_value=True)
mm.remove_package_file_from_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
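# Note (sketch): exists() mocked with side_effect=[False, True] drives the
# create path -- the package is absent before exec_module() and present
# afterwards, so the module must report changed=True.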
| gpl-3.0 |
IsmaeRLGV/Modular-UserBot- | config.py | 1 | 3463 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from API.db import *
import os,time,random,re
clear=lambda: os.system("clear")
cont="s"
print "Bienvenido a la configuración de UserBot."
print "Nota: Algunos segmentos requridos u opcionales dejarlos en blanco si no sabes para que sirve.\n\n"
while cont != "*":
print "Marque:","\n 1-Para editar el nick,puerto,canal,etc.","\n 2-Gestionar administradores (bot).","\n 3-Para salir."
try:
cont=int(raw_input("->> "))
except ValueError:
print "El valor ingresado no es un numero.\n"
exit()
if cont == 1:
clear()
print "LEYENDA: Opcional: * Requerido: +"
print "Ingrese algunos datos necesarios."
HOST=raw_input("Servidor + >>> ")
if HOST == "":
HOST = "irc.freenode.net"
try:
PORT=int(raw_input("Puerto * >>> "))
except ValueError:
PORT=6667
CHAN=raw_input("Canal Principal + >>> ")
if CHAN == "":
CHAN = "#gazuza"
NICK=raw_input("Nickname + >>> ")
if NICK == "":
NICK = "UserBot"
comd=raw_input("Inicio de Comandos * >>> ")
if comd == "":
comd = "!"
database("API/DB/HOST",HOST).W_db()
database("API/DB/PORT",PORT).W_db()
database("API/DB/CHAN",CHAN).W_db()
database("API/DB/NICK",NICK).W_db()
database("API/DB/DB_comd",comd).W_db()
database("API/DB/DB_user",[[["UserBot", "127.0.0.7"],"password",[],0,["status","connected"]]]).W_db()
clear()
print "Servidor: " + HOST
print "Puerto: %s" % PORT
print "Canal Principal: " + CHAN
print "Nick: " + NICK
print "Inicio de Comandos: " + comd
if cont == 2:
print "¿Que desea hacer?","\n1 - Agregar Administradores","\n2 - Eliminar Administradores"
try:
EE0=int(raw_input("->> "))
except ValueError:
EE0=9
if EE0 == 1:
clear()
print "LEYENDA: Opcional: * Requerido: +"
user=raw_input("Usuario >>> ")
cont2=raw_input("¿Desea autogenerar la clave?\nS/n >>> ")
ready_passw=False
if cont2 == "S" or cont2 == "s":
ii=('o','j','k','9','h','l','a','u','7','3','b','0','p','m','c','w','f','d','x','4','r','1','e','2','g','5','q','6','8','t','y','i','s','z','v','n','O','J','K','H','L','A','U','B','P','M','C','W','F','D','X','R','E','G','Q','T','Y','I','S','Z','V','N')
hash_=''
for iii in range(30):
code=random.choice(ii)
while code in hash_:
code=random.choice(ii)
hash_=code+hash_
password=hash_
ready_passw=True
if cont2== "N" or cont2 == "n":
ready=""
while not "n" in ready:
ready=""
characters=0
password=raw_input("password >>> ")
numeric=re.search("\d", password)
string_mayus=re.search("[A-Z]",password)
string_minus=re.search("[a-z]",password)
if numeric:
ready+="s"
if string_mayus:
ready+="s"
if string_minus:
ready+="s"
for i in password:
characters+=1
if characters == 30:
ready+="s"
if ready=="ssss":
ready_passw=True
ready+="n"
else:
print "La clave ingresada no cumple con las condiciones."
if ready_passw:
clear()
print "Usuario: " + user
print "Password: " + password
try:
import API.arrays
API.arrays.DB_admins.append("%s %s" % (user,password))
print "Se añadio a la Base de Datos."
except AttributeError:
ee="%s %s" % (user,password)
database("API/DB/DB_admins", [ee]).a_db()
print "Se creo y añadio a la Base de Datos."
exit()
if cont == 3:
print "Saliendo..."
exit()
| apache-2.0 |
welchbj/tt | tt/tests/unit/trees/test_node_is_cnf.py | 1 | 3300 | """Tests for CNF detection in tree nodes."""
import unittest
from tt.trees import ExpressionTreeNode
class TestTreeNodeIsCnf(unittest.TestCase):
def assert_is_cnf(self, postfix_tokens):
"""Assert the passed tokens are in conjunctive normal form."""
self.assertTrue(ExpressionTreeNode.build_tree(postfix_tokens).is_cnf)
def assert_not_cnf(self, postfix_tokens):
"""Assert the passed tokens are not in conjunctive normal form."""
self.assertFalse(ExpressionTreeNode.build_tree(postfix_tokens).is_cnf)
def test_is_cnf_single_operand(self):
"""Test CNF determination for single operand trees."""
for postfix_tokens in (['0'], ['1'], ['token']):
self.assert_is_cnf(postfix_tokens)
def test_is_cnf_only_unary_operators(self):
"""Test CNF determination for trees with only unary operators."""
self.assert_is_cnf(['A', 'not'])
self.assert_not_cnf(['A', 'not', 'not'])
self.assert_not_cnf(['A', 'not', 'not', 'not'])
def test_is_cnf_single_clause(self):
"""Test that a single clause of ORed operands is in CNF."""
self.assert_is_cnf(['op1', 'op2', 'or'])
self.assert_is_cnf(['A', 'B', 'C', 'D', 'or', 'or', 'or'])
self.assert_is_cnf(['A', '~', 'B', 'C', '~', 'or', 'or'])
def test_is_cnf_clauses_of_single_operands(self):
"""Test several ANDed clauses of single operands is in CNF."""
self.assert_is_cnf(['op1', 'op2', 'and'])
self.assert_is_cnf(['A', 'B', 'C', 'D', 'and', 'and', 'and'])
self.assert_is_cnf(['A', 'B', 'not', 'C', 'and', 'and'])
def test_is_cnf_contains_non_primitive_operator_in_clause(self):
"""Test cases where non-primitive operator is in a clause."""
self.assert_not_cnf(['A', 'B', 'or',
'C', 'D', 'or',
'D', 'E', 'nand',
'and', 'and'])
self.assert_not_cnf(['A', 'B', 'C', 'xor', 'or',
'A', 'B', 'or',
'and'])
def test_is_cnf_contains_non_primitive_operator_joining_clauses(self):
"""Test cases where non-primitive operator joins clauses."""
self.assert_not_cnf(['A', 'B', '<->'])
self.assert_not_cnf(['A', 'B', 'or',
'B', 'C', 'or',
'C', 'E', 'or',
'->', 'and'])
def test_is_cnf_contains_notted_clause(self):
"""Test cases where an entire clause is notted."""
self.assert_not_cnf(['A', 'B', 'or', 'not',
'C', 'D', 'or',
'and'])
def test_is_cnf_multiple_clauses(self):
"""Test CNF determination for multiple clauses."""
self.assert_is_cnf(['A', 'C', 'or',
'B', 'C', 'or',
'E', 'F', 'G', 'or', 'or',
'and', 'and'])
def test_is_cnf_for_dnf_expression(self):
"""Test CNF determination for an expression in DNF."""
self.assert_not_cnf(['A', 'B', 'C', 'and', 'and',
'B', 'C', 'not', 'and',
'D', 'E', 'and',
'or', 'or'])
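# Quick sketch of the check these helpers wrap (postfix token input):
#   ExpressionTreeNode.build_tree(['A', 'B', 'or']).is_cnf   # -> True
#   ExpressionTreeNode.build_tree(['A', 'B', '<->']).is_cnf  # -> False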
| mit |
IsaacHaze/PyMuPDF | examples/PDF2TextJS.py | 2 | 3002 | #!/usr/bin/env python
"""
Created on Wed Jul 29 07:00:00 2015
@author: Jorj McKie
Copyright (c) 2015 Jorj X. McKie
The license of this program is governed by the GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007. See the "COPYING" file of this repository.
This is an example for using the Python binding PyMuPDF of MuPDF.
This program extracts the text of an input PDF and writes it in a text file.
The input file name is provided as a parameter to this script (sys.argv[1])
The output file name is equal to input with the extension ".pdf" replaced by
".txt".
Encoding of the text in the PDF is assumed to be UTF-8.
Change the ENCODING variable as required.
-------------------------------------------------------------------------------
The result of this program is similar to that of PDF2Text.py -- the difference
is an internal one:
This program uses method extractJSON() of text page which delivers less
information than extractXML(), but sufficient to reconstruct a text
version of a PDF.
The benefit is vastly higher performance: expect to see an improvement
by a factor of 20 or more!
-------------------------------------------------------------------------------
"""
import fitz
import sys, json
ENCODING = "utf-8"
def SortBlocks(blocks):
'''
Sort the blocks of a TextPage in ascending vertical pixel order,
then in ascending horizontal pixel order.
This ensures sequencing the text in a more readable form, at least by
convention of the Western hemisphere: from top-left to bottom-right.
If you need something else, change the sortkey variable accordingly ...
'''
sblocks = []
for b in blocks:
x0 = str(int(b["bbox"][0]+0.99999)).rjust(4,"0") # x coord in pixels
y0 = str(int(b["bbox"][1]+0.99999)).rjust(4,"0") # y coord in pixels
sortkey = y0 + x0 # = "yx"
sblocks.append([sortkey, b])
sblocks.sort()
return sblocks
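# Example of the key built above: a block with bbox (12.3, 45.6, ...) yields
# sortkey "0046" + "0013" == "00460013" -- y first, then x -- so blocks sort
# top-to-bottom, then left-to-right.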
def GetPageText(pg):
dl = fitz.DisplayList()
dv = fitz.Device(dl)
pg.run(dv, fitz.Identity)
ts = fitz.TextSheet()
tp = fitz.TextPage()
rect = pg.bound()
dl.run(fitz.Device(ts, tp), fitz.Identity, rect)
return tp.extractJSON()
#==============================================================================
# Main Program
#==============================================================================
ifile = sys.argv[1]
ofile = ifile.replace(".pdf",".txt")
doc = fitz.Document(ifile)
pages = doc.pageCount
fout = open(ofile,"w")
for i in range(pages):
print "========== processing page", i, "=========="
pg_text = ""
pg = doc.loadPage(i)
text = GetPageText(pg)
pgdict = json.loads(text)
blocks = SortBlocks(pgdict["blocks"])
for bx in blocks:
b = bx[1]
for l in b["lines"]:
for s in l["spans"]:
pg_text += unicode(s["text"])
pg_text += "\n"
pg_text = pg_text.encode(ENCODING, "ignore")
fout.write(pg_text)
fout.close()
| gpl-3.0 |
janblazek/ci-dnf-stack | dnf-behave-tests/dnf/steps/fixtures/server.py | 3 | 2665 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import contextlib
import multiprocessing
import socket
import time
class ServerContext(object):
"""
This object manages a group of simple servers. Each of them is run in a
separate process. This is the shared base for http and ftp servers and it
has a 'path' argument on the interface, which is passed to the server and
from which the server will serve files.
It also provides a dict whose keys are the paths and whose values are
the (address, process) pairs for the servers serving from those paths.
"""
@staticmethod
def _get_free_socket(host='localhost'):
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, 0))
return (host, s.getsockname()[1])
def __init__(self):
# mapping path -> (address, server process)
self._error = multiprocessing.Manager().Value(str, "")
self.servers = dict()
def _start_server(self, path, target, *args):
"""
Start a new server. Returns (host, port) tupple of the new running server.
"""
if path in self.servers:
return self.get_address(path)
address = self._get_free_socket()
process = multiprocessing.Process(target=target, args=(address, path, self._error) + args)
process.start()
self.servers[path] = (address, process)
attempts = 1
while attempts <= 10:
if self._error.value:
raise AssertionError("Server failed to start: " + self._error.value)
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
break
except Exception as e:
err = str(e)
# progressive sleep; 0.05 * 55 = 2.75s total sleep over 10 attempts
time.sleep(0.05 * attempts)
attempts += 1
if attempts > 10:
raise AssertionError("Server not ready: " + err)
return address
def stop_server(self, path):
self.servers.pop(path)[1].terminate()
def get_address(self, path):
"""
Get address of the server bound to the "path" directory.
"""
if path in self.servers:
return self.servers[path][0]
return None
def shutdown(self):
"""
Terminate all running servers.
"""
for _, process in self.servers.values():
process.terminate()
| gpl-3.0 |
anand-c-goog/tensorflow | tensorflow/python/summary/writer/writer.py | 8 | 10113 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
@@__init__
@@add_summary
@@add_session_log
@@add_graph
@@add_meta_graph
@@add_run_metadata
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.train.SummaryWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event method.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (
graph.as_graph_def(add_shapes=True) if isinstance(graph, ops.Graph)
else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(
graph_def=graph_def or maybe_graph_as_def))
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
[`Session.run()`](client.md#Session.run) or
[`Tensor.eval()`](framework.md#Tensor.eval), to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
This method wraps the provided session in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s"
% type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
def add_run_metadata(self, run_metadata, tag, global_step=None):
"""Adds a metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
class LegacySummaryWriter(SummaryToEventTransformer):
"""Exact match for the pre-1.0 tf.train.SummaryWriter."""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None):
event_writer = EventFileWriter(logdir, max_queue, flush_secs)
super(LegacySummaryWriter, self).__init__(event_writer, graph, graph_def)
# Proxy the event_writer public API onto the LegacySummaryWriter
# this gives consistency with the tf.train.SummaryWriter API.
self.get_logdir = self.event_writer.get_logdir
self.add_event = self.event_writer.add_event
self.flush = self.event_writer.flush
self.close = self.event_writer.close
self.reopen = self.event_writer.reopen
| apache-2.0 |
2014c2g14/c2g14 | w2/static/Brython2.0.0-20140209-164925/Lib/random.py | 104 | 25660 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from collections.abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1, the hash() of *a* is used instead.
If *a* is an int, all bits are used.
"""
if a is None:
try:
a = int.from_bytes(_urandom(32), 'big')
except NotImplementedError:
import time
a = int(time.time() * 256) # use fractional seconds
if version == 2:
if isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a += _sha512(a).digest()
a = int.from_bytes(a, 'big')
super().seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super().setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple(x % (2**32) for x in internalstate)
except ValueError as e:
raise TypeError from e
super().setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, _int=int):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = _int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is None:
if istart > 0:
return self._randbelow(istart)
raise ValueError("empty range for randrange()")
# stop argument supplied.
istop = _int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
if step == 1 and width > 0:
return istart + self._randbelow(width)
if step == 1:
raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
istep = _int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError("zero step for randrange()")
if n <= 0:
raise ValueError("empty range for randrange()")
return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
"Return a random int in the range [0,n). Raises ValueError if n==0."
getrandbits = self.getrandbits
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
if type(self.random) is BuiltinMethod or type(getrandbits) is Method:
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
# There's an overridden random() method but no new getrandbits() method,
# so we can only use random() from here.
random = self.random
if n >= maxsize:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large.\n"
"To remove the range limitation, add a getrandbits() method.")
return int(random() * n)
rem = maxsize % n
limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
r = random()
while r >= limit:
r = random()
return int(r*maxsize) % n
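# Example: for n == 6, k == n.bit_length() == 3, so getrandbits(3) yields
# 0..7 and the values 6 and 7 are simply redrawn -- rejection sampling
# avoids any modulo bias.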
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence')
return seq[i]
def shuffle(self, x, random=None):
"""x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
"""
if random is None:
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = randbelow(i+1)
x[i], x[j] = x[j], x[i]
else:
_int = int
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use range as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(range(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
if isinstance(population, _Set):
population = tuple(population)
if not isinstance(population, _Sequence):
raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
randbelow = self._randbelow
n = len(population)
if not 0 <= k <= n:
raise ValueError("Sample larger than population")
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
# An n-length list is smaller than a k-length set
pool = list(population)
for i in range(k): # invariant: non-selected at [0,n-i)
j = randbelow(n-i)
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
for i in range(k):
j = randbelow(n)
while j in selected:
j = randbelow(n)
selected_add(j)
result[i] = population[j]
return result
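# Example of the setsize heuristic above: for k == 6, setsize grows by
# 4 ** ceil(log(18, 4)) == 64 to 85, so the pool-copy strategy is used
# for any population of 85 or fewer members and set tracking otherwise.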
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
c = 0.5 if mode is None else (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
while 1:
u1 = random()
z = _cos(_pi * u1)
d = z / (r + z)
u2 = random()
if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
q = 1.0 / r
f = (q + z) / (1.0 + q * z)
u3 = random()
if u3 > 0.5:
theta = (mu + _acos(f)) % TWOPI
else:
theta = (mu - _acos(f)) % TWOPI
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
The probability distribution function is:
x ** (alpha - 1) * math.exp(-x / beta)
pdf(x) = --------------------------------------
math.gamma(alpha) * beta ** alpha
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
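# Example: k == 10 reads numbytes == 2 (16 random bits) and shifts right
# by 6, keeping exactly the 10 high-order bits.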
def seed(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
if __name__ == '__main__':
_test()
| gpl-2.0 |
jbalogh/zamboni | apps/addons/management/commands/process_addons.py | 1 | 1219 | from optparse import make_option
from django.core.management.base import BaseCommand
from addons.models import Addon
from amo.utils import chunked
from devhub.tasks import convert_purified, flag_binary, get_preview_sizes
tasks = {
'flag_binary': {'method': flag_binary, 'qs': []},
'get_preview_sizes': {'method': get_preview_sizes, 'qs': []},
'convert_purified': {'method': convert_purified, 'qs': []}
}
class Command(BaseCommand):
"""
A generic command to run a task on addons.
Add tasks to the tasks dictionary, providing a list of Q objects if you'd
like to filter the list down.
"""
option_list = BaseCommand.option_list + (
make_option('--task', action='store', type='string',
dest='task', help='Run task on the addons.'),
)
def handle(self, *args, **options):
task = tasks.get(options.get('task'))
if not task:
raise ValueError('Unknown task; available tasks: %s' % ', '.join(tasks.keys()))
pks = (Addon.objects.filter(*task['qs'])
.values_list('pk', flat=True)
.order_by('id'))
for chunk in chunked(pks, 100):
task['method'].delay(chunk)
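# Usage sketch: ./manage.py process_addons --task=get_preview_sizes
# queues the chosen task over every matching add-on id in chunks of 100.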
| bsd-3-clause |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/requests-2.1.0-py2.7.egg/requests/packages/charade/latin1prober.py | 50 | 5387 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
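# Example lookup: an ascii small letter (ASS, 3) followed by an accented
# capital vowel (ACV, 4) reads Latin1ClassModel[3 * CLASS_NUM + 4] == 1,
# a very unlikely transition in genuine windows-1252 text.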
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((float(self._mFreqCounter[3]) / total)
- (self._mFreqCounter[1] * 20.0 / total))
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.5
return confidence
| apache-2.0 |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/test/test_macpath.py | 62 | 4985 | import macpath
from test import support, test_genericpath
import unittest
class MacPathTestCase(unittest.TestCase):
def test_abspath(self):
self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")
def test_isabs(self):
isabs = macpath.isabs
self.assertTrue(isabs("xx:yy"))
self.assertTrue(isabs("xx:yy:"))
self.assertTrue(isabs("xx:"))
self.assertFalse(isabs("foo"))
self.assertFalse(isabs(":foo"))
self.assertFalse(isabs(":foo:bar"))
self.assertFalse(isabs(":foo:bar:"))
self.assertTrue(isabs(b"xx:yy"))
self.assertTrue(isabs(b"xx:yy:"))
self.assertTrue(isabs(b"xx:"))
self.assertFalse(isabs(b"foo"))
self.assertFalse(isabs(b":foo"))
self.assertFalse(isabs(b":foo:bar"))
self.assertFalse(isabs(b":foo:bar:"))
def test_split(self):
split = macpath.split
self.assertEqual(split("foo:bar"),
('foo:', 'bar'))
self.assertEqual(split("conky:mountpoint:foo:bar"),
('conky:mountpoint:foo', 'bar'))
self.assertEqual(split(":"), ('', ''))
self.assertEqual(split(":conky:mountpoint:"),
(':conky:mountpoint', ''))
self.assertEqual(split(b"foo:bar"),
(b'foo:', b'bar'))
self.assertEqual(split(b"conky:mountpoint:foo:bar"),
(b'conky:mountpoint:foo', b'bar'))
self.assertEqual(split(b":"), (b'', b''))
self.assertEqual(split(b":conky:mountpoint:"),
(b':conky:mountpoint', b''))
def test_join(self):
join = macpath.join
self.assertEqual(join('a', 'b'), ':a:b')
self.assertEqual(join('', 'a:b'), 'a:b')
self.assertEqual(join('a:b', 'c'), 'a:b:c')
self.assertEqual(join('a:b', ':c'), 'a:b:c')
self.assertEqual(join('a', ':b', ':c'), ':a:b:c')
self.assertEqual(join(b'a', b'b'), b':a:b')
self.assertEqual(join(b'', b'a:b'), b'a:b')
self.assertEqual(join(b'a:b', b'c'), b'a:b:c')
self.assertEqual(join(b'a:b', b':c'), b'a:b:c')
self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c')
def test_splitext(self):
splitext = macpath.splitext
self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
self.assertEqual(splitext(".ext"), ('.ext', ''))
self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
self.assertEqual(splitext(""), ('', ''))
self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
self.assertEqual(splitext(b":foo.ext"), (b':foo', b'.ext'))
self.assertEqual(splitext(b"foo:foo.ext"), (b'foo:foo', b'.ext'))
self.assertEqual(splitext(b".ext"), (b'.ext', b''))
self.assertEqual(splitext(b"foo.ext:foo"), (b'foo.ext:foo', b''))
self.assertEqual(splitext(b":foo.ext:"), (b':foo.ext:', b''))
self.assertEqual(splitext(b""), (b'', b''))
self.assertEqual(splitext(b"foo.bar.ext"), (b'foo.bar', b'.ext'))
def test_ismount(self):
ismount = macpath.ismount
self.assertEqual(ismount("a:"), True)
self.assertEqual(ismount("a:b"), False)
self.assertEqual(ismount("a:b:"), True)
self.assertEqual(ismount(""), False)
self.assertEqual(ismount(":"), False)
self.assertEqual(ismount(b"a:"), True)
self.assertEqual(ismount(b"a:b"), False)
self.assertEqual(ismount(b"a:b:"), True)
self.assertEqual(ismount(b""), False)
self.assertEqual(ismount(b":"), False)
def test_normpath(self):
normpath = macpath.normpath
self.assertEqual(normpath("a:b"), "a:b")
self.assertEqual(normpath("a"), ":a")
self.assertEqual(normpath("a:b::c"), "a:c")
self.assertEqual(normpath("a:b:c:::d"), "a:d")
self.assertRaises(macpath.norm_error, normpath, "a::b")
self.assertRaises(macpath.norm_error, normpath, "a:b:::c")
self.assertEqual(normpath(":"), ":")
self.assertEqual(normpath("a:"), "a:")
self.assertEqual(normpath("a:b:"), "a:b")
self.assertEqual(normpath(b"a:b"), b"a:b")
self.assertEqual(normpath(b"a"), b":a")
self.assertEqual(normpath(b"a:b::c"), b"a:c")
self.assertEqual(normpath(b"a:b:c:::d"), b"a:d")
self.assertRaises(macpath.norm_error, normpath, b"a::b")
self.assertRaises(macpath.norm_error, normpath, b"a:b:::c")
self.assertEqual(normpath(b":"), b":")
self.assertEqual(normpath(b"a:"), b"a:")
self.assertEqual(normpath(b"a:b:"), b"a:b")
class MacCommonTest(test_genericpath.CommonTest):
pathmodule = macpath
def test_main():
support.run_unittest(MacPathTestCase, MacCommonTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
lenstr/rethinkdb | external/v8_3.30.33.16/build/gyp/test/hello/gyptest-regyp-output.py | 202 | 1077 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes and
--generator-output is used.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make and Android generators, and --generator-output is not supported
# by Android and ninja, so we can only test for make.
test = TestGyp.TestGyp(formats=['make'])
CHDIR='generator-output'
test.run_gyp('hello.gyp', '--generator-output=%s' % CHDIR)
test.build('hello.gyp', test.ALL, chdir=CHDIR)
test.run_built_executable('hello', stdout="Hello, world!\n", chdir=CHDIR)
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL, chdir=CHDIR)
test.run_built_executable('hello', stdout="Hello, two!\n", chdir=CHDIR)
test.pass_test()
| agpl-3.0 |
jay3sh/vispy | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
The levels at which the isocurve is constructed from *data*.
color_lev : Color, colormap name, tuple, list or array
The color to use when drawing the line. If a list is given, it
must be of shape (Nlev,); if an array is given, it must be of
shape (Nlev, ...) and provide one color per level (rgba, colorname).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
        # if we use the matplotlib isoline algorithm we have to check that the
        # meshgrid is still valid, and we can set up the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
        # sanity check: no isolines can be drawn from uniform data.
        # Should we raise an error here instead? For now, _prepare_draw
        # simply returns False when no isoline can be drawn.
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
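    # Illustrative note (added; shapes are hypothetical): for data of shape
    # (3, 4), np.meshgrid(np.arange(3), np.arange(4)) returns _X and _Y of
    # shape (4, 3), i.e. data.shape transposed -- which is why the staleness
    # check above compares self._X.T.shape against data.shape.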
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
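    # Worked example (added; values are illustrative): for two paths of 3 and
    # 2 points, verts stacks all 5 points and
    #     gaps    = np.add.accumulate([3, 2]) - 1      -> [2, 4]
    #     connect = np.ones(4); connect[[2]] = False   -> [T, T, F, T]
    # so the segment from vertex 2 to vertex 3 (the path boundary) is left
    # unconnected while the segments within each path remain joined.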
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
        # calculate which levels are within the data range
        # this works for now and for the existing examples, but should also be
        # tested thoroughly against the data-sanity check in set_data
        choice = np.nonzero((self._levels > self._data.min()) &
                            (self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
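    # Illustrative note (added; values are hypothetical): with data spanning
    # [0, 1] and levels = [-0.5, 0.3, 0.7, 1.5], only 0.3 and 0.7 lie strictly
    # inside the data range, so choice selects indices (1, 2) and
    # self._level_min = 1; _compute_iso_color then indexes the full color list
    # with `i + self._level_min` to skip the colors of out-of-range levels.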
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
        # compute ColorArrays for the given levels:
        # try to interpret _color_lev as a colormap; on failure, fall back to
        # treating it as a plain color specification
        try:
            f_color_levs = get_colormap(self._color_lev)
        except Exception:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
        if len(colors) != len(self._levels):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
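    # Hedged usage sketch (added; parameter values are assumptions): the
    # dispatch above means both forms are accepted for color_lev, e.g.
    #     IsocurveVisual(data, levels=[0.25, 0.5, 0.75], color_lev='autumn')
    #     IsocurveVisual(data, levels=[0.25, 0.5, 0.75],
    #                    color_lev=[(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1)])
    # A colormap name is mapped through level values normalized by clim, while
    # an explicit list must supply exactly one color per level.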
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
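# --- Hedged usage sketch (added example; not part of the original module) ---
# The scene-graph wrapper `vispy.scene.visuals.Isocurve` and the canvas setup
# below follow common vispy usage patterns and are assumptions, not code taken
# from this file.
if __name__ == '__main__':
    from vispy import app, scene

    canvas = scene.SceneCanvas(keys='interactive', show=True)
    view = canvas.central_widget.add_view(camera='panzoom')

    # synthetic 2D scalar field with a handful of isolines across its range
    y, x = np.mgrid[0:100, 0:100]
    demo_data = np.sin(x / 10.0) * np.cos(y / 10.0)
    scene.visuals.Isocurve(demo_data, levels=np.linspace(-0.8, 0.8, 5),
                           color_lev='autumn', parent=view.scene)
    app.run()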
| bsd-3-clause |