repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
chandler14362/panda3d | direct/src/leveleditor/CurveAnimUI.py | 13 | 7082 | """
This is the GUI for the Curve Animation
"""
import wx
from direct.interval.IntervalGlobal import *
from direct.actor.Actor import *
from . import ObjectGlobals as OG
class CurveAnimUI(wx.Dialog):
    """
    This is the Curve Animation Panel implementation.

    Lets the user pick an object NodePath and a curve from the level
    editor, create an animation that moves the object along the curve,
    and save that animation into the editor's global animation manager.
    """

    def __init__(self, parent, editor):
        wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title="Curve Animation",
                           pos=wx.DefaultPosition, size=(430, 140))
        self.editor = editor
        self.nodePath = None   # object record chosen via OnChooseNode
        self.curve = None      # curve record chosen via OnChooseCurve
        # BUGFIX: initialize here so OnSaveAnim does not raise AttributeError
        # when "Save Animation" is clicked before an animation was created.
        self.curveSequence = None

        self.mainPanel = wx.Panel(self, -1)
        self.chooseNode = wx.StaticText(self.mainPanel, -1, "Choose NodePath:")
        self.chooseNodeTxt = wx.TextCtrl(self.mainPanel, -1, "")
        self.chooseNodeButton = wx.Button(self.mainPanel, -1, "Choose..")
        # typo fixes in user-facing labels: "attch" -> "attach",
        # "Durition" -> "Duration", "Creat" -> "Create"
        self.chooseCurve = wx.StaticText(self.mainPanel, -1, "Choose attach Curve:")
        self.chooseCurveTxt = wx.TextCtrl(self.mainPanel, -1, "")
        self.chooseCurveButton = wx.Button(self.mainPanel, -1, "Choose..")
        self.duritionTime = wx.StaticText(self.mainPanel, -1, "Duration(Frame):")
        self.duritionTimeSpin = wx.SpinCtrl(self.mainPanel, -1, "", size=(70, 25), min=24, max=10000)
        self.createAnimButton = wx.Button(self.mainPanel, -1, "Create")
        self.saveAnimButton = wx.Button(self.mainPanel, -1, "Save Animation")

        self.SetProperties()
        self.DoLayout()

        self.Bind(wx.EVT_BUTTON, self.OnChooseNode, self.chooseNodeButton)
        self.Bind(wx.EVT_BUTTON, self.OnChooseCurve, self.chooseCurveButton)
        self.Bind(wx.EVT_BUTTON, self.OnCreateAnim, self.createAnimButton)
        self.Bind(wx.EVT_BUTTON, self.OnSaveAnim, self.saveAnimButton)
        self.Bind(wx.EVT_CLOSE, self.OnExit)

    def _notify(self, message):
        """Show a modal NOTICE dialog with the given message."""
        dlg = wx.MessageDialog(None, message, 'NOTICE', wx.OK)
        dlg.ShowModal()
        dlg.Destroy()

    def SetProperties(self):
        """Set initial widget values and minimum sizes."""
        self.duritionTimeSpin.SetValue(24)
        self.chooseNodeTxt.SetMinSize((200, 21))
        self.chooseCurveTxt.SetMinSize((200, 21))
        # NOTE(review): SetToolTipString is removed in wxPython Phoenix
        # (use SetToolTip there); kept as-is for classic-wx compatibility.
        self.saveAnimButton.SetToolTipString("Save the animation to the global animation control")

    def DoLayout(self):
        """Lay the widgets out in a 4x3 flex grid inside the main panel."""
        dialogSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer = wx.FlexGridSizer(4, 3, 0, 0)
        mainSizer.Add(self.chooseNode, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 10)
        mainSizer.Add(self.chooseNodeTxt, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        mainSizer.Add(self.chooseNodeButton, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        mainSizer.Add(self.chooseCurve, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 10)
        mainSizer.Add(self.chooseCurveTxt, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        mainSizer.Add(self.chooseCurveButton, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        mainSizer.Add(self.duritionTime, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 10)
        mainSizer.Add(self.duritionTimeSpin, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        mainSizer.Add(self.createAnimButton, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        mainSizer.Add(self.saveAnimButton, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, 5)
        self.mainPanel.SetSizerAndFit(mainSizer)
        dialogSizer.Add(self.mainPanel, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        self.SetSizer(dialogSizer)
        self.Layout()

    def OnChooseNode(self, evt):
        """Record the currently selected editor object as the animation target."""
        selected = base.direct.selected.last
        if selected is None or selected.hasTag('Controller') or not selected.hasTag('OBJRoot'):
            self._notify('Please select an object.')
        else:
            obj = self.editor.objectMgr.findObjectByNodePath(selected)
            if obj[OG.OBJ_DEF].name == '__Curve__':
                self._notify('Please select an object, not a curve.')
            else:
                self.nodePath = obj
                self.chooseNodeTxt.SetValue(str(self.nodePath[OG.OBJ_UID]))

    def OnChooseCurve(self, evt):
        """Record the currently selected editor curve as the animation path."""
        selected = base.direct.selected.last
        if selected is None or selected.hasTag('Controller') or not selected.hasTag('OBJRoot'):
            self._notify('Please select a curve.')
        else:
            obj = self.editor.objectMgr.findObjectByNodePath(selected)
            if obj[OG.OBJ_DEF].name != '__Curve__':
                self._notify('Please select a curve, not an object.')
            else:
                self.curve = obj
                self.chooseCurveTxt.SetValue(str(self.curve[OG.OBJ_UID]))

    def OnCreateAnim(self, evt):
        """Build the curve animation from the current selections and play it."""
        self.time = self.duritionTimeSpin.GetValue()
        if self.nodePath is None or self.curve is None:
            self._notify('Please select an object and a curve first.')
        else:
            self.curveSequence = self.editor.animMgr.singleCurveAnimation(self.nodePath, self.curve, self.time)
            self.curveSequence.start()

    def OnSaveAnim(self, evt):
        """Save the created animation into the editor's global animation dict,
        keyed by (object UID, curve UID).

        Refuses duplicates and objects already attached to another curve.
        """
        if not self.curveSequence:
            self._notify('Please create an animation first.')
            return
        nodeUID = self.nodePath[OG.OBJ_UID]
        curveUID = self.curve[OG.OBJ_UID]
        animations = self.editor.animMgr.curveAnimation
        if (nodeUID, curveUID) in animations:
            self._notify('Already have the animation for this object attached to this curve.')
            return
        for key in animations:
            # an object may be attached to at most one curve
            if key[0] == nodeUID:
                self._notify('This object is already attached to a curve.')
                return
        animations[(nodeUID, curveUID)] = (nodeUID, curveUID, self.time)
        self.editor.updateStatusReadout('Successfully saved to global animation list')

    def OnExit(self, evt):
        """Close the dialog and untick the menu item that opened it."""
        self.Destroy()
        self.editor.ui.curveAnimMenuItem.Check(False)
| bsd-3-clause |
vipjml/python-driver | tests/integration/cqlengine/query/test_queryset.py | 4 | 54820 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime
import time
from uuid import uuid1, uuid4
import uuid
from cassandra.cluster import Session
from cassandra import InvalidRequest
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.connection import NOT_SET
import mock
from cassandra.cqlengine import functions
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
from cassandra.cqlengine import query
from cassandra.cqlengine.query import QueryException, BatchQuery
from datetime import timedelta
from datetime import tzinfo
from cassandra.cqlengine import statements
from cassandra.cqlengine import operators
from cassandra.util import uuid_from_time
from cassandra.cqlengine.connection import get_session
from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21
from tests.integration.cqlengine import execute_count
class TzOffset(tzinfo):
    """Minimal implementation of a timezone offset to help testing with timezone
    aware datetimes.
    """

    def __init__(self, offset):
        # offset: whole hours east of UTC (negative for west)
        self._offset = timedelta(hours=offset)

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        # BUGFIX: timedelta has no ``.hours`` attribute, so the original
        # ``self._offset.hours`` raised AttributeError whenever tzname()
        # was called; format the timedelta itself instead.
        return 'TzOffset: {}'.format(self._offset)

    def dst(self, dt):
        # This zone models a fixed offset with no daylight saving.
        return timedelta(0)
class TestModel(Model):
    """Plain model with a composite primary key and three data columns.

    Per cqlengine convention the first primary_key column is the partition
    key and subsequent ones are clustering keys.
    """
    test_id = columns.Integer(primary_key=True)     # partition key
    attempt_id = columns.Integer(primary_key=True)  # clustering key
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer()
class IndexedTestModel(Model):
    """Variant of TestModel with secondary indexes instead of clustering keys,
    used to exercise queries on indexed (non-primary-key) columns.
    """
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(index=True)    # secondary index
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer(index=True)   # secondary index
class IndexedCollectionsTestModel(Model):
    """Model with indexed and non-indexed collection columns, used to exercise
    CONTAINS queries on list/set/map secondary indexes.
    """
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(index=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer(index=True)
    # indexed collections: CONTAINS filtering should succeed on these
    test_list = columns.List(columns.Integer, index=True)
    test_set = columns.Set(columns.Integer, index=True)
    test_map = columns.Map(columns.Text, columns.Integer, index=True)
    # unindexed counterparts: CONTAINS filtering should be rejected
    test_list_no_index = columns.List(columns.Integer, index=False)
    test_set_no_index = columns.Set(columns.Integer, index=False)
    test_map_no_index = columns.Map(columns.Text, columns.Integer, index=False)
class TestMultiClusteringModel(Model):
    """Model with one partition key and two clustering keys, used for
    multi-column ordering and range-deletion tests.
    """
    one = columns.Integer(primary_key=True)    # partition key
    two = columns.Integer(primary_key=True)    # first clustering key
    three = columns.Integer(primary_key=True)  # second clustering key
class TestQuerySetOperation(BaseCassEngTestCase):
    """Unit-level tests of queryset construction (filter parsing, immutability,
    distinct/only/defer field selection). These inspect queryset internals
    (``_where``, ``_limit``, ``_select_fields``) and do not hit the database.
    """
    def test_query_filter_parsing(self):
        """
        Tests the queryset filter method parses its kwargs properly
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1
        op = query1._where[0]
        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5
        # __gte suffix must translate to a >= where clause
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1
    def test_query_expression_parsing(self):
        """ Tests that query expressions are evaluated properly """
        query1 = TestModel.filter(TestModel.test_id == 5)
        assert len(query1._where) == 1
        op = query1._where[0]
        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5
        query2 = query1.filter(TestModel.expected_result >= 1)
        assert len(query2._where) == 2
        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1
    def test_using_invalid_column_names_in_filter_kwargs_raises_error(self):
        """
        Tests that using invalid or nonexistent column names for filter args raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(nonsense=5)
    def test_using_nonexistant_column_names_in_query_args_raises_error(self):
        """
        Tests that using invalid or nonexistent columns for query args raises an error
        """
        with self.assertRaises(AttributeError):
            TestModel.objects(TestModel.nonsense == 5)
    def test_using_non_query_operators_in_query_args_raises_error(self):
        """
        Tests that providing query args that are not query operator instances raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(5)
    def test_queryset_is_immutable(self):
        """
        Tests that calling a queryset function that changes its state returns a new queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        # the original queryset must be unchanged
        assert len(query1._where) == 1
    def test_queryset_limit_immutability(self):
        """
        Tests that calling a queryset function that changes its state returns a new queryset with same limit
        """
        query1 = TestModel.objects(test_id=5).limit(1)
        assert query1._limit == 1
        query2 = query1.filter(expected_result__gte=1)
        assert query2._limit == 1
        query3 = query1.filter(expected_result__gte=1).limit(2)
        assert query1._limit == 1
        assert query3._limit == 2
    def test_the_all_method_duplicates_queryset(self):
        """
        Tests that calling all on a queryset with previously defined filters duplicates queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        query3 = query2.all()
        assert query3 == query2
    def test_queryset_with_distinct(self):
        """
        Tests that calling distinct on a queryset w/without parameter are evaluated properly.
        """
        query1 = TestModel.objects.distinct()
        self.assertEqual(len(query1._distinct_fields), 1)
        query2 = TestModel.objects.distinct(['test_id'])
        self.assertEqual(len(query2._distinct_fields), 1)
        query3 = TestModel.objects.distinct(['test_id', 'attempt_id'])
        self.assertEqual(len(query3._distinct_fields), 2)
    def test_defining_only_fields(self):
        """
        Tests defining only fields
        @since 3.5
        @jira_ticket PYTHON-560
        @expected_result deferred fields should not be returned
        @test_category object_mapper
        """
        # simple only definition
        q = TestModel.objects.only(['attempt_id', 'description'])
        self.assertEqual(q._select_fields(), ['attempt_id', 'description'])
        with self.assertRaises(query.QueryException):
            TestModel.objects.only(['nonexistent_field'])
        # Cannot define more than once only fields
        with self.assertRaises(query.QueryException):
            TestModel.objects.only(['description']).only(['attempt_id'])
        # only with defer fields
        q = TestModel.objects.only(['attempt_id', 'description'])
        q = q.defer(['description'])
        self.assertEqual(q._select_fields(), ['attempt_id'])
        # Eliminate all results confirm exception is thrown
        q = TestModel.objects.only(['description'])
        q = q.defer(['description'])
        with self.assertRaises(query.QueryException):
            q._select_fields()
        # columns already constrained by an equality filter are dropped
        # from the select list
        q = TestModel.objects.filter(test_id=0).only(['test_id', 'attempt_id', 'description'])
        self.assertEqual(q._select_fields(), ['attempt_id', 'description'])
        # no fields to select
        with self.assertRaises(query.QueryException):
            q = TestModel.objects.only(['test_id']).defer(['test_id'])
            q._select_fields()
        with self.assertRaises(query.QueryException):
            q = TestModel.objects.filter(test_id=0).only(['test_id'])
            q._select_fields()
    def test_defining_defer_fields(self):
        """
        Tests defining defer fields
        @since 3.5
        @jira_ticket PYTHON-560
        @jira_ticket PYTHON-599
        @expected_result deferred fields should not be returned
        @test_category object_mapper
        """
        # simple defer definition
        q = TestModel.objects.defer(['attempt_id', 'description'])
        self.assertEqual(q._select_fields(), ['test_id', 'expected_result', 'test_result'])
        with self.assertRaises(query.QueryException):
            TestModel.objects.defer(['nonexistent_field'])
        # defer more than one
        q = TestModel.objects.defer(['attempt_id', 'description'])
        q = q.defer(['expected_result'])
        self.assertEqual(q._select_fields(), ['test_id', 'test_result'])
        # defer with only
        q = TestModel.objects.defer(['description', 'attempt_id'])
        q = q.only(['description', 'test_id'])
        self.assertEqual(q._select_fields(), ['test_id'])
        # Eliminate all results confirm exception is thrown
        q = TestModel.objects.defer(['description', 'attempt_id'])
        q = q.only(['description'])
        with self.assertRaises(query.QueryException):
            q._select_fields()
        # implicit defer
        q = TestModel.objects.filter(test_id=0)
        self.assertEqual(q._select_fields(), ['attempt_id', 'description', 'expected_result', 'test_result'])
        # when all fields are defered, it fallbacks select the partition keys
        q = TestModel.objects.defer(['test_id', 'attempt_id', 'description', 'expected_result', 'test_result'])
        self.assertEqual(q._select_fields(), ['test_id'])
class BaseQuerySetUsage(BaseCassEngTestCase):
    """Shared fixture base: creates the test tables once per class and loads
    the canonical rows that the queryset integration tests assert against
    (12 TestModel rows across 3 partitions, 12 IndexedTestModel rows, and —
    on C* >= 2.1 — 4 IndexedCollectionsTestModel rows).
    """
    @classmethod
    def setUpClass(cls):
        super(BaseQuerySetUsage, cls).setUpClass()
        drop_table(TestModel)
        drop_table(IndexedTestModel)
        sync_table(TestModel)
        sync_table(IndexedTestModel)
        sync_table(TestMultiClusteringModel)
        # 3 partitions (test_id 0..2) with 4 attempts each
        TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=2, description='try3', expected_result=15, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=3, description='try4', expected_result=20, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=0, description='try5', expected_result=5, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=1, description='try6', expected_result=10, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=2, description='try7', expected_result=15, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=3, description='try8', expected_result=20, test_result=20)
        TestModel.objects.create(test_id=2, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=2, attempt_id=3, description='try12', expected_result=75, test_result=45)
        # same logical rows, but each in its own partition (test_id 0..11)
        IndexedTestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        IndexedTestModel.objects.create(test_id=1, attempt_id=1, description='try2', expected_result=10, test_result=30)
        IndexedTestModel.objects.create(test_id=2, attempt_id=2, description='try3', expected_result=15, test_result=30)
        IndexedTestModel.objects.create(test_id=3, attempt_id=3, description='try4', expected_result=20, test_result=25)
        IndexedTestModel.objects.create(test_id=4, attempt_id=0, description='try5', expected_result=5, test_result=25)
        IndexedTestModel.objects.create(test_id=5, attempt_id=1, description='try6', expected_result=10, test_result=25)
        IndexedTestModel.objects.create(test_id=6, attempt_id=2, description='try7', expected_result=15, test_result=25)
        IndexedTestModel.objects.create(test_id=7, attempt_id=3, description='try8', expected_result=20, test_result=20)
        IndexedTestModel.objects.create(test_id=8, attempt_id=0, description='try9', expected_result=50, test_result=40)
        IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60,
                                        test_result=40)
        IndexedTestModel.objects.create(test_id=10, attempt_id=2, description='try11', expected_result=70,
                                        test_result=45)
        IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75,
                                        test_result=45)
        # collection indexes require Cassandra 2.1+
        if(CASSANDRA_VERSION >= '2.1'):
            drop_table(IndexedCollectionsTestModel)
            sync_table(IndexedCollectionsTestModel)
            IndexedCollectionsTestModel.objects.create(test_id=12, attempt_id=3, description='list12', expected_result=75,
                                                       test_result=45, test_list=[1, 2, 42], test_set=set([1, 2, 3]),
                                                       test_map={'1': 1, '2': 2, '3': 3})
            IndexedCollectionsTestModel.objects.create(test_id=13, attempt_id=3, description='list13', expected_result=75,
                                                       test_result=45, test_list=[3, 4, 5], test_set=set([4, 5, 42]),
                                                       test_map={'1': 5, '2': 6, '3': 7})
            IndexedCollectionsTestModel.objects.create(test_id=14, attempt_id=3, description='list14', expected_result=75,
                                                       test_result=45, test_list=[1, 2, 3], test_set=set([1, 2, 3]),
                                                       test_map={'1': 1, '2': 2, '3': 42})
            IndexedCollectionsTestModel.objects.create(test_id=15, attempt_id=4, description='list14', expected_result=75,
                                                       test_result=45, test_list_no_index=[1, 2, 3], test_set_no_index=set([1, 2, 3]),
                                                       test_map_no_index={'1': 1, '2': 2, '3': 42})
    @classmethod
    def tearDownClass(cls):
        super(BaseQuerySetUsage, cls).tearDownClass()
        drop_table(TestModel)
        drop_table(IndexedTestModel)
        drop_table(TestMultiClusteringModel)
class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage):
    """Integration tests for count(), iteration, get(), and allow_filtering()
    behavior of querysets, against the fixture data in BaseQuerySetUsage.
    """
    @execute_count(2)
    def test_count(self):
        """ Tests that adding filtering statements affects the count query as expected """
        assert TestModel.objects.count() == 12
        q = TestModel.objects(test_id=0)
        assert q.count() == 4
    @execute_count(2)
    def test_query_expression_count(self):
        """ Tests that adding query statements affects the count query as expected """
        assert TestModel.objects.count() == 12
        q = TestModel.objects(TestModel.test_id == 0)
        assert q.count() == 4
    @execute_count(3)
    def test_iteration(self):
        """ Tests that iterating over a query set pulls back all of the expected results """
        q = TestModel.objects(test_id=0)
        # tuple of expected attempt_id, expected_result values
        compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
        for t in q:
            val = t.attempt_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0
        # test with regular filtering
        q = TestModel.objects(attempt_id=3).allow_filtering()
        assert len(q) == 3
        # tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0
        # test with query method
        q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering()
        assert len(q) == 3
        # tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0
    @execute_count(2)
    def test_multiple_iterations_work_properly(self):
        """ Tests that iterating over a query set more than once works """
        # test with both the filtering method and the query method
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            # tuple of expected attempt_id, expected_result values
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0
            # try it again
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0
    @execute_count(2)
    def test_multiple_iterators_are_isolated(self):
        """
        tests that the use of one iterator does not affect the behavior of another
        """
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            q = q.order_by('attempt_id')
            expected_order = [0, 1, 2, 3]
            iter1 = iter(q)
            iter2 = iter(q)
            for attempt_id in expected_order:
                assert next(iter1).attempt_id == attempt_id
                assert next(iter2).attempt_id == attempt_id
    @execute_count(3)
    def test_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.objects.get(test_id=0, attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(test_id=0, attempt_id=0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(test_id=0)
        m = q.get(attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
    @execute_count(3)
    def test_query_expression_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.get(TestModel.test_id == 0, TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(TestModel.test_id == 0, TestModel.attempt_id == 0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(TestModel.test_id == 0)
        m = q.get(TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
    @execute_count(1)
    def test_get_doesnotexist_exception(self):
        """
        Tests that get calls that don't return a result raises a DoesNotExist error
        """
        with self.assertRaises(TestModel.DoesNotExist):
            TestModel.objects.get(test_id=100)
    @execute_count(1)
    def test_get_multipleobjects_exception(self):
        """
        Tests that get calls that return multiple results raise a MultipleObjectsReturned error
        """
        with self.assertRaises(TestModel.MultipleObjectsReturned):
            TestModel.objects.get(test_id=1)
    def test_allow_filtering_flag(self):
        """
        Tests that a non-equality filter on a clustering key works with allow_filtering().
        """
        @execute_count(4)
        def test_non_quality_filtering():
            class NonEqualityFilteringModel(Model):
                example_id = columns.UUID(primary_key=True, default=uuid.uuid4)
                sequence_id = columns.Integer(primary_key=True)  # sequence_id is a clustering key
                example_type = columns.Integer(index=True)
                created_at = columns.DateTime()
            drop_table(NonEqualityFilteringModel)
            sync_table(NonEqualityFilteringModel)
            # setup table, etc.
            NonEqualityFilteringModel.create(sequence_id=1, example_type=0, created_at=datetime.now())
            NonEqualityFilteringModel.create(sequence_id=3, example_type=0, created_at=datetime.now())
            NonEqualityFilteringModel.create(sequence_id=5, example_type=1, created_at=datetime.now())
            qa = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering()
            num = qa.count()
            assert num == 1, num
        # BUGFIX: the nested function was defined but never invoked, so this
        # test previously ran no assertions at all.
        test_non_quality_filtering()
class TestQuerySetDistinct(BaseQuerySetUsage):
    """Integration tests for SELECT DISTINCT support; fixture data contains
    3 distinct TestModel partition keys (test_id 0, 1, 2).
    """
    @execute_count(1)
    def test_distinct_without_parameter(self):
        # with no argument, distinct() defaults to the partition key
        q = TestModel.objects.distinct()
        self.assertEqual(len(q), 3)
    @execute_count(1)
    def test_distinct_with_parameter(self):
        q = TestModel.objects.distinct(['test_id'])
        self.assertEqual(len(q), 3)
    @execute_count(1)
    def test_distinct_with_filter(self):
        q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2])
        self.assertEqual(len(q), 2)
    @execute_count(1)
    def test_distinct_with_non_partition(self):
        # DISTINCT on a non-partition-key column is rejected by the server
        with self.assertRaises(InvalidRequest):
            q = TestModel.objects.distinct(['description']).filter(test_id__in=[1, 2])
            len(q)
    @execute_count(1)
    def test_zero_result(self):
        q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[52])
        self.assertEqual(len(q), 0)
    @greaterthancass21
    @execute_count(2)
    def test_distinct_with_explicit_count(self):
        q = TestModel.objects.distinct(['test_id'])
        self.assertEqual(q.count(), 3)
        q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2])
        self.assertEqual(q.count(), 2)
class TestQuerySetOrdering(BaseQuerySetUsage):
    """Integration tests for order_by(): only clustering columns may be used
    for ordering, and a leading '-' requests descending order.
    """
    @execute_count(2)
    def test_order_by_success_case(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect
        # '-' prefix reverses the sort direction
        q = q.order_by('-attempt_id')
        expected_order.reverse()
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect
    def test_ordering_by_non_second_primary_keys_fail(self):
        # ordering by the partition key itself is invalid
        # kwarg filtering
        with self.assertRaises(query.QueryException):
            TestModel.objects(test_id=0).order_by('test_id')
        # kwarg filtering
        with self.assertRaises(query.QueryException):
            TestModel.objects(TestModel.test_id == 0).order_by('test_id')
    def test_ordering_by_non_primary_keys_fails(self):
        with self.assertRaises(query.QueryException):
            TestModel.objects(test_id=0).order_by('description')
    def test_ordering_on_indexed_columns_fails(self):
        # attempt_id is an index, not a clustering key, on IndexedTestModel
        with self.assertRaises(query.QueryException):
            IndexedTestModel.objects(test_id=0).order_by('attempt_id')
    @execute_count(8)
    def test_ordering_on_multiple_clustering_columns(self):
        TestMultiClusteringModel.create(one=1, two=1, three=4)
        TestMultiClusteringModel.create(one=1, two=1, three=2)
        TestMultiClusteringModel.create(one=1, two=1, three=5)
        TestMultiClusteringModel.create(one=1, two=1, three=1)
        TestMultiClusteringModel.create(one=1, two=1, three=3)
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('-two', '-three')
        assert [r.three for r in results] == [5, 4, 3, 2, 1]
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two', 'three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two').order_by('three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]
class TestQuerySetSlicing(BaseQuerySetUsage):
    """Integration tests for list-style indexing and slicing of querysets,
    including negative indices and extended slices.
    """
    @execute_count(1)
    def test_out_of_range_index_raises_error(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        with self.assertRaises(IndexError):
            q[10]
    @execute_count(1)
    def test_array_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for i in range(len(q)):
            assert q[i].attempt_id == expected_order[i]
    @execute_count(1)
    def test_negative_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        assert q[-1].attempt_id == expected_order[-1]
        assert q[-2].attempt_id == expected_order[-2]
    @execute_count(1)
    def test_slicing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q[1:3], expected_order[1:3]):
            self.assertEqual(model.attempt_id, expect)
        # extended slice with a step
        for model, expect in zip(q[0:3:2], expected_order[0:3:2]):
            self.assertEqual(model.attempt_id, expect)
    @execute_count(1)
    def test_negative_slicing(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q[-3:], expected_order[-3:]):
            self.assertEqual(model.attempt_id, expect)
        for model, expect in zip(q[:-1], expected_order[:-1]):
            self.assertEqual(model.attempt_id, expect)
        for model, expect in zip(q[1:-1], expected_order[1:-1]):
            self.assertEqual(model.attempt_id, expect)
        for model, expect in zip(q[-3:-1], expected_order[-3:-1]):
            self.assertEqual(model.attempt_id, expect)
        for model, expect in zip(q[-3:-1:2], expected_order[-3:-1:2]):
            self.assertEqual(model.attempt_id, expect)
class TestQuerySetValidation(BaseQuerySetUsage):
    """Integration tests for query validation: which filters are rejected
    client-side and which succeed on secondary indexes.
    """
    def test_primary_key_or_index_must_be_specified(self):
        """
        Tests that queries that don't have an equals relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_result=25)
            list([i for i in q])
    def test_primary_key_or_index_must_have_equal_relation_filter(self):
        """
        Tests that queries that don't have non equal (>,<, etc) relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id__gt=0)
            list([i for i in q])
    @greaterthancass20
    @execute_count(7)
    def test_indexed_field_can_be_queried(self):
        """
        Tests that queries on an indexed field will work without any primary key relations specified
        """
        q = IndexedTestModel.objects(test_result=25)
        self.assertEqual(q.count(), 4)
        # CONTAINS queries against indexed collections (fixture values 42 are
        # present; 13 is absent)
        q = IndexedCollectionsTestModel.objects.filter(test_list__contains=42)
        self.assertEqual(q.count(), 1)
        q = IndexedCollectionsTestModel.objects.filter(test_list__contains=13)
        self.assertEqual(q.count(), 0)
        q = IndexedCollectionsTestModel.objects.filter(test_set__contains=42)
        self.assertEqual(q.count(), 1)
        q = IndexedCollectionsTestModel.objects.filter(test_set__contains=13)
        self.assertEqual(q.count(), 0)
        q = IndexedCollectionsTestModel.objects.filter(test_map__contains=42)
        self.assertEqual(q.count(), 1)
        q = IndexedCollectionsTestModel.objects.filter(test_map__contains=13)
        self.assertEqual(q.count(), 0)
class TestQuerySetDelete(BaseQuerySetUsage):
    """Integration tests for queryset delete(), including validation failures
    and (on C* >= 3.0) range deletions over clustering keys.
    """
    @execute_count(9)
    def test_delete(self):
        TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45)
        assert TestModel.objects.count() == 16
        assert TestModel.objects(test_id=3).count() == 4
        TestModel.objects(test_id=3).delete()
        assert TestModel.objects.count() == 12
        assert TestModel.objects(test_id=3).count() == 0
    def test_delete_without_partition_key(self):
        """ Tests that attempting to delete a model without defining a partition key fails """
        with self.assertRaises(query.QueryException):
            TestModel.objects(attempt_id=0).delete()
    def test_delete_without_any_where_args(self):
        """ Tests that attempting to delete a whole table without any arguments will fail """
        # NOTE(review): this body is identical to test_delete_without_partition_key
        # and still filters on attempt_id; it does not actually exercise a
        # delete with no where args — confirm intent and fix upstream.
        with self.assertRaises(query.QueryException):
            TestModel.objects(attempt_id=0).delete()
    @unittest.skipIf(CASSANDRA_VERSION < '3.0', "range deletion was introduce in C* 3.0, currently running {0}".format(CASSANDRA_VERSION))
    @execute_count(18)
    def test_range_deletion(self):
        """
        Tests that range deletion work as expected
        """
        for i in range(10):
            TestMultiClusteringModel.objects().create(one=1, two=i, three=i)
        # inclusive bounds: removes two in [0, 3]
        TestMultiClusteringModel.objects(one=1, two__gte=0, two__lte=3).delete()
        self.assertEqual(6, len(TestMultiClusteringModel.objects.all()))
        # exclusive bounds: removes two == 4 only
        TestMultiClusteringModel.objects(one=1, two__gt=3, two__lt=5).delete()
        self.assertEqual(5, len(TestMultiClusteringModel.objects.all()))
        TestMultiClusteringModel.objects(one=1, two__in=[8, 9]).delete()
        self.assertEqual(3, len(TestMultiClusteringModel.objects.all()))
        TestMultiClusteringModel.objects(one__in=[1], two__gte=0).delete()
        self.assertEqual(0, len(TestMultiClusteringModel.objects.all()))
class TimeUUIDQueryModel(Model):
    """Model used to exercise the MinTimeUUID/MaxTimeUUID query functions."""
    partition = columns.UUID(primary_key=True)
    # Clustering key: orders rows within a partition by their TimeUUID.
    time = columns.TimeUUID(primary_key=True)
    data = columns.Text(required=False)
class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase):
    """Tests for the MinTimeUUID()/MaxTimeUUID() CQL query functions.

    Bare ``assert`` statements were converted to unittest assertions: they
    are stripped under ``python -O`` and give uninformative failures, and
    the rest of this file already uses ``self.assertEqual``.
    """

    @classmethod
    def setUpClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).setUpClass()
        sync_table(TimeUUIDQueryModel)

    @classmethod
    def tearDownClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass()
        drop_table(TimeUUIDQueryModel)

    @execute_count(7)
    def test_tzaware_datetime_support(self):
        """Test that using timezone aware datetime instances works with the
        MinTimeUUID/MaxTimeUUID functions.
        """
        pk = uuid4()
        midpoint_utc = datetime.utcnow().replace(tzinfo=TzOffset(0))
        midpoint_helsinki = midpoint_utc.astimezone(TzOffset(3))

        # Assert pre-condition that we have the same logical point in time,
        # expressed in two different timezones.
        self.assertEqual(midpoint_utc.utctimetuple(), midpoint_helsinki.utctimetuple())
        self.assertNotEqual(midpoint_utc.timetuple(), midpoint_helsinki.timetuple())

        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc - timedelta(minutes=1)),
            data='1')

        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc),
            data='2')

        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc + timedelta(minutes=1)),
            data='3')

        # Whichever zone the midpoint is expressed in, the rows selected on
        # either side of it must be the same.
        self.assertEqual(['1', '2'], [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_utc))])

        self.assertEqual(['1', '2'], [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_helsinki))])

        self.assertEqual(['2', '3'], [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_utc))])

        self.assertEqual(['2', '3'], [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))])

    @execute_count(8)
    def test_success_case(self):
        """ Test that the min and max time uuid functions work as expected """
        pk = uuid4()
        startpoint = datetime.utcnow()
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=1)), data='1')
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=2)), data='2')
        midpoint = startpoint + timedelta(seconds=3)
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=4)), data='3')
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=5)), data='4')

        # test kwarg filtering
        q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint))
        q = [d for d in q]
        self.assertEqual(len(q), 2, msg="Got: %s" % q)
        datas = [d.data for d in q]
        self.assertIn('1', datas)
        self.assertIn('2', datas)

        q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint))
        self.assertEqual(len(q), 2)
        datas = [d.data for d in q]
        self.assertIn('3', datas)
        self.assertIn('4', datas)

        # test query expression filtering
        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint)
        )
        q = [d for d in q]
        self.assertEqual(len(q), 2)
        datas = [d.data for d in q]
        self.assertIn('1', datas)
        self.assertIn('2', datas)

        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint)
        )
        self.assertEqual(len(q), 2)
        datas = [d.data for d in q]
        self.assertIn('3', datas)
        self.assertIn('4', datas)
class TestInOperator(BaseQuerySetUsage):
    """Tests for the CQL ``IN`` operator via kwargs and query expressions.

    Bare ``assert`` statements were converted to unittest assertions
    (stripped under ``python -O``; consistent with the rest of the file).
    """

    @execute_count(1)
    def test_kwarg_success_case(self):
        """ Tests the in operator works with the kwarg query method """
        q = TestModel.filter(test_id__in=[0, 1])
        self.assertEqual(q.count(), 8)

    @execute_count(1)
    def test_query_expression_success_case(self):
        """ Tests the in operator works with the query expression query method """
        q = TestModel.filter(TestModel.test_id.in_([0, 1]))
        self.assertEqual(q.count(), 8)

    @execute_count(5)
    def test_bool(self):
        """
        Adding coverage to cqlengine for bool types.

        @since 3.6
        @jira_ticket PYTHON-596
        @expected_result bool results should be filtered appropriately

        @test_category object_mapper
        """
        class bool_model(Model):
            k = columns.Integer(primary_key=True)
            b = columns.Boolean(primary_key=True)
            v = columns.Integer(default=3)
        sync_table(bool_model)

        bool_model.create(k=0, b=True)
        bool_model.create(k=0, b=False)
        self.assertEqual(len(bool_model.objects.all()), 2)
        self.assertEqual(len(bool_model.objects.filter(k=0, b=True)), 1)
        self.assertEqual(len(bool_model.objects.filter(k=0, b=False)), 1)

    @execute_count(3)
    def test_bool_filter(self):
        """
        Test to ensure that we don't translate boolean objects to String unnecessarily in filter clauses

        @since 3.6
        @jira_ticket PYTHON-596
        @expected_result We should not receive a server error

        @test_category object_mapper
        """
        class bool_model2(Model):
            k = columns.Boolean(primary_key=True)
            b = columns.Integer(primary_key=True)
            v = columns.Text()
        # Drop first so the table is recreated fresh for this test.
        drop_table(bool_model2)
        sync_table(bool_model2)

        bool_model2.create(k=True, b=1, v='a')
        bool_model2.create(k=False, b=1, v='b')
        self.assertEqual(len(list(bool_model2.objects(k__in=(True, False)))), 2)
@greaterthancass20
class TestContainsOperator(BaseQuerySetUsage):
    """Tests for the CQL ``CONTAINS`` operator over indexed collections."""

    @execute_count(6)
    def test_kwarg_success_case(self):
        """ Tests the CONTAINS operator works with the kwarg query method """
        q = IndexedCollectionsTestModel.filter(test_list__contains=1)
        self.assertEqual(q.count(), 2)

        q = IndexedCollectionsTestModel.filter(test_list__contains=13)
        self.assertEqual(q.count(), 0)

        q = IndexedCollectionsTestModel.filter(test_set__contains=3)
        self.assertEqual(q.count(), 2)

        q = IndexedCollectionsTestModel.filter(test_set__contains=13)
        self.assertEqual(q.count(), 0)

        q = IndexedCollectionsTestModel.filter(test_map__contains=42)
        self.assertEqual(q.count(), 1)

        q = IndexedCollectionsTestModel.filter(test_map__contains=13)
        self.assertEqual(q.count(), 0)

        # CONTAINS against non-indexed collections must be rejected client-side.
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(test_list_no_index__contains=1)
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(test_set_no_index__contains=1)
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(test_map_no_index__contains=1)
            self.assertEqual(q.count(), 0)

    @execute_count(6)
    def test_query_expression_success_case(self):
        """ Tests the CONTAINS operator works with the query expression query method """
        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(1))
        self.assertEqual(q.count(), 2)

        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(13))
        self.assertEqual(q.count(), 0)

        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(3))
        self.assertEqual(q.count(), 2)

        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(13))
        self.assertEqual(q.count(), 0)

        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(42))
        self.assertEqual(q.count(), 1)

        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(13))
        self.assertEqual(q.count(), 0)

        # Fixed copy/paste bug: the three no-index cases previously all
        # exercised test_map_no_index. Cover list, set and map to mirror
        # the kwarg variant above.
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list_no_index.contains_(1))
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set_no_index.contains_(1))
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1))
            self.assertEqual(q.count(), 0)
class TestValuesList(BaseQuerySetUsage):
    """Tests for ``QuerySet.values_list()``."""

    @execute_count(2)
    def test_values_list(self):
        """Rows are returned as plain value lists, or bare values with flat=True."""
        q = TestModel.objects.filter(test_id=0, attempt_id=1)
        item = q.values_list('test_id', 'attempt_id', 'description', 'expected_result', 'test_result').first()
        # Bare asserts replaced with assertEqual for -O safety and messages.
        self.assertEqual(item, [0, 1, 'try2', 10, 30])

        # flat=True unwraps single-column rows into bare values.
        item = q.values_list('expected_result', flat=True).first()
        self.assertEqual(item, 10)
class TestObjectsProperty(BaseQuerySetUsage):
    """Each access of ``Model.objects`` must yield a fresh, unevaluated queryset."""

    @execute_count(1)
    def test_objects_property_returns_fresh_queryset(self):
        self.assertIsNone(TestModel.objects._result_cache)
        len(TestModel.objects)  # evaluate queryset
        # The descriptor returns a new queryset, so no result cache is shared.
        self.assertIsNone(TestModel.objects._result_cache)
class PageQueryTests(BaseCassEngTestCase):
    """Results spanning multiple protocol pages must be fully materialized."""

    @execute_count(3)
    def test_paged_result_handling(self):
        if PROTOCOL_VERSION < 2:
            raise unittest.SkipTest("Paging requires native protocol 2+, currently using: {0}".format(PROTOCOL_VERSION))

        # addresses #225
        class PagingTest(Model):
            id = columns.Integer(primary_key=True)
            val = columns.Integer()
        sync_table(PagingTest)

        PagingTest.create(id=1, val=1)
        PagingTest.create(id=2, val=2)

        # Force a one-row page size so the query must page to see both rows.
        session = get_session()
        with mock.patch.object(session, 'default_fetch_size', 1):
            results = PagingTest.objects()[:]

        # Bare assert replaced with assertEqual for -O safety and messages.
        self.assertEqual(len(results), 2)
class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage):
    """Verifies that QuerySet.timeout() values are forwarded to Session.execute."""

    def _executed_timeout(self, queryset):
        """Evaluate *queryset* against a mocked Session.execute and return the
        ``timeout`` keyword that the execute call received."""
        with mock.patch.object(Session, 'execute') as mock_execute:
            list(queryset)
        return mock_execute.call_args[-1]['timeout']

    def test_default_timeout(self):
        # No explicit timeout: the driver's NOT_SET sentinel is passed through.
        self.assertEqual(self._executed_timeout(TestModel.objects()), NOT_SET)

    def test_float_timeout(self):
        self.assertEqual(self._executed_timeout(TestModel.objects().timeout(0.5)), 0.5)

    def test_none_timeout(self):
        # timeout(None) means "no timeout" and must reach execute as None.
        self.assertEqual(self._executed_timeout(TestModel.objects().timeout(None)), None)
class DMLQueryTimeoutTestCase(BaseQuerySetUsage):
    """Timeout handling on the DML (instance save) path, and the rule that a
    per-query timeout and a batch may not both be set on the same model."""

    def setUp(self):
        # Fresh, unsaved instance for every test in this case.
        self.model = TestModel(test_id=1, attempt_id=1, description='timeout test')
        super(DMLQueryTimeoutTestCase, self).setUp()

    def test_default_timeout(self):
        # Without an explicit timeout the NOT_SET sentinel reaches execute().
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.save()

        self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET)

    def test_float_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.timeout(0.5).save()

        self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5)

    def test_none_timeout(self):
        # timeout(None) means "no timeout" and must be forwarded as None.
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.timeout(None).save()

        self.assertEqual(mock_execute.call_args[-1]['timeout'], None)

    def test_timeout_then_batch(self):
        # Setting a timeout first makes joining a batch invalid...
        b = query.BatchQuery()
        m = self.model.timeout(None)
        with self.assertRaises(AssertionError):
            m.batch(b)

    def test_batch_then_timeout(self):
        # ...and vice versa: a batched model may not set its own timeout.
        b = query.BatchQuery()
        m = self.model.batch(b)
        with self.assertRaises(AssertionError):
            m.timeout(0.5)
class DBFieldModel(Model):
    """Model whose every attribute maps to a differently named CQL column
    via ``db_field`` (exercised by TestModelQueryWithDBField)."""
    k0 = columns.Integer(partition_key=True, db_field='a')
    k1 = columns.Integer(partition_key=True, db_field='b')
    c0 = columns.Integer(primary_key=True, db_field='c')
    v0 = columns.Integer(db_field='d')
    v1 = columns.Integer(db_field='e', index=True)
class DBFieldModelMixed1(Model):
    """Variant with a mix of ``db_field``-mapped and default-named columns."""
    k0 = columns.Integer(partition_key=True, db_field='a')
    k1 = columns.Integer(partition_key=True)
    c0 = columns.Integer(primary_key=True, db_field='c')
    v0 = columns.Integer(db_field='d')
    v1 = columns.Integer(index=True)
class DBFieldModelMixed2(Model):
    """Second mixed variant: the complementary columns carry ``db_field``."""
    k0 = columns.Integer(partition_key=True)
    k1 = columns.Integer(partition_key=True, db_field='b')
    c0 = columns.Integer(primary_key=True)
    v0 = columns.Integer(db_field='d')
    v1 = columns.Integer(index=True, db_field='e')
class TestModelQueryWithDBField(BaseCassEngTestCase):
    """CRUD, slice, ordering and index queries against models using
    ``db_field`` column mappings (PYTHON-351 / PYTHON-530)."""

    def setUp(self):
        # Parameter renamed ``cls`` -> ``self``: this is a per-test hook and
        # always received the instance. The (unusual) calls to the class-level
        # base hooks are preserved from the original implementation.
        super(TestModelQueryWithDBField, self).setUpClass()
        self.model_list = [DBFieldModel, DBFieldModelMixed1, DBFieldModelMixed2]
        for model in self.model_list:
            sync_table(model)

    def tearDown(self):
        super(TestModelQueryWithDBField, self).tearDownClass()
        for model in self.model_list:
            drop_table(model)

    @execute_count(33)
    def test_basic_crud(self):
        """
        Tests creation update and delete of object model queries that are using db_field mappings.

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 2, 'c0': 3, 'v0': 4, 'v1': 5}

            # create
            i = model.create(**values)
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertEqual(i, model(**values))

            # update (comment fixed; this section was mislabeled "create")
            values['v0'] = 101
            i.update(v0=values['v0'])
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertEqual(i, model(**values))

            # delete
            model.objects(k0=i.k0, k1=i.k1).delete()
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertIsNone(i)

            # recreate, then delete via the instance and the queryset
            i = model.create(**values)
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertEqual(i, model(**values))

            i.delete()
            model.objects(k0=i.k0, k1=i.k1).delete()
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertIsNone(i)

    @execute_count(21)
    def test_slice(self):
        """
        Tests slice queries for object models that are using db_field mapping

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 3, 'c0': 3, 'v0': 4, 'v1': 5}
            clustering_values = range(3)
            for c in clustering_values:
                values['c0'] = c
                i = model.create(**values)

            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values))
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0=i.c0).count(), 1)
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__lt=i.c0).count(), len(clustering_values[:-1]))
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__gt=0).count(), len(clustering_values[1:]))

    @execute_count(15)
    def test_order(self):
        """
        Tests order by queries for object models that are using db_field mapping

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 4, 'c0': 3, 'v0': 4, 'v1': 5}
            clustering_values = range(3)
            for c in clustering_values:
                values['c0'] = c
                i = model.create(**values)
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('c0').first().c0, clustering_values[0])
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('-c0').first().c0, clustering_values[-1])

    @execute_count(15)
    def test_index(self):
        """
        Tests queries using index fields for object models using db_field mapping

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 5, 'c0': 3, 'v0': 4, 'v1': 5}
            clustering_values = range(3)
            for c in clustering_values:
                values['c0'] = c
                values['v1'] = c
                i = model.create(**values)
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values))
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1)

    @execute_count(1)
    def test_db_field_names_used(self):
        """
        Tests to ensure that with generated cql update statements correctly utilize the db_field values.

        @since 3.2
        @jira_ticket PYTHON-530
        @expected_result resulting cql_statements will use the db_field values

        @test_category object_mapper
        """
        values = ('k0', 'k1', 'c0', 'v0', 'v1')

        # Test QuerySet Path
        b = BatchQuery()
        DBFieldModel.objects(k0=1).batch(b).update(
            v0=0,
            v1=9,
        )
        for value in values:
            # Python attribute names must never leak into the generated CQL;
            # assertNotIn gives a clearer failure than assertTrue(x not in y).
            self.assertNotIn(value, str(b.queries[0]))

        # Test DML path
        b2 = BatchQuery()
        dml_field_model = DBFieldModel.create(k0=1, k1=5, c0=3, v0=4, v1=5)
        dml_field_model.batch(b2).update(
            v0=0,
            v1=9,
        )
        for value in values:
            self.assertNotIn(value, str(b2.queries[0]))
class TestModelSmall(Model):
    """Minimal one-column model used by the fetch-size paging tests."""
    test_id = columns.Integer(primary_key=True)
class TestModelQueryWithFetchSize(BaseCassEngTestCase):
    """
    Test FetchSize, and ensure that results are returned correctly
    regardless of the paging size

    @since 3.1
    @jira_ticket PYTHON-324
    @expected_result results are properly retrieved and the correct size

    @test_category object_mapper
    """

    @classmethod
    def setUpClass(cls):
        super(TestModelQueryWithFetchSize, cls).setUpClass()
        sync_table(TestModelSmall)

    @classmethod
    def tearDownClass(cls):
        super(TestModelQueryWithFetchSize, cls).tearDownClass()
        drop_table(TestModelSmall)

    @execute_count(9)
    def test_defaultFetchSize(self):
        # Insert more rows than any fetch size below so queries must page.
        with BatchQuery() as b:
            for i in range(5100):
                TestModelSmall.batch(b).create(test_id=i)

        # The materialized row count must be independent of the page size,
        # including sizes just below/at/above the default (5000) and the
        # total row count (5100).
        self.assertEqual(len(TestModelSmall.objects.fetch_size(1)), 5100)
        self.assertEqual(len(TestModelSmall.objects.fetch_size(500)), 5100)
        self.assertEqual(len(TestModelSmall.objects.fetch_size(4999)), 5100)
        self.assertEqual(len(TestModelSmall.objects.fetch_size(5000)), 5100)
        self.assertEqual(len(TestModelSmall.objects.fetch_size(5001)), 5100)
        self.assertEqual(len(TestModelSmall.objects.fetch_size(5100)), 5100)
        self.assertEqual(len(TestModelSmall.objects.fetch_size(5101)), 5100)
        self.assertEqual(len(TestModelSmall.objects.fetch_size(1)), 5100)

        # Fetch sizes must be strictly positive.
        with self.assertRaises(QueryException):
            TestModelSmall.objects.fetch_size(0)
        with self.assertRaises(QueryException):
            TestModelSmall.objects.fetch_size(-1)
class People(Model):
    """Original schema for the deferred-field tests (no middle_name)."""
    __table_name__ = "people"
    last_name = columns.Text(primary_key=True, partition_key=True)
    first_name = columns.Text(primary_key=True)
    birthday = columns.DateTime()
class People2(Model):
    """Same CQL table as People but with an added middle_name column,
    simulating a live schema change between model versions."""
    __table_name__ = "people"
    last_name = columns.Text(primary_key=True, partition_key=True)
    first_name = columns.Text(primary_key=True)
    middle_name = columns.Text()
    birthday = columns.DateTime()
class TestModelQueryWithDifferedFeld(BaseCassEngTestCase):
    """
    Tests that selects with filter will defer population of known values until after the results are returned.
    I.E. Instead of generating SELECT * FROM People WHERE last_name="Smith" it will generate
    SELECT first_name, birthday FROM People WHERE last_name="Smith",
    where last_name 'Smith' will be populated post-query.

    @since 3.2
    @jira_ticket PYTHON-520
    @expected_result only needed fields are included in the query

    @test_category object_mapper
    """

    @classmethod
    def setUpClass(cls):
        super(TestModelQueryWithDifferedFeld, cls).setUpClass()
        sync_table(People)

    @classmethod
    def tearDownClass(cls):
        super(TestModelQueryWithDifferedFeld, cls).tearDownClass()
        drop_table(People)

    @execute_count(8)
    def test_defaultFetchSize(self):
        # Populate Table
        People.objects.create(last_name="Smith", first_name="John", birthday=datetime.now())
        People.objects.create(last_name="Bestwater", first_name="Alan", birthday=datetime.now())
        People.objects.create(last_name="Smith", first_name="Greg", birthday=datetime.now())
        People.objects.create(last_name="Smith", first_name="Adam", birthday=datetime.now())

        # Check query construction: the filtered column is omitted from SELECT.
        expected_fields = ['first_name', 'birthday']
        self.assertEqual(People.filter(last_name="Smith")._select_fields(), expected_fields)

        # Validate correct fields are fetched, and the deferred column is
        # still populated on the returned instances.
        smiths = list(People.filter(last_name="Smith"))
        self.assertEqual(len(smiths), 3)
        self.assertTrue(smiths[0].last_name is not None)

        # Modify table with new value
        sync_table(People2)

        # populate new format
        People2.objects.create(last_name="Smith", first_name="Chris", middle_name="Raymond", birthday=datetime.now())
        People2.objects.create(last_name="Smith", first_name="Andrew", middle_name="Micheal", birthday=datetime.now())

        # validate query construction against the extended schema
        expected_fields = ['first_name', 'middle_name', 'birthday']
        self.assertEqual(People2.filter(last_name="Smith")._select_fields(), expected_fields)

        # validate correct items are returned
        smiths = list(People2.filter(last_name="Smith"))
        self.assertEqual(len(smiths), 5)
        self.assertTrue(smiths[0].last_name is not None)
| apache-2.0 |
lmazuel/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/partition_health_state.py | 1 | 1552 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .entity_health_state import EntityHealthState
# NOTE: AutoRest-generated model; manual edits are overwritten on regeneration.
class PartitionHealthState(EntityHealthState):
    """Represents the health state of a partition, which contains the partition
    identifier and its aggregated health state.

    :param aggregated_health_state: The health state of a Service Fabric
     entity such as Cluster, Node, Application, Service, Partition, Replica
     etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
     'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param partition_id: Id of the partition whose health state is described
     by this object.
    :type partition_id: str
    """

    # Maps Python attribute names to wire-format keys/types for the serializer.
    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
    }

    def __init__(self, aggregated_health_state=None, partition_id=None):
        # The base class stores aggregated_health_state; only partition_id is new here.
        super(PartitionHealthState, self).__init__(aggregated_health_state=aggregated_health_state)
        self.partition_id = partition_id
| mit |
Azure/azure-sdk-for-python | sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2015_07_01/operations/_global_administrator_operations.py | 1 | 3625 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class; edits are overwritten on regeneration.
class GlobalAdministratorOperations(object):
    """GlobalAdministratorOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.authorization.v2015_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def elevate_access(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Elevates access for a Global Administrator.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-07-01"

        # Construct URL
        url = self.elevate_access.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        # POST with no request body against the tenant-level provider endpoint.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    elevate_access.metadata = {'url': '/providers/Microsoft.Authorization/elevateAccess'}  # type: ignore
| mit |
justintweaver/mtchi-cert-game | makahiki/apps/managers/log_mgr/admin.py | 9 | 1266 | """log model admin."""
from django.contrib import admin
from django.db import models
from django.forms.widgets import TextInput
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.log_mgr.models import MakahikiLog
from apps.admin.admin import challenge_designer_site, challenge_manager_site, developer_site
class MakahikiLogAdmin(admin.ModelAdmin):
    """Read-only admin view over request log entries (MakahikiLog)."""
    list_display = ('request_url', "remote_user", 'remote_ip', 'request_time',
                    'request_method', 'response_status')
    list_filter = ('response_status', 'remote_user')
    search_fields = ('request_url', 'remote_ip')
    ordering = ["-request_time"]
    date_hierarchy = "request_time"

    # Widen CharField inputs so long request URLs are editable comfortably.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size': '100'})},
    }

    def has_add_permission(self, request):
        # Log entries are created by the logging middleware only; disallow
        # manual creation through the admin.
        return False
# Register the log admin with the default site and each role-specific site.
admin.site.register(MakahikiLog, MakahikiLogAdmin)
challenge_designer_site.register(MakahikiLog, MakahikiLogAdmin)
challenge_manager_site.register(MakahikiLog, MakahikiLogAdmin)
developer_site.register(MakahikiLog, MakahikiLogAdmin)

# Expose the log under the "Status" info group for admins and developers.
challenge_mgr.register_admin_challenge_info_model("Status", 1, MakahikiLog, 1)
challenge_mgr.register_developer_challenge_info_model("Status", 4, MakahikiLog, 1)
| gpl-3.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/contrib/comments/admin.py | 361 | 3299 | from django.contrib import admin
from django.contrib.comments.models import Comment
from django.utils.translation import ugettext_lazy as _, ungettext
from django.contrib.comments import get_model
from django.contrib.comments.views.moderation import perform_flag, perform_approve, perform_delete
class CommentsAdmin(admin.ModelAdmin):
    """Admin configuration for the built-in Comment model, including bulk
    moderation actions (flag / approve / remove)."""

    fieldsets = (
        (None,
           {'fields': ('content_type', 'object_pk', 'site')}
        ),
        (_('Content'),
           {'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment')}
        ),
        (_('Metadata'),
           {'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')}
        ),
    )

    list_display = ('name', 'content_type', 'object_pk', 'ip_address', 'submit_date', 'is_public', 'is_removed')
    list_filter = ('submit_date', 'site', 'is_public', 'is_removed')
    date_hierarchy = 'submit_date'
    ordering = ('-submit_date',)
    raw_id_fields = ('user',)
    search_fields = ('comment', 'user__username', 'user_name', 'user_email', 'user_url', 'ip_address')
    actions = ["flag_comments", "approve_comments", "remove_comments"]

    def get_actions(self, request):
        """Strip actions the current user is not permitted to perform."""
        actions = super(CommentsAdmin, self).get_actions(request)
        # Only superusers should be able to delete the comments from the DB.
        if not request.user.is_superuser and 'delete_selected' in actions:
            actions.pop('delete_selected')
        # Moderation actions require the can_moderate permission.
        if not request.user.has_perm('comments.can_moderate'):
            if 'approve_comments' in actions:
                actions.pop('approve_comments')
            if 'remove_comments' in actions:
                actions.pop('remove_comments')
        return actions

    def flag_comments(self, request, queryset):
        self._bulk_flag(request, queryset, perform_flag,
                        lambda n: ungettext('flagged', 'flagged', n))
    flag_comments.short_description = _("Flag selected comments")

    def approve_comments(self, request, queryset):
        self._bulk_flag(request, queryset, perform_approve,
                        lambda n: ungettext('approved', 'approved', n))
    approve_comments.short_description = _("Approve selected comments")

    def remove_comments(self, request, queryset):
        self._bulk_flag(request, queryset, perform_delete,
                        lambda n: ungettext('removed', 'removed', n))
    remove_comments.short_description = _("Remove selected comments")

    def _bulk_flag(self, request, queryset, action, done_message):
        """
        Flag, approve, or remove some comments from an admin action. Actually
        calls the `action` argument to perform the heavy lifting.
        """
        n_comments = 0
        for comment in queryset:
            action(request, comment)
            n_comments += 1
        # done_message is a callable so the past-tense verb can be pluralized.
        msg = ungettext(u'1 comment was successfully %(action)s.',
                        u'%(count)s comments were successfully %(action)s.',
                        n_comments)
        self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)})
# Only register the default admin if the model is the built-in comment model
# (this won't be true if there's a custom comment app providing its own model).
if get_model() is Comment:
    admin.site.register(Comment, CommentsAdmin)
| bsd-3-clause |
liangazhou/django-rdp | packages/Django-1.8.6/tests/defer/models.py | 112 | 1184 | """
Tests for defer() and only().
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Secondary(models.Model):
    """Target of Primary.related; used to test deferral across relations."""
    first = models.CharField(max_length=50)
    second = models.CharField(max_length=50)
@python_2_unicode_compatible
class Primary(models.Model):
    """Base model whose fields are deferred by the defer()/only() tests."""
    name = models.CharField(max_length=50)
    value = models.CharField(max_length=50)
    related = models.ForeignKey(Secondary)

    def __str__(self):
        return self.name
class Child(Primary):
    # Multi-table inheritance child with no extra fields of its own.
    pass
class BigChild(Primary):
    # Inherited child that adds one field beyond Primary's.
    other = models.CharField(max_length=50)
class ChildProxy(Child):
    # Proxy of Child; exercises deferral through proxy models.
    class Meta:
        proxy = True
class RefreshPrimaryProxy(Primary):
    """Proxy of Primary whose refresh_from_db also reloads every deferred
    field whenever any deferred field is requested."""

    class Meta:
        proxy = True

    def refresh_from_db(self, using=None, fields=None, **kwargs):
        # When any requested field is deferred, widen the reload to cover
        # all deferred fields in one query.
        if fields is not None:
            requested = set(fields)
            deferred_fields = self.get_deferred_fields()
            if not requested.isdisjoint(deferred_fields):
                requested |= deferred_fields
            fields = requested
        super(RefreshPrimaryProxy, self).refresh_from_db(using, fields, **kwargs)
| apache-2.0 |
JoeJimFlood/NFLPrediction2014 | week_pre_conf.py | 1 | 4840 | import pandas as pd
import matchup
import xlsxwriter
import xlautofit
import xlrd
import sys
import time
import collections
import os
week_timer = time.time()  # wall-clock start; elapsed time is reported at the end
week_number = 'conf_matrix'  # label used in the output filename and final message
# Ordered mapping of worksheet name -> list of (home, away) matchups.
matchups = collections.OrderedDict()
matchups['Matchups'] = [('NE', 'IND'),
                        ('NE', 'SEA'),
                        ('NE', 'GB'),
                        ('IND', 'SEA'),
                        ('IND', 'GB'),
                        ('SEA', 'GB')]
location = os.getcwd().replace('\\', '/')
output_file = location + '/Weekly Forecasts/Week' + str(week_number) + '.xlsx'
# Two-pass build: pass 0 (read_data == 0) simulates every matchup and writes
# the raw numbers; pass 1 (read_data == 1) re-reads the workbook just written
# and rewrites it with auto-fitted column widths applied.
for read_data in range(2):
    week_book = xlsxwriter.Workbook(output_file)
    header_format = week_book.add_format({'align': 'center', 'bold': True, 'bottom': True})
    index_format = week_book.add_format({'align': 'right', 'bold': True})
    score_format = week_book.add_format({'num_format': '#0', 'align': 'right'})
    percent_format = week_book.add_format({'num_format': '#0%', 'align': 'right'})
    if read_data:
        # Column widths derived from the workbook written on the first pass.
        colwidths = xlautofit.even_widths_single_index(output_file)
    for game_time in matchups:
        if read_data:
            # Re-open the first-pass workbook to copy its numbers back out.
            data_book = xlrd.open_workbook(output_file)
            data_sheet = data_book.sheet_by_name(game_time)
        sheet = week_book.add_worksheet(game_time)
        # Row labels for the statistics written in rows 1-9 below.
        sheet.write_string(1, 0, 'Chance of Winning', index_format)
        sheet.write_string(2, 0, 'Expected Score', index_format)
        sheet.write_string(3, 0, '2.5th Percentile Score', index_format)
        sheet.write_string(4, 0, '10th Percentile Score', index_format)
        sheet.write_string(5, 0, '25th Percentile Score', index_format)
        sheet.write_string(6, 0, '50th Percentile Score', index_format)
        sheet.write_string(7, 0, '75th Percentile Score', index_format)
        sheet.write_string(8, 0, '90th Percentile Score', index_format)
        sheet.write_string(9, 0, '97.5th Percentile score', index_format)
        sheet.freeze_panes(0, 1)  # keep the label column visible when scrolling
        games = matchups[game_time]
        for i in range(len(games)):
            home = games[i][0]
            away = games[i][1]
            # Each matchup occupies two adjacent columns plus a spacer column.
            homecol = 3 * i + 1
            awaycol = 3 * i + 2
            sheet.write_string(0, homecol, home, header_format)
            sheet.write_string(0, awaycol, away, header_format)
            if read_data:
                # Second pass: copy the previously computed numbers verbatim.
                sheet.write_number(1, homecol, data_sheet.cell(1, homecol).value, percent_format)
                sheet.write_number(1, awaycol, data_sheet.cell(1, awaycol).value, percent_format)
                for rownum in range(2, 10):
                    sheet.write_number(rownum, homecol, data_sheet.cell(rownum, homecol).value, score_format)
                    sheet.write_number(rownum, awaycol, data_sheet.cell(rownum, awaycol).value, score_format)
            else:
                # First pass: simulate the matchup and write the win
                # probability plus the score-distribution percentiles.
                results = matchup.matchup(home, away)
                probwin = results['ProbWin']
                sheet.write_number(1, homecol, probwin[home], percent_format)
                sheet.write_number(1, awaycol, probwin[away], percent_format)
                home_dist = results['Scores'][home]
                away_dist = results['Scores'][away]
                sheet.write_number(2, homecol, home_dist['mean'], score_format)
                sheet.write_number(2, awaycol, away_dist['mean'], score_format)
                sheet.write_number(3, homecol, home_dist['2.5%'], score_format)
                sheet.write_number(3, awaycol, away_dist['2.5%'], score_format)
                sheet.write_number(4, homecol, home_dist['10%'], score_format)
                sheet.write_number(4, awaycol, away_dist['10%'], score_format)
                sheet.write_number(5, homecol, home_dist['25%'], score_format)
                sheet.write_number(5, awaycol, away_dist['25%'], score_format)
                sheet.write_number(6, homecol, home_dist['50%'], score_format)
                sheet.write_number(6, awaycol, away_dist['50%'], score_format)
                sheet.write_number(7, homecol, home_dist['75%'], score_format)
                sheet.write_number(7, awaycol, away_dist['75%'], score_format)
                sheet.write_number(8, homecol, home_dist['90%'], score_format)
                sheet.write_number(8, awaycol, away_dist['90%'], score_format)
                sheet.write_number(9, homecol, home_dist['97.5%'], score_format)
                sheet.write_number(9, awaycol, away_dist['97.5%'], score_format)
            if i != len(games) - 1:
                sheet.write_string(0, 3 * i + 3, ' ')  # spacer between matchups
        if read_data:
            for colnum in range(sheet.dim_colmax):
                sheet.set_column(colnum, colnum, colwidths[sheet.name][colnum])
    week_book.close()
print('Week ' + str(week_number) + ' predictions calculated in ' + str(round((time.time() - week_timer) / 60, 2)) + ' minutes') | mit |
DakRomo/2017Challenges | challenge_4/python/slandau/reverse_tree.py | 5 | 2555 | #!/usr/bin/env python3
class Node:
    """A single binary-tree node: a payload plus left/right child links."""
    def __init__(self, data):
        self.data = data
        # Children start empty; callers wire them up explicitly.
        self.left = self.right = None
    def __repr__(self):
        """Show the node as its stored data when printed."""
        return '%s' % (self.data,)
def reverse_tree(node: "Node"):
    """
    Mirror a binary tree in place.

    Swaps the left and right children of every node, recursing into each
    subtree exactly once.

    Bug fixed: the original, after swapping, did
        node.right = reverse_tree(node.left)
        node.left = reverse_tree(node.right)
    which re-reversed the same subtree twice and made both children alias
    the old right child, silently discarding the old left subtree.

    :param node: The root of the binary tree (may be None).
    :return: The same root node, with its subtree mirrored (None for None).
    """
    if node is None:
        return None
    # Swap the children first, then mirror each (already swapped) subtree.
    node.left, node.right = node.right, node.left
    reverse_tree(node.left)
    reverse_tree(node.right)
    return node
def create_nodes():
    """
    Build a binary tree of 201 nodes (root plus 200 children), attaching
    children in breadth-first order so the tree fills level by level.

    :return: The root node of the newly created tree.
    """
    root = Node(1)
    # FIFO frontier of nodes still waiting to receive children (BFS order);
    # see https://en.wikipedia.org/wiki/Breadth-first_search
    frontier = [root]
    label = 1
    children_to_add = 200  # Change if you want a different number of nodes
    while label <= children_to_add:
        parent = frontier.pop(0)
        label += 1
        parent.left = Node(label)
        frontier.append(parent.left)
        label += 1
        parent.right = Node(label)
        frontier.append(parent.right)
    return root
if __name__ == '__main__':
    # Build a 201-node tree, then mirror it in place.
    root = create_nodes()
    reversed_root = reverse_tree(root)
    # testing. Only use these if you have a lot of nodes.
    # NOTE(review): reverse_tree operates in place and returns its argument,
    # so `root` and `reversed_root` refer to the same (already reversed)
    # tree here; each print compares nodes within that single tree.
    print(root.left == reversed_root.right)
    print(root.left.left == reversed_root.right.right)
    print(root.left.right == reversed_root.right.left)
    print(root.left.left.left == reversed_root.right.right.right)
    print(root.left.left.right == reversed_root.right.right.left)
    print(root.left.right.left == reversed_root.right.left.right)
    print(root.left.right.right == reversed_root.right.left.left)
    print(root.right.left.left == reversed_root.left.right.right)
    print(root.right.left.right == reversed_root.left.right.left)
    print(root.right.right.left == reversed_root.left.left.right)
    print(root.right.right.right == reversed_root.left.left.left)
OpusVL/odoo | addons/account_anglo_saxon/stock.py | 8 | 2861 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class stock_move(osv.Model):
    _inherit = "stock.move"

    def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
        """ Add a reference to the stock.move in the invoice line
        In anglo-saxon the price for COGS should be taken from stock.move
        if possible (fallback on standard_price)
        """
        # Start from the standard invoice-line values, then tag the line
        # with the stock move that generated it.
        vals = super(stock_move, self)._get_invoice_line_vals(
            cr, uid, move, partner, inv_type, context=context)
        vals['move_id'] = move.id
        return vals
class stock_picking(osv.osv):
    _inherit = "stock.picking"
    _description = "Picking List"
    # Anglo-saxon accounting: for supplier invoices, repoint non-service
    # invoice lines at the stock input account (product- or category-level),
    # mapped through the invoice's fiscal position.
    def action_invoice_create(self, cr, uid, ids, journal_id=False,
            group=False, type='out_invoice', context=None):
        '''Return ids of created invoices for the pickings'''
        res = super(stock_picking,self).action_invoice_create(cr, uid, ids, journal_id, group, type, context=context)
        if type in ('in_invoice', 'in_refund'):
            for inv in self.pool.get('account.invoice').browse(cr, uid, res, context=context):
                for ol in inv.invoice_line:
                    if ol.product_id.type != 'service':
                        # Prefer the product's own stock input account...
                        oa = ol.product_id.property_stock_account_input and ol.product_id.property_stock_account_input.id
                        if not oa:
                            # ...falling back to the product category's one.
                            oa = ol.product_id.categ_id.property_stock_account_input_categ and ol.product_id.categ_id.property_stock_account_input_categ.id
                        if oa:
                            # Map the account through the fiscal position
                            # before overwriting the invoice line's account.
                            fpos = ol.invoice_id.fiscal_position or False
                            a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
                            self.pool.get('account.invoice.line').write(cr, uid, [ol.id], {'account_id': a})
        return res
| agpl-3.0 |
TyRoXx/cdm | original_sources/boost_1_59_0/tools/build/src/tools/doxproc.py | 50 | 36903 | #!/usr/bin/python
# Copyright 2006 Rene Rivera
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
'''
Processing of Doxygen generated XML.
'''
import os
import os.path
import sys
import time
import string
import getopt
import glob
import re
import xml.dom.minidom
def usage():
    """Print the command-line help text for this script."""
    print '''
Usage:
    %s options

Options:
    --xmldir Directory with the Doxygen xml result files.
    --output Write the output BoostBook to the given location.
    --id The ID of the top level BoostBook section.
    --title The title of the top level BoostBook section.
    --enable-index Generate additional index sections for classes and
    types.
''' % ( sys.argv[0] )
def get_args( argv = None ):
    """
    Parse command-line options into a settings dict.

    :param argv: argument list to parse; defaults to sys.argv[1:], now read
        at call time (the original default was evaluated once at import).
    :return: dict with keys 'xmldir', 'output', 'id', 'title', 'index'.
    Exits with usage help when --help is given.
    """
    if argv is None:
        argv = sys.argv[1:]
    spec = [
        'xmldir=',
        'output=',
        'id=',
        'title=',
        'enable-index',
        'help' ]
    options = {
        '--xmldir' : 'xml',
        '--output' : None,
        '--id' : 'dox',
        '--title' : 'Doxygen'
        }
    ( option_pairs, other ) = getopt.getopt( argv, '', spec )
    #~ Overlay the parsed options onto the defaults with a plain loop
    #~ (previously a side-effecting map() over options.__setitem__).
    for name, value in option_pairs:
        options[name] = value
    if '--help' in options:
        usage()
        sys.exit(1)
    return {
        'xmldir' : options['--xmldir'],
        'output' : options['--output'],
        'id' : options['--id'],
        'title' : options['--title'],
        'index' : '--enable-index' in options
        }
def if_attribute(node, attribute, true_value, false_value=None):
    """Return true_value when the node's attribute equals 'yes', else false_value."""
    value = node.getAttribute(attribute)
    return true_value if value == 'yes' else false_value
class Doxygen2BoostBook:
    def __init__( self, **kwargs ):
        """
        Prepare the template BoostBook document plus the bookkeeping tables
        (symbols, idmap) that later Doxygen translations fill in.
        Recognized kwargs: id, title, last_revision, index.
        """
        ##
        self.args = kwargs
        self.args.setdefault('id','')
        self.args.setdefault('title','')
        self.args.setdefault('last_revision', time.asctime())
        self.args.setdefault('index', False)
        self.id = '%(id)s.reference' % self.args
        self.args['id'] = self.id
        #~ This is our template BoostBook document we insert the generated content into.
        self.boostbook = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<section id="%(id)s" name="%(title)s" last-revision="%(last_revision)s">
    <title>%(title)s</title>
    <library-reference id="%(id)s.headers">
        <title>Headers</title>
    </library-reference>
    <index id="%(id)s.classes">
        <title>Classes</title>
    </index>
    <index id="%(id)s.index">
        <title>Index</title>
    </index>
</section>
''' % self.args )
        # Handles to the three content sections of the template above.
        self.section = {
            'headers' : self._getChild('library-reference',id='%(id)s.headers' % self.args),
            'classes' : self._getChild('index',id='%(id)s.classes' % self.args),
            'index' : self._getChild('index',id='%(id)s.index' % self.args)
            }
        #~ Remove the index sections if we aren't generating it.
        if not self.args['index']:
            self.section['classes'].parentNode.removeChild(self.section['classes'])
            self.section['classes'].unlink()
            del self.section['classes']
            self.section['index'].parentNode.removeChild(self.section['index'])
            self.section['index'].unlink()
            del self.section['index']
        #~ The symbols, per Doxygen notion, that we translated.
        self.symbols = {}
        #~ Map of Doxygen IDs and BoostBook IDs, so we can translate as needed.
        self.idmap = {}
        #~ Marks generation, to prevent redoing it.
        self.generated = False
    #~ Add an Doxygen generated XML document to the content we are translating.
    def addDox( self, document ):
        """Merge one Doxygen XML document into the accumulated translation."""
        self._translateNode(document.documentElement)
    #~ Turns the internal XML tree into an output UTF-8 string.
    def tostring( self ):
        """Finalize the translation and return the BoostBook XML as UTF-8 bytes."""
        self._generate()
        #~ return self.boostbook.toprettyxml('  ')
        return self.boostbook.toxml('utf-8')
#~ Does post-processing on the partial generated content to generate additional info
#~ now that we have the complete source documents.
def _generate( self ):
if not self.generated:
self.generated = True
symbols = self.symbols.keys()
symbols.sort()
#~ Populate the header section.
for symbol in symbols:
if self.symbols[symbol]['kind'] in ('header'):
self.section['headers'].appendChild(self.symbols[symbol]['dom'])
for symbol in symbols:
if self.symbols[symbol]['kind'] not in ('namespace', 'header'):
container = self._resolveContainer(self.symbols[symbol],
self.symbols[self.symbols[symbol]['header']]['dom'])
if container.nodeName != 'namespace':
## The current BoostBook to Docbook translation doesn't
## respect, nor assign, IDs to inner types of any kind.
## So nuke the ID entry so as not create bogus links.
del self.idmap[self.symbols[symbol]['id']]
container.appendChild(self.symbols[symbol]['dom'])
self._rewriteIDs(self.boostbook.documentElement)
#~ Rewrite the various IDs from Doxygen references to the newly created
#~ BoostBook references.
def _rewriteIDs( self, node ):
if node.nodeName in ('link'):
if (self.idmap.has_key(node.getAttribute('linkend'))):
#~ A link, and we have someplace to repoint it at.
node.setAttribute('linkend',self.idmap[node.getAttribute('linkend')])
else:
#~ A link, but we don't have a generated target for it.
node.removeAttribute('linkend')
elif hasattr(node,'hasAttribute') and node.hasAttribute('id') and self.idmap.has_key(node.getAttribute('id')):
#~ Simple ID, and we have a translation.
node.setAttribute('id',self.idmap[node.getAttribute('id')])
#~ Recurse, and iterate, depth-first traversal which turns out to be
#~ left-to-right and top-to-bottom for the document.
if node.firstChild:
self._rewriteIDs(node.firstChild)
if node.nextSibling:
self._rewriteIDs(node.nextSibling)
    def _resolveContainer( self, cpp, root ):
        """
        Find (creating namespaces as needed) the DOM node under `root` that
        should contain the C++ entity described by the `cpp` info dict,
        descending through existing nodes named by the '::' path.
        """
        container = root
        for ns in cpp['namespace']:
            node = self._getChild('namespace',name=ns,root=container)
            if not node:
                # Missing namespace level: create it on the fly.
                node = container.appendChild(
                    self._createNode('namespace',name=ns))
            container = node
        for inner in cpp['name'].split('::'):
            node = self._getChild(name=inner,root=container)
            if not node:
                # Stop at the deepest already-generated enclosing node.
                break
            container = node
        return container
def _setID( self, id, name ):
self.idmap[id] = name.replace('::','.').replace('/','.')
#~ print '--| setID:',id,'::',self.idmap[id]
    #~ Translate a given node within a given context.
    #~ The translation dispatches to a local method of the form
    #~ "_translate[_context0,...,_contextN]", and the keyword args are
    #~ passed along. If there is no translation handling method we
    #~ return None.
    def _translateNode( self, *context, **kwargs ):
        """Dispatch `node` to the most specific _translate_* handler, if any."""
        node = None
        names = [ ]
        for c in context:
            if c:
                # Context entries may be DOM nodes (use their tag name) or
                # plain strings; either way they extend the handler name.
                if not isinstance(c,xml.dom.Node):
                    suffix = '_'+c.replace('-','_')
                else:
                    suffix = '_'+c.nodeName.replace('-','_')
                node = c
                names.append('_translate')
                names = map(lambda x: x+suffix,names)
        if node:
            # Try the accumulated handler names; first match wins.
            for name in names:
                if hasattr(self,name):
                    return getattr(self,name)(node,**kwargs)
        return None
    #~ Translates the children of the given parent node, appending the results
    #~ to the indicated target. For nodes not translated by the translation method
    #~ it copies the child over and recurses on that child to translate any
    #~ possible interior nodes. Hence this will translate the entire subtree.
    def _translateChildren( self, parent, **kwargs ):
        """Translate (or copy) every child of `parent` into kwargs['target']."""
        target = kwargs['target']
        for n in parent.childNodes:
            child = self._translateNode(n,target=target)
            if child:
                target.appendChild(child)
            else:
                # No handler: shallow-copy the node, squash runs of
                # whitespace in text data, and recurse into its children.
                child = n.cloneNode(False)
                if hasattr(child,'data'):
                    child.data = re.sub(r'\s+',' ',child.data)
                target.appendChild(child)
                self._translateChildren(n,target=child)
    #~ Translate the given node as a description, into the description subnode
    #~ of the target. If no description subnode is present in the target it
    #~ is created.
    def _translateDescription( self, node, target=None, tag='description', **kwargs ):
        """Translate `node` into target's `tag` subnode, creating it if absent."""
        description = self._getChild(tag,root=target)
        if not description:
            description = target.appendChild(self._createNode(tag))
        self._translateChildren(node,target=description)
        return description
    #~ Top level translation of: <doxygen ...>...</doxygen>,
    #~ translates the children.
    def _translate_doxygen( self, node ):
        """Translate a top-level <doxygen> element; returns translated children."""
        #~ print '_translate_doxygen:', node.nodeName
        result = []
        for n in node.childNodes:
            newNode = self._translateNode(n)
            if newNode:
                result.append(newNode)
        return result
    #~ Top level translation of:
    #~ <doxygenindex ...>
    #~   <compound ...>
    #~     <member ...>
    #~       <name>...</name>
    #~     </member>
    #~     ...
    #~   </compound>
    #~   ...
    #~ </doxygenindex>
    #~ builds the class and symbol sections, if requested.
    def _translate_doxygenindex( self, node ):
        """Collect index entries from <doxygenindex> and emit the index sections."""
        #~ print '_translate_doxygenindex:', node.nodeName
        if self.args['index']:
            entries = []
            classes = []
            #~ Accumulate all the index entries we care about.
            for n in node.childNodes:
                if n.nodeName == 'compound':
                    if n.getAttribute('kind') not in ('file','dir','define'):
                        cpp = self._cppName(self._getChildData('name',root=n))
                        entry = {
                            'name' : cpp['name'],
                            'compoundname' : cpp['compoundname'],
                            'id' : n.getAttribute('refid')
                            }
                        if n.getAttribute('kind') in ('class','struct'):
                            classes.append(entry)
                        entries.append(entry)
                        # Member entries reuse the parent compound's refid.
                        for m in n.childNodes:
                            if m.nodeName == 'member':
                                cpp = self._cppName(self._getChildData('name',root=m))
                                entry = {
                                    'name' : cpp['name'],
                                    'compoundname' : cpp['compoundname'],
                                    'id' : n.getAttribute('refid')
                                    }
                                if hasattr(m,'getAttribute') and m.getAttribute('kind') in ('class','struct'):
                                    classes.append(entry)
                                entries.append(entry)
            #~ Put them in a sensible order.
            entries.sort(lambda x,y: cmp(x['name'].lower(),y['name'].lower()))
            classes.sort(lambda x,y: cmp(x['name'].lower(),y['name'].lower()))
            #~ And generate the BoostBook for them.
            self._translate_index_(entries,target=self.section['index'])
            self._translate_index_(classes,target=self.section['classes'])
        return None
    #~ Translate a set of index entries in the BoostBook output. The output
    #~ is grouped into groups of the first letter of the entry names.
    def _translate_index_(self, entries, target=None, **kwargs ):
        """Emit <indexdiv> groups (by first letter) of <indexentry> items."""
        i = 0
        targetID = target.getAttribute('id')
        while i < len(entries):
            dividerKey = entries[i]['name'][0].upper()
            divider = target.appendChild(self._createNode('indexdiv',id=targetID+'.'+dividerKey))
            divider.appendChild(self._createText('title',dividerKey))
            while i < len(entries) and dividerKey == entries[i]['name'][0].upper():
                iename = entries[i]['name']
                ie = divider.appendChild(self._createNode('indexentry'))
                ie = ie.appendChild(self._createText('primaryie',iename))
                # Several entries can share a name; link each occurrence.
                while i < len(entries) and entries[i]['name'] == iename:
                    ie.appendChild(self.boostbook.createTextNode(' ('))
                    ie.appendChild(self._createText(
                        'link',entries[i]['compoundname'],linkend=entries[i]['id']))
                    ie.appendChild(self.boostbook.createTextNode(')'))
                    i += 1
    #~ Translate a <compounddef ...>...</compounddef>,
    #~ by retranslating with the "kind" of compounddef.
    def _translate_compounddef( self, node, target=None, **kwargs ):
        """Re-dispatch a <compounddef> on its kind attribute."""
        return self._translateNode(node,node.getAttribute('kind'))
    #~ Translate a <compounddef kind="namespace"...>...</compounddef>. For
    #~ namespaces we just collect the information for later use as there is no
    #~ currently namespaces are not included in the BoostBook format. In the future
    #~ it might be good to generate a namespace index.
    def _translate_compounddef_namespace( self, node, target=None, **kwargs ):
        """Record namespace info in the symbol table; emits no output node."""
        namespace = {
            'id' : node.getAttribute('id'),
            'kind' : 'namespace',
            'name' : self._getChildData('compoundname',root=node),
            'brief' : self._getChildData('briefdescription',root=node),
            'detailed' : self._getChildData('detaileddescription',root=node),
            'parsed' : False
            }
        if self.symbols.has_key(namespace['name']):
            if not self.symbols[namespace['name']]['parsed']:
                self.symbols[namespace['name']]['parsed'] = True
                #~ for n in node.childNodes:
                    #~ if hasattr(n,'getAttribute'):
                        #~ self._translateNode(n,n.getAttribute('kind'),target=target,**kwargs)
        else:
            self.symbols[namespace['name']] = namespace
            #~ self._setID(namespace['id'],namespace['name'])
        return None
    #~ Translate a <compounddef kind="class"...>...</compounddef>, which
    #~ forwards to the kind=struct as they are the same.
    def _translate_compounddef_class( self, node, target=None, **kwargs ):
        """Classes translate exactly like structs, just with tag='class'."""
        return self._translate_compounddef_struct(node,tag='class',target=target,**kwargs)
    #~ Translate a <compounddef kind="struct"...>...</compounddef> into:
    #~ <header id="?" name="?">
    #~   <struct name="?">
    #~     ...
    #~   </struct>
    #~ </header>
    def _translate_compounddef_struct( self, node, tag='struct', target=None, **kwargs ):
        """Build the <struct>/<class> node and register it in the symbol table."""
        result = None
        includes = self._getChild('includes',root=node)
        if includes:
            ## Add the header into the output table.
            self._translate_compounddef_includes_(includes,includes,**kwargs)
            ## Compounds are the declared symbols, classes, types, etc.
            ## We add them to the symbol table, along with the partial DOM for them
            ## so that they can be organized into the output later.
            compoundname = self._getChildData('compoundname',root=node)
            compoundname = self._cppName(compoundname)
            self._setID(node.getAttribute('id'),compoundname['compoundname'])
            struct = self._createNode(tag,name=compoundname['name'].split('::')[-1])
            self.symbols[compoundname['compoundname']] = {
                'header' : includes.firstChild.data,
                'namespace' : compoundname['namespace'],
                'id' : node.getAttribute('id'),
                'kind' : tag,
                'name' : compoundname['name'],
                'dom' : struct
                }
            ## Add the children which will be the members of the struct.
            for n in node.childNodes:
                self._translateNode(n,target=struct,scope=compoundname['compoundname'])
            result = struct
        return result
    #~ Translate a <compounddef ...><includes ...>...</includes></compounddef>,
    def _translate_compounddef_includes_( self, node, target=None, **kwargs ):
        """Register the included header in the symbol table (once per name)."""
        name = node.firstChild.data
        if not self.symbols.has_key(name):
            self._setID(node.getAttribute('refid'),name)
            self.symbols[name] = {
                'kind' : 'header',
                'id' : node.getAttribute('refid'),
                'dom' : self._createNode('header',
                    id=node.getAttribute('refid'),
                    name=name)
                }
        return None
    #~ Translate a <basecompoundref...>...</basecompoundref> into:
    #~ <inherit access="?">
    #~   ...
    #~ </inherit>
    def _translate_basecompoundref( self, ref, target=None, **kwargs ):
        """Translate a base-class reference into an <inherit> child of target."""
        inherit = target.appendChild(self._createNode('inherit',
            access=ref.getAttribute('prot')))
        self._translateChildren(ref,target=inherit)
        return
    #~ Translate:
    #~ <templateparamlist>
    #~   <param>
    #~     <type>...</type>
    #~     <declname>...</declname>
    #~     <defname>...</defname>
    #~     <defval>...</defval>
    #~   </param>
    #~   ...
    #~ </templateparamlist>
    #~ Into:
    #~ <template>
    #~   <template-type-parameter name="?" />
    #~   <template-nontype-parameter name="?">
    #~     <type>?</type>
    #~     <default>?</default>
    #~   </template-nontype-parameter>
    #~ </template>
    def _translate_templateparamlist( self, templateparamlist, target=None, **kwargs ):
        """Translate the template parameter list into a <template> node."""
        template = target.appendChild(self._createNode('template'))
        for param in templateparamlist.childNodes:
            if param.nodeName == 'param':
                type = self._getChildData('type',root=param)
                defval = self._getChild('defval',root=param)
                paramKind = None
                # 'class'/'typename' parameters are type parameters;
                # everything else is a non-type parameter with its own type.
                if type in ('class','typename'):
                    paramKind = 'template-type-parameter'
                else:
                    paramKind = 'template-nontype-parameter'
                templateParam = template.appendChild(
                    self._createNode(paramKind,
                        name=self._getChildData('declname',root=param)))
                if paramKind == 'template-nontype-parameter':
                    template_type = templateParam.appendChild(self._createNode('type'))
                    self._translate_type(
                        self._getChild('type',root=param),target=template_type)
                if defval:
                    # Default value may be a <ref> or plain text.
                    value = self._getChildData('ref',root=defval.firstChild)
                    if not value:
                        value = self._getData(defval)
                    templateParam.appendChild(self._createText('default',value))
        return template
    #~ Translate:
    #~ <briefdescription>...</briefdescription>
    #~ Into:
    #~ <purpose>...</purpose>
    def _translate_briefdescription( self, brief, target=None, **kwargs ):
        """Emit the brief docs both as description text and as <purpose>."""
        self._translateDescription(brief,target=target,**kwargs)
        return self._translateDescription(brief,target=target,tag='purpose',**kwargs)
    #~ Translate:
    #~ <detaileddescription>...</detaileddescription>
    #~ Into:
    #~ <description>...</description>
    def _translate_detaileddescription( self, detailed, target=None, **kwargs ):
        """Translate the detailed docs into the <description> subnode."""
        return self._translateDescription(detailed,target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="?">...</sectiondef>
    #~ With kind specific translation.
    def _translate_sectiondef( self, sectiondef, target=None, **kwargs ):
        """Re-dispatch a <sectiondef> on its kind attribute."""
        self._translateNode(sectiondef,sectiondef.getAttribute('kind'),target=target,**kwargs)
    #~ Translate non-function sections.
    def _translate_sectiondef_x_( self, sectiondef, target=None, **kwargs ):
        """Shared helper: translate each kinded child of a non-function section."""
        for n in sectiondef.childNodes:
            if hasattr(n,'getAttribute'):
                self._translateNode(n,n.getAttribute('kind'),target=target,**kwargs)
        return None
    #~ Translate:
    #~ <sectiondef kind="public-type">...</sectiondef>
    def _translate_sectiondef_public_type( self, sectiondef, target=None, **kwargs ):
        return self._translate_sectiondef_x_(sectiondef,target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="public-attrib">...</sectiondef>
    def _translate_sectiondef_public_attrib( self, sectiondef, target=None, **kwargs):
        return self._translate_sectiondef_x_(sectiondef,target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="?-func">...</sectiondef>
    #~ All the various function group translations end up here for which
    #~ they are translated into:
    #~ <method-group name="?">
    #~   ...
    #~ </method-group>
    def _translate_sectiondef_func_( self, sectiondef, name='functions', target=None, **kwargs ):
        """Shared helper: wrap a function section in a named <method-group>."""
        members = target.appendChild(self._createNode('method-group',name=name))
        for n in sectiondef.childNodes:
            if hasattr(n,'getAttribute'):
                self._translateNode(n,n.getAttribute('kind'),target=members,**kwargs)
        return members
    #~ Translate:
    #~ <sectiondef kind="public-func">...</sectiondef>
    def _translate_sectiondef_public_func( self, sectiondef, target=None, **kwargs ):
        return self._translate_sectiondef_func_(sectiondef,
            name='public member functions',target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="public-static-func">...</sectiondef>
    def _translate_sectiondef_public_static_func( self, sectiondef, target=None, **kwargs):
        return self._translate_sectiondef_func_(sectiondef,
            name='public static functions',target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="protected-func">...</sectiondef>
    def _translate_sectiondef_protected_func( self, sectiondef, target=None, **kwargs ):
        return self._translate_sectiondef_func_(sectiondef,
            name='protected member functions',target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="private-static-func">...</sectiondef>
    def _translate_sectiondef_private_static_func( self, sectiondef, target=None, **kwargs):
        return self._translate_sectiondef_func_(sectiondef,
            name='private static functions',target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="private-func">...</sectiondef>
    def _translate_sectiondef_private_func( self, sectiondef, target=None, **kwargs ):
        return self._translate_sectiondef_func_(sectiondef,
            name='private member functions',target=target,**kwargs)
    #~ Translate:
    #~ <sectiondef kind="user-defined"><header>...</header>...</sectiondef>
    def _translate_sectiondef_user_defined( self, sectiondef, target=None, **kwargs ):
        return self._translate_sectiondef_func_(sectiondef,
            name=self._getChildData('header', root=sectiondef),target=target,**kwargs)
    #~ Translate:
    #~ <memberdef kind="typedef" id="?">
    #~   <name>...</name>
    #~ </memberdef>
    #~ To:
    #~ <typedef id="?" name="?">
    #~   <type>...</type>
    #~ </typedef>
    def _translate_memberdef_typedef( self, memberdef, target=None, scope=None, **kwargs ):
        """Translate a typedef member into a <typedef> with its <type>."""
        self._setID(memberdef.getAttribute('id'),
            scope+'::'+self._getChildData('name',root=memberdef))
        typedef = target.appendChild(self._createNode('typedef',
            id=memberdef.getAttribute('id'),
            name=self._getChildData('name',root=memberdef)))
        typedef_type = typedef.appendChild(self._createNode('type'))
        self._translate_type(self._getChild('type',root=memberdef),target=typedef_type)
        return typedef
    #~ Translate:
    #~ <memberdef kind="function" id="?" const="?" static="?" explicit="?" inline="?">
    #~   <name>...</name>
    #~ </memberdef>
    #~ To:
    #~ <method name="?" cv="?" specifiers="?">
    #~   ...
    #~ </method>
    def _translate_memberdef_function( self, memberdef, target=None, scope=None, **kwargs ):
        """Translate a function member, classifying ctor/dtor/copy-assignment."""
        name = self._getChildData('name',root=memberdef)
        self._setID(memberdef.getAttribute('id'),scope+'::'+name)
        ## Check if we have some specific kind of method.
        # Constructors/destructors/copy-assignment hang off the class node
        # itself (target.parentNode) rather than the method group.
        if name == scope.split('::')[-1]:
            kind = 'constructor'
            target = target.parentNode
        elif name == '~'+scope.split('::')[-1]:
            kind = 'destructor'
            target = target.parentNode
        elif name == 'operator=':
            kind = 'copy-assignment'
            target = target.parentNode
        else:
            kind = 'method'
        method = target.appendChild(self._createNode(kind,
            # id=memberdef.getAttribute('id'),
            name=name,
            cv=' '.join([
                if_attribute(memberdef,'const','const','').strip()
                ]),
            specifiers=' '.join([
                if_attribute(memberdef,'static','static',''),
                if_attribute(memberdef,'explicit','explicit',''),
                if_attribute(memberdef,'inline','inline','')
                ]).strip()
            ))
        ## We iterate the children to translate each part of the function.
        for n in memberdef.childNodes:
            self._translateNode(memberdef,'function',n,target=method)
        return method
    #~ Translate:
    #~ <memberdef kind="function"...><templateparamlist>...</templateparamlist></memberdef>
    def _translate_memberdef_function_templateparamlist(
            self, templateparamlist, target=None, **kwargs ):
        return self._translate_templateparamlist(templateparamlist,target=target,**kwargs)
    #~ Translate:
    #~ <memberdef kind="function"...><type>...</type></memberdef>
    #~ To:
    #~ ...<type>?</type>
    def _translate_memberdef_function_type( self, resultType, target=None, **kwargs ):
        """Translate the return type; omitted entirely when empty (e.g. ctors)."""
        methodType = self._createNode('type')
        self._translate_type(resultType,target=methodType)
        if methodType.hasChildNodes():
            target.appendChild(methodType)
        return methodType
    #~ Translate:
    #~ <memberdef kind="function"...><briefdescription>...</briefdescription></memberdef>
    def _translate_memberdef_function_briefdescription( self, description, target=None, **kwargs ):
        result = self._translateDescription(description,target=target,**kwargs)
        ## For functions if we translate the brief docs to the purpose they end up
        ## right above the regular description. And since we just added the brief to that
        ## on the previous line, don't bother with the repetition.
        # result = self._translateDescription(description,target=target,tag='purpose',**kwargs)
        return result
    #~ Translate:
    #~ <memberdef kind="function"...><detaileddescription>...</detaileddescription></memberdef>
    def _translate_memberdef_function_detaileddescription( self, description, target=None, **kwargs ):
        return self._translateDescription(description,target=target,**kwargs)
    #~ Translate:
    #~ <memberdef kind="function"...><inbodydescription>...</inbodydescription></memberdef>
    def _translate_memberdef_function_inbodydescription( self, description, target=None, **kwargs ):
        return self._translateDescription(description,target=target,**kwargs)
    #~ Translate:
    #~ <memberdef kind="function"...><param>...</param></memberdef>
    def _translate_memberdef_function_param( self, param, target=None, **kwargs ):
        return self._translate_param(param,target=target,**kwargs)
#~ Translate:
#~ <memberdef kind="variable" id="?">
#~ <name>...</name>
#~ <type>...</type>
#~ </memberdef>
#~ To:
#~ <data-member id="?" name="?">
#~ <type>...</type>
#~ </data-member>
def _translate_memberdef_variable( self, memberdef, target=None, scope=None, **kwargs ):
self._setID(memberdef.getAttribute('id'),
scope+'::'+self._getChildData('name',root=memberdef))
data_member = target.appendChild(self._createNode('data-member',
id=memberdef.getAttribute('id'),
name=self._getChildData('name',root=memberdef)))
data_member_type = data_member.appendChild(self._createNode('type'))
self._translate_type(self._getChild('type',root=memberdef),target=data_member_type)
    #~ Translate:
    #~ <memberdef kind="enum" id="?">
    #~   <name>...</name>
    #~   ...
    #~ </memberdef>
    #~ To:
    #~ <enum id="?" name="?">
    #~   ...
    #~ </enum>
    def _translate_memberdef_enum( self, memberdef, target=None, scope=None, **kwargs ):
        """Translate an enum <memberdef> into a BoostBook <enum>, recursing into its children."""
        # Register the fully qualified enum name for later <ref> resolution.
        self._setID(memberdef.getAttribute('id'),
            scope+'::'+self._getChildData('name',root=memberdef))
        enum = target.appendChild(self._createNode('enum',
            id=memberdef.getAttribute('id'),
            name=self._getChildData('name',root=memberdef)))
        # Dispatch each child (e.g. <enumvalue>) through the 'enum' handler family.
        for n in memberdef.childNodes:
            self._translateNode(memberdef,'enum',n,target=enum,scope=scope,**kwargs)
        return enum
    #~ Translate:
    #~ <memberdef kind="enum"...>
    #~   <enumvalue id="?">
    #~     <name>...</name>
    #~     <initializer>...</initializer>
    #~   </enumvalue>
    #~ </memberdef>
    #~ To:
    #~ <enumvalue id="?" name="?">
    #~   <default>...</default>
    #~ </enumvalue>
    def _translate_memberdef_enum_enumvalue( self, enumvalue, target=None, scope=None, **kwargs ):
        """Translate one <enumvalue> into a BoostBook <enumvalue> (initializer -> <default>)."""
        self._setID(enumvalue.getAttribute('id'),
            scope+'::'+self._getChildData('name',root=enumvalue))
        value = target.appendChild(self._createNode('enumvalue',
            id=enumvalue.getAttribute('id'),
            name=self._getChildData('name',root=enumvalue)))
        initializer = self._getChild('initializer',root=enumvalue)
        if initializer:
            # NOTE(review): the <default> node is appended to *target* (the parent
            # <enum>) rather than to the new *value* node, which the mapping comment
            # above suggests was intended -- confirm before changing.
            self._translateChildren(initializer,
                target=target.appendChild(self._createNode('default')))
        return value
#~ Translate:
#~ <param>
#~ <type>...</type>
#~ <declname>...</declname>
#~ <defval>...</defval>
#~ </param>
#~ To:
#~ <parameter name="?">
#~ <paramtype>...</paramtype>
#~ ...
#~ </parameter>
def _translate_param( self, param, target=None, **kwargs):
parameter = target.appendChild(self._createNode('parameter',
name=self._getChildData('declname',root=param)))
paramtype = parameter.appendChild(self._createNode('paramtype'))
self._translate_type(self._getChild('type',root=param),target=paramtype)
defval = self._getChild('defval',root=param)
if defval:
self._translateChildren(self._getChild('defval',root=param),target=parameter)
return parameter
    #~ Translate:
    #~ <ref kindref="?" ...>...</ref>
    def _translate_ref( self, ref, **kwargs ):
        """Dispatch a <ref> element to the handler matching its kindref attribute."""
        return self._translateNode(ref,ref.getAttribute('kindref'))
    #~ Translate:
    #~ <ref refid="?" kindref="compound">...</ref>
    #~ To:
    #~ <link linkend="?"><classname>...</classname></link>
    def _translate_ref_compound( self, ref, **kwargs ):
        """Translate a compound <ref> into a <link> wrapping a <classname>."""
        result = self._createNode('link',linkend=ref.getAttribute('refid'))
        classname = result.appendChild(self._createNode('classname'))
        self._translateChildren(ref,target=classname)
        return result
    #~ Translate:
    #~ <ref refid="?" kindref="member">...</ref>
    #~ To:
    #~ <link linkend="?">...</link>
    def _translate_ref_member( self, ref, **kwargs ):
        """Translate a member <ref> into a plain <link> around its children."""
        result = self._createNode('link',linkend=ref.getAttribute('refid'))
        self._translateChildren(ref,target=result)
        return result
    #~ Translate:
    #~ <type>...</type>
    def _translate_type( self, type, target=None, **kwargs ):
        """Translate a <type> element into *target*, then censor unreadable types."""
        result = self._translateChildren(type,target=target,**kwargs)
        #~ Filter types to clean up various readability problems, most notably
        #~ with really long types.
        # NOTE(review): under Python 3, toxml('utf-8') returns bytes, which would
        # break the str comparisons below; this module targets Python 2.
        xml = target.toxml('utf-8');
        if (
            xml.startswith('<type>boost::mpl::') or
            xml.startswith('<type>BOOST_PP_') or
            re.match('<type>boost::(lazy_)?(enable|disable)_if',xml)
            ):
            # Replace the whole translated type with an "unspecified" marker.
            while target.firstChild:
                target.removeChild(target.firstChild)
            target.appendChild(self._createText('emphasis','unspecified'))
        return result
def _getChild( self, tag = None, id = None, name = None, root = None ):
if not root:
root = self.boostbook.documentElement
for n in root.childNodes:
found = True
if tag and found:
found = found and tag == n.nodeName
if id and found:
if n.hasAttribute('id'):
found = found and n.getAttribute('id') == id
else:
found = found and n.hasAttribute('id') and n.getAttribute('id') == id
if name and found:
found = found and n.hasAttribute('name') and n.getAttribute('name') == name
if found:
#~ print '--|', n
return n
return None
    def _getChildData( self, tag, **kwargs ):
        """Return the stripped text content of the first matching child element."""
        return self._getData(self._getChild(tag,**kwargs),**kwargs)
    def _getData( self, node, **kwargs ):
        """Return the stripped data of *node*'s first text child, or '' when absent."""
        if node:
            text = self._getChild('#text',root=node)
            if text:
                return text.data.strip()
        return ''
def _cppName( self, type ):
parts = re.search('^([^<]+)[<]?(.*)[>]?$',type.strip().strip(':'))
result = {
'compoundname' : parts.group(1),
'namespace' : parts.group(1).split('::')[0:-1],
'name' : parts.group(1).split('::')[-1],
'specialization' : parts.group(2)
}
if result['namespace'] and len(result['namespace']) > 0:
namespace = '::'.join(result['namespace'])
while (
len(result['namespace']) > 0 and (
not self.symbols.has_key(namespace) or
self.symbols[namespace]['kind'] != 'namespace')
):
result['name'] = result['namespace'].pop()+'::'+result['name']
namespace = '::'.join(result['namespace'])
return result
def _createNode( self, tag, **kwargs ):
result = self.boostbook.createElement(tag)
for k in kwargs.keys():
if kwargs[k] != '':
if k == 'id':
result.setAttribute('id',kwargs[k])
else:
result.setAttribute(k,kwargs[k])
return result
def _createText( self, tag, data, **kwargs ):
result = self._createNode(tag,**kwargs)
data = data.strip()
if len(data) > 0:
result.appendChild(self.boostbook.createTextNode(data))
return result
def main( xmldir=None, output=None, id=None, title=None, index=False ):
    """Convert the Doxygen XML files in *xmldir* into a BoostBook document.

    Namespace files are fed first so the symbol table is complete before
    declarations are read; the result is written to *output* (or stdout).
    """
    #~ print '--- main: xmldir = %s, output = %s' % (xmldir,output)
    input = glob.glob( os.path.abspath( os.path.join( xmldir, "*.xml" ) ) )
    # BUG FIX: 'input.sort' was missing the call parentheses, so the method was
    # merely referenced and the file list was never actually sorted; call it so
    # processing order is deterministic.
    input.sort()
    translator = Doxygen2BoostBook(id=id, title=title, index=index)
    #~ Feed in the namespaces first to build up the set of namespaces
    #~ and definitions so that lookup is unambiguous when reading in the definitions.
    namespace_files = filter(
        lambda x:
            os.path.basename(x).startswith('namespace'),
        input)
    decl_files = filter(
        lambda x:
            not os.path.basename(x).startswith('namespace') and not os.path.basename(x).startswith('_'),
        input)
    for dox in namespace_files:
        #~ print '--|',os.path.basename(dox)
        translator.addDox(xml.dom.minidom.parse(dox))
    for dox in decl_files:
        #~ print '--|',os.path.basename(dox)
        translator.addDox(xml.dom.minidom.parse(dox))
    if output:
        output = open(output,'w')
    else:
        output = sys.stdout
    if output:
        output.write(translator.tostring())

main( **get_args() )
| mit |
kifcaliph/odoo | addons/website/models/ir_http.py | 41 | 13868 | # -*- coding: utf-8 -*-
import datetime
import hashlib
import logging
import os
import re
import traceback
import werkzeug
import werkzeug.routing
import werkzeug.utils
import openerp
from openerp.addons.base import ir
from openerp.addons.base.ir import ir_qweb
from openerp.addons.website.models.website import slug, url_for, _UNSLUG_RE
from openerp.http import request
from openerp.tools import config
from openerp.osv import orm
logger = logging.getLogger(__name__)
class RequestUID(object):
    """Lightweight placeholder object: every keyword argument passed to the
    constructor becomes an attribute of the instance."""
    def __init__(self, **kw):
        for attribute, value in kw.items():
            setattr(self, attribute, value)
class ir_http(orm.AbstractModel):
    """Website-aware HTTP dispatching: language-prefixed routing, GeoIP
    lookup, attachment serving and website-styled error pages."""
    _inherit = 'ir.http'

    # Maximum number of internal reroutes before giving up (loop protection).
    rerouting_limit = 10
    # Lazily-initialized GeoIP resolver: None = not tried yet, False = unavailable.
    geo_ip_resolver = None

    def _get_converters(self):
        """Expose the website 'model' and 'page' URL converters."""
        return dict(
            super(ir_http, self)._get_converters(),
            model=ModelConverter,
            page=PageConverter,
        )

    def _auth_method_public(self):
        """Authenticate as the public user when no session user is logged in."""
        # TODO: select user_id from matching website
        if not request.session.uid:
            request.uid = self.pool['ir.model.data'].xmlid_to_res_id(request.cr, openerp.SUPERUSER_ID, 'base.public_user')
        else:
            request.uid = request.session.uid

    def _dispatch(self):
        """Main request dispatch: resolve the handler, record GeoIP data,
        then apply website language handling and rerouting before
        delegating to the parent dispatcher."""
        first_pass = not hasattr(request, 'website')
        request.website = None
        func = None
        try:
            func, arguments = self._find_handler()
            request.website_enabled = func.routing.get('website', False)
        except werkzeug.exceptions.NotFound:
            # either we have a language prefixed route, either a real 404
            # in all cases, website processes them
            request.website_enabled = True
        request.website_multilang = request.website_enabled and func and func.routing.get('multilang', True)

        if 'geoip' not in request.session:
            record = {}
            if self.geo_ip_resolver is None:
                try:
                    import GeoIP
                    # updated database can be downloaded on MaxMind website
                    # http://dev.maxmind.com/geoip/legacy/install/city/
                    geofile = config.get('geoip_database')
                    if os.path.exists(geofile):
                        self.geo_ip_resolver = GeoIP.open(geofile, GeoIP.GEOIP_STANDARD)
                    else:
                        self.geo_ip_resolver = False
                        logger.warning('GeoIP database file %r does not exists', geofile)
                except ImportError:
                    self.geo_ip_resolver = False
            if self.geo_ip_resolver and request.httprequest.remote_addr:
                record = self.geo_ip_resolver.record_by_addr(request.httprequest.remote_addr) or {}
            request.session['geoip'] = record

        if request.website_enabled:
            try:
                if func:
                    self._authenticate(func.routing['auth'])
                else:
                    self._auth_method_public()
            except Exception as e:
                return self._handle_exception(e)

            request.redirect = lambda url, code=302: werkzeug.utils.redirect(url_for(url), code)
            request.website = request.registry['website'].get_current_website(request.cr, request.uid, context=request.context)
            langs = [lg[0] for lg in request.website.get_languages()]
            path = request.httprequest.path.split('/')
            if first_pass:
                if request.website_multilang:
                    # If the url doesn't contain the lang and it's the first
                    # connection, try to retrieve the user's language preference.
                    if not path[1] in langs and not request.httprequest.cookies.get('session_id'):
                        if request.lang not in langs:
                            # Try to find a similar lang. Eg: fr_BE and fr_FR
                            short = request.lang.split('_')[0]
                            langs_withshort = [lg[0] for lg in request.website.get_languages() if lg[0].startswith(short)]
                            if len(langs_withshort):
                                request.lang = langs_withshort[0]
                            else:
                                request.lang = request.website.default_lang_code
                        # We redirect with the right language in url
                        if request.lang != request.website.default_lang_code:
                            path.insert(1, request.lang)
                            path = '/'.join(path) or '/'
                            return request.redirect(path + '?' + request.httprequest.query_string)
                else:
                    request.lang = request.website.default_lang_code

            request.context['lang'] = request.lang
            if not func:
                # No handler matched the raw path: strip a leading language
                # segment if present and retry dispatching on the bare path.
                if path[1] in langs:
                    request.lang = request.context['lang'] = path.pop(1)
                    path = '/'.join(path) or '/'
                    if request.lang == request.website.default_lang_code:
                        # If language is in the url and it is the default language, redirect
                        # to url without language so google doesn't see duplicate content
                        return request.redirect(path + '?' + request.httprequest.query_string, code=301)
                return self.reroute(path)
            # bind modified context
            request.website = request.website.with_context(request.context)
        return super(ir_http, self)._dispatch()

    def reroute(self, path):
        """Re-dispatch the request on *path*, guarding against reroute loops
        and too-deep reroute chains."""
        if not hasattr(request, 'rerouting'):
            request.rerouting = [request.httprequest.path]
        if path in request.rerouting:
            raise Exception("Rerouting loop is forbidden")
        request.rerouting.append(path)
        if len(request.rerouting) > self.rerouting_limit:
            raise Exception("Rerouting limit exceeded")
        request.httprequest.environ['PATH_INFO'] = path
        # void werkzeug cached_property. TODO: find a proper way to do this
        for key in ('path', 'full_path', 'url', 'base_url'):
            request.httprequest.__dict__.pop(key, None)
        return self._dispatch()

    def _postprocess_args(self, arguments, rule):
        """Resolve RequestUID placeholders against the real request uid and
        301-redirect when the canonical (possibly language-prefixed) URL
        differs from the requested one."""
        super(ir_http, self)._postprocess_args(arguments, rule)

        for key, val in arguments.items():
            # Replace uid placeholder by the current request.uid
            if isinstance(val, orm.BaseModel) and isinstance(val._uid, RequestUID):
                arguments[key] = val.sudo(request.uid)

        try:
            _, path = rule.build(arguments)
            assert path is not None
        # NOTE(review): Python 2-only 'except E, e' syntax (module targets py2);
        # other handlers in this class already use the portable 'as e' form.
        except Exception, e:
            return self._handle_exception(e, code=404)

        if getattr(request, 'website_multilang', False) and request.httprequest.method in ('GET', 'HEAD'):
            generated_path = werkzeug.url_unquote_plus(path)
            current_path = werkzeug.url_unquote_plus(request.httprequest.path)
            if generated_path != current_path:
                if request.lang != request.website.default_lang_code:
                    path = '/' + request.lang + path
                if request.httprequest.query_string:
                    path += '?' + request.httprequest.query_string
                return werkzeug.utils.redirect(path, code=301)

    def _serve_attachment(self):
        """Serve a binary ir.attachment whose url matches the request path,
        honoring conditional (ETag / Last-Modified) requests."""
        domain = [('type', '=', 'binary'), ('url', '=', request.httprequest.path)]
        attach = self.pool['ir.attachment'].search_read(request.cr, openerp.SUPERUSER_ID, domain, ['__last_update', 'datas', 'mimetype'], context=request.context)
        if attach:
            wdate = attach[0]['__last_update']
            datas = attach[0]['datas']
            response = werkzeug.wrappers.Response()
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            try:
                response.last_modified = datetime.datetime.strptime(wdate, server_format + '.%f')
            except ValueError:
                # just in case we have a timestamp without microseconds
                response.last_modified = datetime.datetime.strptime(wdate, server_format)
            response.set_etag(hashlib.sha1(datas).hexdigest())
            response.make_conditional(request.httprequest)
            if response.status_code == 304:
                return response
            response.mimetype = attach[0]['mimetype'] or 'application/octet-stream'
            # attachment payloads are stored base64-encoded (py2 str.decode codec)
            response.data = datas.decode('base64')
            return response

    def _handle_exception(self, exception, code=500):
        """Render website-styled error pages (403/404/500...) for website
        requests; non-website requests keep the stock behavior."""
        # This is done first as the attachment path may
        # not match any HTTP controller, so the request
        # may not be website-enabled.
        attach = self._serve_attachment()
        if attach:
            return attach

        is_website_request = bool(getattr(request, 'website_enabled', False) and request.website)
        if not is_website_request:
            # Don't touch non website requests exception handling
            return super(ir_http, self)._handle_exception(exception)
        else:
            try:
                response = super(ir_http, self)._handle_exception(exception)
                if isinstance(response, Exception):
                    exception = response
                else:
                    # if parent explicitly returns a plain response, then we don't touch it
                    return response
            # NOTE(review): Python 2-only 'except E, e' syntax, see _postprocess_args.
            except Exception, e:
                exception = e

            values = dict(
                exception=exception,
                traceback=traceback.format_exc(exception),
            )

            code = getattr(exception, 'code', code)

            if isinstance(exception, openerp.exceptions.AccessError):
                code = 403

            if isinstance(exception, ir_qweb.QWebException):
                values.update(qweb_exception=exception)
                if isinstance(exception.qweb.get('cause'), openerp.exceptions.AccessError):
                    code = 403

            if isinstance(exception, werkzeug.exceptions.HTTPException) and code is None:
                # Hand-crafted HTTPException likely coming from abort(),
                # usually for a redirect response -> return it directly
                return exception

            if code == 500:
                logger.error("500 Internal Server Error:\n\n%s", values['traceback'])
                if 'qweb_exception' in values:
                    # Offer the customized views involved in the failing template
                    # so the error page can propose resetting them.
                    view = request.registry.get("ir.ui.view")
                    views = view._views_get(request.cr, request.uid, exception.qweb['template'], request.context)
                    to_reset = [v for v in views if v.model_data_id.noupdate is True and not v.page]
                    values['views'] = to_reset
            elif code == 403:
                logger.warn("403 Forbidden:\n\n%s", values['traceback'])

            values.update(
                status_message=werkzeug.http.HTTP_STATUS_CODES[code],
                status_code=code,
            )

            if not request.uid:
                self._auth_method_public()

            try:
                html = request.website._render('website.%s' % code, values)
            except Exception:
                # Fall back to the generic error template when no template
                # exists for this specific status code.
                html = request.website._render('website.http_error', values)

            return werkzeug.wrappers.Response(html, status=code, content_type='text/html;charset=utf-8')
class ModelConverter(ir.ir_http.ModelConverter):
    """URL converter mapping slugged segments ("name-id") to browse records,
    with sitemap generation support via an optional search domain."""

    def __init__(self, url_map, model=False, domain='[]'):
        super(ModelConverter, self).__init__(url_map, model)
        # Domain is stored as its string source and evaluated lazily in generate().
        self.domain = domain
        self.regex = _UNSLUG_RE.pattern

    def to_url(self, value):
        """Serialize a record to its slug form for URL building."""
        return slug(value)

    def to_python(self, value):
        """Turn a slug URL segment back into a (lazily-uid-bound) browse record."""
        m = re.match(self.regex, value)
        # The uid is not known yet at converter time; _postprocess_args
        # replaces this placeholder with the real request uid.
        _uid = RequestUID(value=value, match=m, converter=self)
        record_id = int(m.group(2))
        if record_id < 0:
            # limited support for negative IDs due to our slug pattern, assume abs() if not found
            if not request.registry[self.model].exists(request.cr, _uid, [record_id]):
                record_id = abs(record_id)
        return request.registry[self.model].browse(
            request.cr, _uid, record_id, context=request.context)

    def generate(self, cr, uid, query=None, args=None, context=None):
        """Yield sitemap entries (id, rec_name) for records matching the
        converter domain, optionally filtered by *query*."""
        obj = request.registry[self.model]
        # NOTE(review): eval() on self.domain executes arbitrary code; the
        # domain string comes from route declarations (trusted code), not from
        # user input -- keep it that way.
        domain = eval( self.domain, (args or {}).copy())
        if query:
            domain.append((obj._rec_name, 'ilike', '%'+query+'%'))
        for record in obj.search_read(cr, uid, domain=domain, fields=['write_date',obj._rec_name], context=context):
            if record.get(obj._rec_name, False):
                yield {'loc': (record['id'], record[obj._rec_name])}
class PageConverter(werkzeug.routing.PathConverter):
    """ Only point of this converter is to bundle pages enumeration logic """

    def generate(self, cr, uid, query=None, args=None, context=None):
        """Yield sitemap records for every ir.ui.view flagged as a page.

        The homepage is skipped (it is indexed as '/'), and *query*, when
        given, filters pages by substring match on their xml_id.
        """
        # CONSISTENCY: default changed from the mutable '{}' to None, matching
        # ModelConverter.generate; 'args' is never used in this body.
        View = request.registry['ir.ui.view']
        views = View.search_read(cr, uid, [['page', '=', True]],
            fields=['xml_id','priority','write_date'], order='name', context=context)
        for view in views:
            xid = view['xml_id'].startswith('website.') and view['xml_id'][8:] or view['xml_id']
            # the 'page/homepage' url is indexed as '/', avoid having the same page referenced twice
            # when we will have an url mapping mechanism, replace this by a rule: page/homepage --> /
            if xid=='homepage': continue
            if query and query.lower() not in xid.lower():
                continue
            record = {'loc': xid}
            # BUG FIX: the Python 2-only '<>' operator is a syntax error on
            # Python 3; '!=' is equivalent and valid on both. 16 is the
            # default ir.ui.view priority, so only non-default values are emitted.
            if view['priority'] != 16:
                record['__priority'] = min(round(view['priority'] / 32.0,1), 1)
            if view['write_date']:
                record['__lastmod'] = view['write_date'][:10]
            yield record
| agpl-3.0 |
VirtueSecurity/aws-extender | BappModules/boto/glacier/exceptions.py | 185 | 2195 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
class UnexpectedHTTPResponseError(Exception):
    """Raised when a Glacier HTTP response status is not among the expected
    ones; carries the status, raw body and, when the body is the standard
    JSON error document, its error code."""

    def __init__(self, expected_responses, response):
        self.status = response.status
        self.body = response.read()
        self.code = None
        try:
            parsed = json.loads(self.body)
            self.code = parsed["code"]
            msg = ('Expected %s, got ' % expected_responses
                   + '(%d, code=%s, message=%s)' % (response.status,
                                                    self.code,
                                                    parsed["message"]))
        except Exception:
            # The body was not the JSON error document we expected;
            # fall back to reporting the raw body.
            msg = 'Expected %s, got (%d, %s)' % (expected_responses,
                                                 response.status,
                                                 self.body)
        super(UnexpectedHTTPResponseError, self).__init__(msg)
# Base class for errors raised while working with Glacier archives.
class ArchiveError(Exception):
    pass
# Raised when uploading an archive to Glacier fails.
class UploadArchiveError(ArchiveError):
    pass
# Raised when downloading an archive from Glacier fails.
class DownloadArchiveError(ArchiveError):
    pass
# Raised when a downloaded archive's tree hash does not match the expected value.
class TreeHashDoesNotMatchError(ArchiveError):
    pass
| mit |
wfxiang08/django197 | django/contrib/gis/gdal/raster/const.py | 253 | 1539 | """
GDAL - Constant definitions
"""
from ctypes import (
c_double, c_float, c_int16, c_int32, c_ubyte, c_uint16, c_uint32,
)
# See http://www.gdal.org/gdal_8h.html#a22e22ce0a55036a96f652765793fb7a4
GDAL_PIXEL_TYPES = {
    0: 'GDT_Unknown',  # Unknown or unspecified type
    1: 'GDT_Byte',  # Eight bit unsigned integer
    2: 'GDT_UInt16',  # Sixteen bit unsigned integer
    3: 'GDT_Int16',  # Sixteen bit signed integer
    4: 'GDT_UInt32',  # Thirty-two bit unsigned integer
    5: 'GDT_Int32',  # Thirty-two bit signed integer
    6: 'GDT_Float32',  # Thirty-two bit floating point
    7: 'GDT_Float64',  # Sixty-four bit floating point
    8: 'GDT_CInt16',  # Complex Int16
    9: 'GDT_CInt32',  # Complex Int32
    10: 'GDT_CFloat32',  # Complex Float32
    11: 'GDT_CFloat64',  # Complex Float64
}

# A list of gdal datatypes that are integers.
GDAL_INTEGER_TYPES = [1, 2, 3, 4, 5]

# Lookup values to convert GDAL pixel type indices into ctypes objects.
# The GDAL band-io works with ctypes arrays to hold data to be written
# or to hold the space for data to be read into. The lookup below helps
# selecting the right ctypes object for a given gdal pixel type.
# Entries are indexed by the GDAL_PIXEL_TYPES keys above; None marks pixel
# types (unknown and complex) that have no ctypes equivalent.
GDAL_TO_CTYPES = [
    None, c_ubyte, c_uint16, c_int16, c_uint32, c_int32,
    c_float, c_double, None, None, None, None
]

# List of resampling algorithms that can be used to warp a GDALRaster.
GDAL_RESAMPLE_ALGORITHMS = {
    'NearestNeighbour': 0,
    'Bilinear': 1,
    'Cubic': 2,
    'CubicSpline': 3,
    'Lanczos': 4,
    'Average': 5,
    'Mode': 6,
}
| bsd-3-clause |
marcialhernandez/Proyecto-titulacion | Pytaxo/Modulos/Definiciones/MC_definicionPareada.py | 1 | 23065 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#import sys
import itertools, hashlib, threading,copy, logging
from archivos import nombres, xmlSalida
from clases import plantilla,alternativa
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
logging.basicConfig( level=logging.DEBUG, format='[%(levelname)s] (%(threadName)-10s) %(message)s',)
class Total(object):
    """Thread-safe accumulator shared by the question-generation threads."""

    def __init__(self, valorInicial=0):
        self.lock = threading.Lock()
        self.valor = valorInicial

    def incrementar(self, incremento):
        """Atomically add *incremento* to the running total.

        IDIOM FIX: the manual acquire/try/finally/release sequence is replaced
        by the 'with' statement, which guarantees the lock is released even if
        the addition raises.
        """
        logging.debug('Waiting for lock')
        with self.lock:
            logging.debug('Acquired lock')
            self.valor = self.valor + incremento
# Flattens a list of lists of alternatives into a list of alternatives
# (each inner group is collapsed into one combined alternative).
def comprimeAlternativas(MatrizAlternativas):
    """Collapse every group of alternatives in *MatrizAlternativas* into a
    single alternative apiece via comprimeAlternativasSingle."""
    return [comprimeAlternativasSingle(grupo) for grupo in MatrizAlternativas]
# Collapses a list of alternatives into a single alternative
# (each "alternative" is really a group of terms that are themselves alternatives).
def comprimeAlternativasSingle(listaAlternativas):
    """Merge a group of term-alternatives into one combined alternative.

    Keys are concatenated, distinct types collected, scores averaged and
    glosses/comments joined as quoted, space-separated strings. A group mixing
    'solucion' and 'distractor' terms is classified as 'distractor'.
    """
    llave=''
    tipo=''
    puntaje=0
    glosa=''
    comentarios=''
    for cadaAlternativa in listaAlternativas:
        llave+=cadaAlternativa.llave
        # Collect each distinct term type only once.
        if cadaAlternativa.tipo not in tipo:
            tipo+=cadaAlternativa.tipo+' '
        puntaje+=int(cadaAlternativa.puntaje)
        glosa+='"'+cadaAlternativa.glosa+'" ' # trailing space stripped via rstrip() below
        if hasattr(cadaAlternativa,'comentario')==True:
            comentarios+='"'+cadaAlternativa.comentario+'" '
        else:
            # Default comments (user-facing Spanish strings, kept as-is).
            if cadaAlternativa.tipo=='solucion':
                comentarios+='"Termino correcto"'+' '
            else:
                comentarios+='"Sin comentario"'+' '
    # One distractor term is enough to make the whole group a distractor.
    if 'solucion' in tipo and 'distractor' in tipo:
        tipo='distractor'
    #puntaje=float(puntaje)/cantidadAlternativas
    #print puntaje
    return alternativa.alternativa(llave,tipo.rstrip(),float(puntaje)/len(listaAlternativas),glosa.rstrip(),comentario=comentarios.rstrip())
# Does not guarantee order and does not accept unhashable items
# such as list() values (e.g. matrices).
def quitaDuplicados(seq):
    """Return a list of the distinct elements of *seq*.

    BUG FIX: the previous ``{}.fromkeys(seq).keys()`` returns a ``dict_keys``
    view (not a list) under Python 3, breaking callers expecting a sequence;
    ``list(dict.fromkeys(seq))`` yields a real list on both Python 2 and 3.
    """
    return list(dict.fromkeys(seq))
def retornaSignificadoCadena(cadenaSimbolos,xmlEntradaObject,distractores,solucion,cantidadAlternativas,**kwuargs):
    """Expand a '+'-separated specification string (e.g. "1+2") into the list
    of valid alternative sets: each set holds *cantidadAlternativas* distractor
    combinations plus the compressed solution, optionally sorted per the
    'orderBy' keyword (largo/alfabetico, creciente/decreciente).
    """
    listaCombinatoria=list()
    listaConjuntoAlternativas=list()
    listaConjuntoDistractores=None
    # Case with several '+'-joined commands: union the pools of each token.
    if '+' in cadenaSimbolos:
        for simbolos in quitaDuplicados(cadenaSimbolos.split('+')):
            for conjunto in retornaSignificadoSimbolo(simbolos, xmlEntradaObject, distractores, solucion):
                if conjunto not in listaCombinatoria:
                    listaCombinatoria.append(conjunto)
        listaConjuntoDistractores=list(itertools.combinations(listaCombinatoria, cantidadAlternativas))
    else:
        listaConjuntoDistractores=list(itertools.combinations(retornaSignificadoSimbolo(cadenaSimbolos, xmlEntradaObject, distractores, solucion), cantidadAlternativas))
    if len(listaConjuntoDistractores)>0:
        for conjunto in listaConjuntoDistractores:
            conjunto=list(conjunto)
            # The (compressed) solution is always appended as the last alternative.
            conjunto.append(comprimeAlternativasSingle(solucion))
            if 'orderBy' in kwuargs.keys():
                if kwuargs['orderBy'].lower()=='largocreciente':
                    conjunto.sort(key=lambda x:len(x.glosa))
                elif kwuargs['orderBy'].lower()=='largodecreciente':
                    conjunto.sort(key=lambda x:len(x.glosa),reverse=True)
                elif kwuargs['orderBy'].lower()=='alfabeticocreciente':
                    # NOTE(review): 'x.glosa.lower' is missing '()' -- this sorts by
                    # bound-method objects, not by the lowercased text. Same below.
                    conjunto.sort(key=lambda x:x.glosa.lower)
                elif kwuargs['orderBy'].lower()=='alfabeticodecreciente':
                    conjunto.sort(key=lambda x:x.glosa.lower,reverse=True)
                else:
                    # Unknown criterion: keep order, solution stays last.
                    pass
                # Add the set to the collection of valid alternative sets.
                listaConjuntoAlternativas.append(conjunto)
            # Without an 'orderBy' command nothing is sorted, so the solution
            # alternative always remains last.
            else:
                listaConjuntoAlternativas.append(conjunto)
    return listaConjuntoAlternativas
def retornaSignificadoSimbolo(simbolo,xmlEntradaObject,distractores,solucion):
    """Interpret one specification token: a positive integer N yields the
    distractor pool built with up to N replacements; any other token
    (non-numeric, zero, negative) yields an empty list."""
    token = simbolo.strip().lower()
    if token.isdigit() and int(token) > 0:
        return pozoDistractoresNesimo(xmlEntradaObject, distractores, int(token), solucion=solucion)
    return list()
def posiblesSolucionesYDistractoresConjunto(xmlEntradaObject,conjuntoTerminos):
    """Return, for the given set of definitions, every candidate solution
    (the cartesian product of each definition's possible terms) together
    with the flat list of their distractor terms.

    Result keys: 'soluciones' (list of tuples) and 'distractores' (list).
    """
    terminosPorDefinicion = xmlEntradaObject.alternativas['terminos']
    distractoresPorDefinicion = xmlEntradaObject.alternativas['distractores']
    # One list of candidate terms per definition, in definition order.
    opcionesPorDefinicion = [list(terminosPorDefinicion[definicion])
                             for definicion in conjuntoTerminos]
    distractores = list()
    for definicion in conjuntoTerminos:
        # Membership test avoids KeyError for definitions without distractors.
        if definicion in distractoresPorDefinicion:
            distractores.extend(distractoresPorDefinicion[definicion])
    return {
        # Every possible solution of the current variant.
        'soluciones': list(itertools.product(*opcionesPorDefinicion)),
        'distractores': distractores,
    }
#def pozoDistractoresNesimo(xmlEntradaObject,solucion,distractores,cantidadDistractores,**kwargs):#cantidadDistractores,pozoNesimo)
def pozoDistractoresNesimo(xmlEntradaObject,distractores,cantidadReemplazoDistractores,**kwargs):#cantidadDistractores,pozoNesimo)
    """Recursively build distractor variants of a solution by replacing up to
    *cantidadReemplazoDistractores* of its terms with their matching
    distractors (matched by 'llave').

    First call passes the base tuple via kwargs['solucion']; recursive calls
    carry the accumulated pool via kwargs['nuevoPozo']. The final result is
    the pool compressed through comprimeAlternativas().
    """
    # First cycle: seed the pool from the solution itself.
    if 'solucion' in kwargs.keys():
        pozoDistractoresN=list()
        # Zero replacements requested (or no distractors available): the pool
        # is just the solution itself.
        if cantidadReemplazoDistractores<=0 or len(distractores)==0:
            pozoDistractoresN.append(kwargs['solucion'])
            return pozoDistractoresN
        # Clamp the requested replacement count to what the available
        # distractor definitions can actually support.
        if len(xmlEntradaObject.alternativas['distractores'].keys())<=cantidadReemplazoDistractores:
            cantidadReemplazoDistractores=len(xmlEntradaObject.alternativas['distractores'].keys())
        for cadaDistractor in distractores:
            contador=0
            for cadaTermino in kwargs['solucion']:
                if cadaTermino.llave==cadaDistractor.llave:
                    # Copy the solution and swap in the distractor at the
                    # matching position.
                    conjuntoDistractor=list(kwargs['solucion'])
                    conjuntoDistractor[contador]=cadaDistractor
                    if conjuntoDistractor not in pozoDistractoresN:
                        pozoDistractoresN.append(conjuntoDistractor)
                    break
                contador+=1
        cantidadReemplazoDistractores=cantidadReemplazoDistractores-1
        # End of recursion.
        if cantidadReemplazoDistractores==0:
            return comprimeAlternativas(pozoDistractoresN)
        else:
            return pozoDistractoresNesimo(xmlEntradaObject,distractores,cantidadReemplazoDistractores,nuevoPozo=pozoDistractoresN)
    # N-th cycle: extend every set already in the pool with one more replacement.
    if 'nuevoPozo' in kwargs.keys():
        pozoDistractoresD=list()
        for cadaConjunto in kwargs['nuevoPozo']:
            for cadaDistractor in distractores:
                contador=0
                for cadaTermino in cadaConjunto:
                    # Only terms still marked as part of the solution may be
                    # replaced (already-replaced slots are skipped).
                    if cadaTermino.llave==cadaDistractor.llave and cadaTermino.tipo=='solucion':
                        conjuntoDistractor=cadaConjunto[:]
                        conjuntoDistractor[contador]=cadaDistractor
                        if conjuntoDistractor not in pozoDistractoresD:
                            pozoDistractoresD.append(conjuntoDistractor)
                        break
                    contador+=1
        cantidadReemplazoDistractores=cantidadReemplazoDistractores-1
        if cantidadReemplazoDistractores==0:
            return comprimeAlternativas(pozoDistractoresD)
        else:
            return pozoDistractoresNesimo(xmlEntradaObject,distractores,cantidadReemplazoDistractores,nuevoPozo=pozoDistractoresD)
# Each produced set is not a solution, because one of its terms is really a
# distractor: exactly one element of the solution is replaced by one entry of
# the distractor list. The replacement is matched by key ('llave'), i.e. a
# term is only ever replaced by its own distractor.
def pozoDistractoresSingle(xmlEntradaObject,solucion,distractores):
    """Return every variant of *solucion* obtained by swapping exactly one
    term for a distractor sharing the same 'llave' (duplicates removed)."""
    variantes = []
    for distractor in distractores:
        for posicion, termino in enumerate(solucion):
            if termino.llave == distractor.llave:
                # Copy the solution, then substitute at the matching slot.
                candidata = list(solucion)
                candidata[posicion] = distractor
                if candidata not in variantes:
                    variantes.append(candidata)
                # Only the first matching position is replaced per distractor.
                break
    return variantes
def pozoDistractoresDouble(xmlEntradaObject,solucion,distractores):
    """Return every variant of *solucion* with exactly two terms replaced by
    distractors: extends each single-replacement variant by swapping one of
    its remaining 'solucion' terms for a matching-key distractor."""
    pozoDistractoresD=list()
    # If there is at most one kind of distractor, a second replacement is
    # impossible, so nothing is produced.
    if len(xmlEntradaObject.alternativas['distractores'].keys())>1:
        pozoDistractoresS=pozoDistractoresSingle(xmlEntradaObject, solucion, distractores)
        for cadaConjunto in pozoDistractoresS:
            for cadaDistractor in distractores:
                contador=0
                for cadaTermino in cadaConjunto:
                    # Only terms still flagged as solution may be replaced; the
                    # slot already holding a distractor is skipped.
                    if cadaTermino.llave==cadaDistractor.llave and cadaTermino.tipo=='solucion':
                        conjuntoDistractor=cadaConjunto[:]
                        conjuntoDistractor[contador]=cadaDistractor
                        if conjuntoDistractor not in pozoDistractoresD:
                            pozoDistractoresD.append(conjuntoDistractor)
                        break
                    contador+=1
    return pozoDistractoresD
# Accepts kwuarg 'especificacion' of the form:
#   "2+1" -> alternatives derived from distractors with 1 and 2 replacements
#   "1+1" -> alternatives with 1 replacement (duplicates removed)
#   "2"   -> alternatives with exactly 2 replacements
#   any number of '+'-joined terms is allowed (more terms, more resources)
#   "0"   -> empty list; "0+1" -> exactly 1 replacement
# Default specification when omitted: "1+2".
# Accepts kwuarg 'orderBy' to sort each alternative set by a criterion
# (largo/alfabetico, creciente/decreciente); without it there is no ordering
# and the solution is always the last alternative.
def agrupamientoPareado(xmlEntradaObject,solucion,distractores,cantidadAlternativas,**kwuargs):
    """Build the valid alternative sets for a paired question by expanding
    the distractor specification (default "1+2") via retornaSignificadoCadena."""
    especificacion = kwuargs.get('especificacion', '1+2')
    if 'orderBy' in kwuargs:
        return retornaSignificadoCadena(especificacion, xmlEntradaObject, distractores,
                                        solucion, cantidadAlternativas - 1,
                                        orderBy=kwuargs['orderBy'])
    return retornaSignificadoCadena(especificacion, xmlEntradaObject, distractores,
                                    solucion, cantidadAlternativas - 1)
#Valida el correcto funcionamiento, reemplazando el return por listaAlternativas
# print type(listaAlternativas)
# print len(listaAlternativas)
# for conjunto in listaAlternativas:
# print 'conjunto'
# for elem in conjunto:
# print elem.tipo
def procesoPareador(conjuntoDefiniciones,plantillaSalida,xmlEntradaObject,cantidadAlternativas,banderaEstado,directorioSalida, total): #Se tiene que pasar una copia de subraizSalida si se quiere utilizar con hebras
#falta revisar como hacer que todas las hebras puedan incrementar el total, para luego imprimirlo
#de momento cada una lo incrementa, pero este efecto no se ve reflejado en la variable global
subRaizSalida=None
for elem in plantillaSalida.iter('opciones'):
subRaizSalida=elem
# Proceso necesario cuando era monohebra
# for elem in subRaizSalida.getchildren():
# subRaizSalida.remove(elem)
contador=0
# for elem in subRaizSalida.getchildren():
# subRaizSalida.remove(elem)
seccionDefiniciones=ET.SubElement(subRaizSalida,'definiciones')
idPreguntaGenerada=""
for definicion in conjuntoDefiniciones:
subRaizDefinicion=ET.SubElement(seccionDefiniciones,'definicion')
subRaizDefinicion.text=definicion
idPreguntaGenerada+=definicion+' '
idPreguntaGenerada=hashlib.sha256(idPreguntaGenerada.rstrip()).hexdigest()
for x in plantillaSalida.iter('plantilla'): x.set('id', idPreguntaGenerada)
#Seccion donde estaran los terminos
seccionTerminos=ET.SubElement(subRaizSalida,'terminos')
#Seccion donde estaran las alternativas
seccionAlternativas=ET.SubElement(subRaizSalida,'conjuntoAlternativas')
#Aqui se presenta cada posible pregunta
solucionesYDistractores=posiblesSolucionesYDistractoresConjunto(xmlEntradaObject,conjuntoDefiniciones)
#Para cada solucion de la variante actual
for solucion in solucionesYDistractores['soluciones']:
listaTerminos=list(solucion)+solucionesYDistractores['distractores']
#Aqui se presenta el ordenamiento en que aparecen los terminos en el enunciado
#Por default sera alfabetico creciente
if xmlEntradaObject.ordenTerminos.lower()=='alfabeticocreciente':
listaTerminos.sort(key=lambda x:x.glosa.lower())
elif xmlEntradaObject.ordenTerminos.lower()=='alfabeticodecreciente':
listaTerminos.sort(key=lambda x:x.glosa.lower, reverse=True)
elif xmlEntradaObject.ordenTerminos.lower()=='largocreciente':
listaTerminos.sort(key=lambda x:len(x.glosa))
elif xmlEntradaObject.ordenTerminos.lower()=='largodecreciente':
listaTerminos.sort(key=lambda x:len(x.glosa), reverse=True)
else:
#No se ordena
pass
#Por cada ciclo debo eliminar los hijos de la seccion terminos y poner los nuevos
for elem in seccionTerminos.getchildren():
seccionTerminos.remove(elem)
#Agrego los posibles terminos
for cadaTermino in listaTerminos:
subRaizTermino=ET.SubElement(seccionTerminos,'posiblePar')
subRaizTermino.text=cadaTermino.glosa
subRaizTermino.set('id',cadaTermino.llave)
#solucion provisional
ordenamientoDiferente=0 #indica que es el mismo grupo de alternativas pero estan ordenados de forma diferente
for cadaConjunto in agrupamientoPareado(xmlEntradaObject,solucion,solucionesYDistractores['distractores'],cantidadAlternativas,especificacion=xmlEntradaObject.composicionDistractores, orderBy=xmlEntradaObject.criterioOrdenDistractores):
for elem in seccionAlternativas.getchildren():
seccionAlternativas.remove(elem)
glosasAlternativas=""
idAlternativas=""
for cadaTermino in cadaConjunto:
subRaizAlternativa=ET.SubElement(seccionAlternativas,'alternativa')
subRaizAlternativa.text=cadaTermino.glosa
glosasAlternativas+=cadaTermino.glosa
subRaizAlternativa.set('puntaje',str(cadaTermino.puntaje))
subRaizAlternativa.set('id',cadaTermino.llave)
subRaizAlternativa.set('tipo',cadaTermino.tipo)
subRaizComentario=ET.SubElement(subRaizAlternativa,'comentario')
subRaizComentario.text=cadaTermino.comentario
idAlternativas+=cadaTermino.identificador()
subRaizSalida.set('idAlternativasGenerada',idAlternativas.rstrip())
ordenamientoDiferente+=1
# if contador==4:
# print str(contador)+' Creados'
# return 0
if banderaEstado==True:
#Se instancia la plantilla como un elemento de element tree
xmlSalida.escribePlantilla(directorioSalida,xmlEntradaObject.tipo,threading.currentThread().getName()+ idPreguntaGenerada+' '+idAlternativas.rstrip()+' '+str(ordenamientoDiferente) +' '+str(contador), plantillaSalida,'xml')
else:
print ET.tostring(plantillaSalida, 'utf-8', method="xml")
contador+=1
#Condicion de carrera
total.incrementar(contador)
#Descomentar para validar funcionamiento
#print threading.currentThread().getName()+' '+str(total.valor)
return 0
#print contador
#pass
#Funcion que analiza la plantilla que corresponde a este tipo de pregunta
#A esa plantilla se le añaden los datos obtenidos desde la entrada de
#su mismo tipo, luego una vez completada la pregunta, se imprime
#por pantalla para que la informacion pueda ser recogida por el programa
#principal
def recogePlantillas(nombreDirectorioPlantillas, tipoPregunta):
    """Collect the templates of the given question type from a directory.

    For each XML file whose <plantilla> 'tipo' attribute equals tipoPregunta,
    build its statement text (the <glosa> text plus one ' @termino'
    placeholder per <termino> tag) and wrap it in a plantilla object.

    :param nombreDirectorioPlantillas: directory containing template files
    :param tipoPregunta: question type the templates must declare
    :returns: list of matching plantilla objects
    """
    plantillasValidas = list()
    for archivoPlantilla in nombres.especificDirectoryNames(nombreDirectorioPlantillas):
        nombreDirectorioArchivoPlantilla = nombres.directorioReal(
            nombreDirectorioPlantillas + "/" + archivoPlantilla)
        arbolXmlPlantillaEntrada = ET.ElementTree()
        arbolXmlPlantillaEntrada.parse(nombreDirectorioArchivoPlantilla)
        # BUG FIX: the validity flag must be reset for every file; previously
        # it was set once before the loop, so after the first matching
        # template every subsequent file was accepted regardless of its type.
        validaPlantilla = False
        for subRaiz in arbolXmlPlantillaEntrada.iter('plantilla'):
            if subRaiz.attrib['tipo'] == tipoPregunta:
                validaPlantilla = True
        if validaPlantilla == True:
            enunciado = ""
            for subRaiz in arbolXmlPlantillaEntrada.iter():
                if subRaiz.tag == 'glosa':
                    enunciado = enunciado + subRaiz.text
                if subRaiz.tag == 'termino':
                    enunciado = enunciado + ' @termino'
            plantillasValidas.append(plantilla.plantilla(tipoPregunta, enunciado.rstrip()))
    return plantillasValidas
def retornaPlantilla(nombreDirectorioPlantillas,xmlEntradaObject,cantidadAlternativas, tipoPregunta, **kwuargs): #,xmlEntradaObject):
#Esto era requerido cuando se tomaba como tipo el nombre de la plantilla, y no un atributo incrustado en el xml
#tipoPregunta=nombres.nombreScript(__file__)
#Variable compartida, pues cada hebra aumenta el total de archivos creados
total=Total()
hilos=[]
listaDeConjuntoDefiniciones=list()
banderaEstado=False
if 'directorioSalida' in kwuargs.keys():
banderaEstado=True #Indica si se debe imprimir o no el estado de la cantidad de salidas
for plantilla in recogePlantillas(nombreDirectorioPlantillas,tipoPregunta):
plantillaSalida=xmlSalida.plantillaGenericaSalida()
for subRaizSalida in plantillaSalida.iter():
if subRaizSalida.tag=='plantilla':
subRaizSalida.set('tipo',xmlEntradaObject.tipo)
subRaizSalida.set('id',xmlEntradaObject.id)
subRaizSalida.set('idOrigenEntrada',xmlEntradaObject.idOrigenEntrada)
if subRaizSalida.tag=='enunciado':
subRaizSalida.text=plantilla.enunciado
if subRaizSalida.tag=='opciones':
cantidadCombinacionesDefiniciones=0
#Si la cantidad de combinaciones de definiciones es 0, no se genera nada
if xmlEntradaObject.cantidadCombinacionesDefiniciones==0:
pass
#Si la cantidad de combinaciones de definiciones es 1, se trabaja con la entrada
#tal como se ingreso por el usuario
elif xmlEntradaObject.cantidadCombinacionesDefiniciones==1:
listaDeConjuntoDefiniciones.append(xmlEntradaObject.alternativas['terminos'].keys())
else:
listaDeConjuntoDefiniciones=xmlEntradaObject.barajaDefiniciones()
for conjuntoDefiniciones in listaDeConjuntoDefiniciones:
if xmlEntradaObject.cantidadCombinacionesDefiniciones==cantidadCombinacionesDefiniciones:
break
t = threading.Thread(target=procesoPareador, args=(conjuntoDefiniciones,copy.deepcopy(plantillaSalida),xmlEntradaObject, cantidadAlternativas,banderaEstado,kwuargs['directorioSalida'],total) )
t.setDaemon(True)
hilos.append(t)
t.start()
t.join()
cantidadCombinacionesDefiniciones+=1
#Se imprime solo si se especifica directorio de salida
if banderaEstado==True:
print str(total.valor)+' Creados'
return 0
# --- Script entry: generate paired-definition questions for each input XML ---
# (dataset-dump residue removed from the final line)

# Input/output directories and the question type handled by this generator.
nombreDirectorioEntradas = "./Entradas/Definiciones"
nombreDirectorioPlantillas = "./Plantillas"
nombreDirectorioSalidas = "Salidas"
nombreCompilador = "python"
tipoPregunta = 'definicionPareada'
listaXmlEntrada = list()
# The number of alternatives now comes embedded as an attribute in each
# input XML, so no command-line argument is parsed here.
if nombres.validaExistenciasSubProceso(nombreDirectorioEntradas) == True:
    listaXmlEntrada = xmlSalida.lecturaXmls(nombreDirectorioEntradas, tipoPregunta)
for cadaXmlEntrada in listaXmlEntrada:
    retornaPlantilla(nombreDirectorioPlantillas, cadaXmlEntrada,
                     cadaXmlEntrada.cantidadAlternativas, tipoPregunta,
                     directorioSalida=nombreDirectorioSalidas + '/' + tipoPregunta)
curoverse/libcloud | libcloud/compute/drivers/vpsnet.py | 59 | 6640 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VPS.net driver
"""
import base64
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.common.types import InvalidCredsError, MalformedResponseError
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
API_HOST = 'api.vps.net'
API_VERSION = 'api10json'
RAM_PER_NODE = 256
DISK_PER_NODE = 10
BANDWIDTH_PER_NODE = 250
class VPSNetResponse(JsonResponse):
    """Response handler for the VPS.net API.

    Falls back to the raw body when the API returns non-JSON content.
    """

    def parse_body(self):
        # Some VPS.net endpoints return plain-text bodies; use them as-is.
        try:
            return super(VPSNetResponse, self).parse_body()
        except MalformedResponseError:
            return self.body

    def success(self):
        # vps.net wrongly uses 406 (and 403) for invalid auth creds.
        if self.status in (406, 403):
            raise InvalidCredsError()
        return True

    def parse_error(self):
        try:
            errors = super(VPSNetResponse, self).parse_body()['errors'][0]
        except MalformedResponseError:
            return self.body
        return "\n".join(errors)
class VPSNetConnection(ConnectionUserAndKey):
    """
    Connection class for the VPS.net driver
    """

    host = API_HOST
    responseCls = VPSNetResponse
    allow_insecure = False

    def add_default_headers(self, headers):
        # HTTP Basic auth: base64("user_id:key").
        credentials = '%s:%s' % (self.user_id, self.key)
        encoded = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % encoded
        return headers
class VPSNetNodeDriver(NodeDriver):
    """
    VPS.net node driver
    """

    type = Provider.VPSNET
    api_name = 'vps_net'
    name = "vps.net"
    website = 'http://vps.net/'
    connectionCls = VPSNetConnection

    def _to_node(self, vm):
        """Convert an API virtual-machine dict into a libcloud Node."""
        # The API only distinguishes running / not running here.
        if vm['running']:
            state = NodeState.RUNNING
        else:
            state = NodeState.PENDING

        n = Node(id=vm['id'],
                 name=vm['label'],
                 state=state,
                 public_ips=[vm.get('primary_ip_address', None)],
                 private_ips=[],
                 extra={'slices_count': vm['slices_count']},
                 # Number of nodes consumed by VM
                 driver=self.connection.driver)
        return n

    def _to_image(self, image, cloud):
        """Convert an API template dict into a NodeImage, prefixing the
        cloud name so images from different clouds stay distinguishable."""
        image = NodeImage(id=image['id'],
                          name="%s: %s" % (cloud, image['label']),
                          driver=self.connection.driver)
        return image

    def _to_size(self, num):
        """Build the NodeSize for a VM spanning *num* slices (nodes)."""
        # NOTE(review): price multiplies _get_price_per_node(num) (itself
        # already num * single price) by num again, i.e. price ~ num**2.
        # Confirm against VPS.net pricing before relying on it.
        size = NodeSize(id=num,
                        name="%d Node" % (num,),
                        ram=RAM_PER_NODE * num,
                        disk=DISK_PER_NODE,
                        bandwidth=BANDWIDTH_PER_NODE * num,
                        price=self._get_price_per_node(num) * num,
                        driver=self.connection.driver)
        return size

    def _get_price_per_node(self, num):
        """Return num times the price of a single slice."""
        single_node_price = self._get_size_price(size_id='1')
        return num * single_node_price

    def create_node(self, name, image, size, **kwargs):
        """Create a new VPS.net node

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_backups_enabled: Enable automatic backups
        :type ex_backups_enabled: ``bool``

        :keyword ex_fqdn: Fully Qualified domain of the node
        :type ex_fqdn: ``str``
        """
        headers = {'Content-Type': 'application/json'}
        request = {'virtual_machine':
                   {'label': name,
                    'fqdn': kwargs.get('ex_fqdn', ''),
                    'system_template_id': image.id,
                    'backups_enabled': kwargs.get('ex_backups_enabled', 0),
                    'slices_required': size.id}}

        res = self.connection.request('/virtual_machines.%s' % (API_VERSION,),
                                      data=json.dumps(request),
                                      headers=headers,
                                      method='POST')
        node = self._to_node(res.object['virtual_machine'])
        return node

    def reboot_node(self, node):
        """Reboot the node; returns True unconditionally on a parseable
        response (the parsed node is discarded)."""
        res = self.connection.request(
            '/virtual_machines/%s/%s.%s' % (node.id,
                                            'reboot',
                                            API_VERSION),
            method="POST")
        node = self._to_node(res.object['virtual_machine'])
        return True

    def list_sizes(self, location=None):
        """List the sizes purchasable with the account's current slices."""
        res = self.connection.request('/nodes.%s' % (API_VERSION,))
        # NOTE(review): this counts slices whose 'virtual_machine_id' is
        # truthy (i.e. already assigned to a VM) -- the name
        # 'available_nodes' suggests the opposite; verify against the API.
        available_nodes = len([size for size in res.object
                               if size['slice']['virtual_machine_id']])
        sizes = [self._to_size(i) for i in range(1, available_nodes + 1)]
        return sizes

    def destroy_node(self, node):
        """Destroy the node; True when the API answered HTTP 200."""
        res = self.connection.request('/virtual_machines/%s.%s'
                                      % (node.id, API_VERSION),
                                      method='DELETE')
        return res.status == 200

    def list_nodes(self):
        """List all virtual machines of the account as Nodes."""
        res = self.connection.request('/virtual_machines.%s' % (API_VERSION,))
        return [self._to_node(i['virtual_machine']) for i in res.object]

    def list_images(self, location=None):
        """List every system template across all available clouds."""
        res = self.connection.request('/available_clouds.%s' % (API_VERSION,))

        images = []
        for cloud in res.object:
            label = cloud['cloud']['label']
            templates = cloud['cloud']['system_templates']
            images.extend([self._to_image(image, label)
                           for image in templates])

        return images

    def list_locations(self):
        """VPS.net exposes a single fixed location."""
        return [NodeLocation(0, "VPS.net Western US", 'US', self)]
| apache-2.0 |
indico/indico | indico/modules/events/timetable/views/__init__.py | 4 | 5581 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import posixpath
from itertools import groupby
from operator import attrgetter
from flask import render_template, request, session
from sqlalchemy.orm import joinedload
from indico.core import signals
from indico.modules.events.layout import theme_settings
from indico.modules.events.management.views import WPEventManagement
from indico.modules.events.timetable.models.entries import TimetableEntryType
from indico.modules.events.timetable.views.weeks import inject_week_timetable
from indico.modules.events.util import get_theme
from indico.modules.events.views import WPConferenceDisplayBase
from indico.util.signals import values_from_signal
from indico.web.flask.templating import register_template_hook, template_hook
register_template_hook('week-meeting-body', inject_week_timetable)
class WPManageTimetable(WPEventManagement):
    """Management-area page for editing an event's timetable."""

    template_prefix = 'events/timetable/'
    sidemenu_option = 'timetable'
    bundles = ('module_events.contributions.js',)

    def __init__(self, rh, event_, **kwargs):
        # Let plugins contribute extra timetable toolbar buttons.
        plugin_links = dict(values_from_signal(signals.event.timetable_buttons.send(self)))
        WPEventManagement.__init__(self, rh, event_, custom_links=plugin_links, **kwargs)
class WPDisplayTimetable(WPConferenceDisplayBase):
    """Display-area page showing an event's timetable to attendees."""
    template_prefix = 'events/timetable/'
    menu_entry_name = 'timetable'
@template_hook('meeting-body')
def inject_meeting_body(event, **kwargs):
    """Render the timetable body for a meeting-style event page.

    Honours the ``showDate``, ``showSession``, ``detailLevel`` and ``view``
    request arguments, filters entries by the current user's access rights
    and renders the timetable template of the event's theme.
    """
    event.preload_all_acl_entries()
    event_tz = event.display_tzinfo
    show_date = request.args.get('showDate') or 'all'
    show_session = request.args.get('showSession') or 'all'
    detail_level = request.args.get('detailLevel') or 'contribution'
    view = request.args.get('view')

    # Eager-load the whole timetable tree (blocks, breaks, contributions,
    # subcontributions and their people/references/rooms/notes) up front
    # to minimize the number of DB queries while rendering.
    children_strategy = joinedload('children')
    children_strategy.joinedload('session_block').joinedload('person_links')
    children_strategy.joinedload('break_')
    children_contrib_strategy = children_strategy.subqueryload('contribution')
    children_contrib_strategy.joinedload('person_links')
    children_contrib_strategy.joinedload('subcontributions')
    children_contrib_strategy.joinedload('references')
    children_contrib_strategy.joinedload('own_room')
    children_contrib_strategy.joinedload('note')
    children_subcontrib_strategy = children_contrib_strategy.joinedload('subcontributions')
    children_subcontrib_strategy.joinedload('person_links')
    children_subcontrib_strategy.joinedload('references')
    contrib_strategy = joinedload('contribution')
    contrib_strategy.joinedload('person_links')
    contrib_strategy.joinedload('references')
    contrib_strategy.joinedload('note')
    subcontrib_strategy = contrib_strategy.joinedload('subcontributions')
    subcontrib_strategy.joinedload('person_links')
    subcontrib_strategy.joinedload('references')
    subcontrib_strategy.joinedload('note')
    # try to minimize the number of DB queries
    options = [contrib_strategy,
               children_strategy,
               joinedload('session_block').joinedload('person_links'),
               joinedload('session_block').joinedload('own_room'),
               joinedload('break_')]
    entries = []
    show_siblings_location = False
    show_children_location = {}
    # Keep only top-level entries matching the date/session/detail filters
    # that the current user may access; breaks are always visible.
    for entry in event.timetable_entries.filter_by(parent=None).options(*options):
        if show_date != 'all' and entry.start_dt.astimezone(event_tz).date().isoformat() != show_date:
            continue
        if (entry.type == TimetableEntryType.CONTRIBUTION and
                (detail_level not in ('contribution', 'all') or show_session != 'all')):
            continue
        elif (entry.type == TimetableEntryType.SESSION_BLOCK and show_session != 'all' and
                str(entry.object.session.friendly_id) != show_session):
            continue
        if entry.type == TimetableEntryType.BREAK:
            entries.append(entry)
        elif entry.object.can_access(session.user):
            entries.append(entry)
            # Track whether any entry (or child) has an explicit location,
            # so the template knows when locations must be displayed.
            if not entry.object.inherit_location:
                show_siblings_location = True
            show_children_location[entry.id] = not all(child.object.inherit_location for child in entry.children)

    # Two stable sorts: primary key is (start time, title); among equal
    # keys, entries ending later come first (reverse end_dt pre-sort).
    entries.sort(key=attrgetter('end_dt'), reverse=True)
    entries.sort(key=lambda entry: (entry.start_dt, _entry_title_key(entry)))
    # Group the sorted entries by local calendar day.
    days = [(day, list(e)) for day, e in groupby(entries, lambda e: e.start_dt.astimezone(event_tz).date())]
    theme = theme_settings.themes[get_theme(event, view)[0]]
    plugin = theme.get('plugin')
    tpl_name = theme.get('tt_template', theme['template'])
    # Plugin themes use ':'-prefixed template names resolved inside the plugin.
    tt_tpl = ((plugin.name + tpl_name)
              if (plugin and tpl_name[0] == ':')
              else posixpath.join('events/timetable/display', tpl_name))
    multiple_days = event.start_dt.astimezone(event_tz).date() != event.end_dt.astimezone(event_tz).date()
    return render_template(tt_tpl, event=event, entries=entries, days=days,
                           timezone=event_tz.zone, tz_object=event_tz, hide_contribs=(detail_level == 'session'),
                           theme_settings=theme.get('settings', {}), show_siblings_location=show_siblings_location,
                           show_children_location=show_children_location, multiple_days=multiple_days, **kwargs)
def _entry_title_key(entry):
    """Return the title used when sorting a timetable entry."""
    obj = entry.object
    if entry.type == TimetableEntryType.SESSION_BLOCK:
        return obj.full_title
    return obj.title
| mit |
hryamzik/ansible | lib/ansible/modules/network/onyx/onyx_igmp.py | 59 | 7850 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_igmp
version_added: "2.7"
author: "Samer Deeb (@samerd)"
short_description: Configures IGMP globl parameters
description:
- This module provides declarative management of IGMP protocol params
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.6107
options:
state:
description:
- IGMP state.
required: true
choices: ['enabled', 'disabled']
last_member_query_interval:
description:
- Configure the last member query interval, range 1-25
mrouter_timeout:
description:
- Configure the mrouter timeout, range 60-600
port_purge_timeout:
description:
- Configure the host port purge timeout, range 130-1225
proxy_reporting:
description:
- Configure ip igmp snooping proxy and enable reporting mode
choices: ['enabled', 'disabled']
report_suppression_interval:
description:
- Configure the report suppression interval, range 1-25
unregistered_multicast:
description:
- Configure the unregistered multicast mode
Flood unregistered multicast
Forward unregistered multicast to mrouter ports
choices: ['flood', 'forward-to-mrouter-ports']
default_version:
description:
- Configure the default operating version of the IGMP snooping
choices: ['V2','V3']
"""
EXAMPLES = """
- name: configure igmp
onyx_igmp:
state: enabled
unregistered_multicast: flood
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- ip igmp snooping
- ip igmp snooping last-member-query-interval 10
- ip igmp snooping mrouter-timeout 150
- ip igmp snooping port-purge-timeout 150
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxIgmpModule(BaseOnyxModule):
    """Implementation of the onyx_igmp module: declarative management of
    global IGMP snooping parameters on Mellanox ONYX switches.
    """

    # Parses interval values rendered by the device as "<n> seconds".
    TIME_INTERVAL_REGEX = re.compile(r'^(\d+)\s+seconds')

    # parameter name -> (min, max, label used in 'show ip igmp snooping')
    _RANGE_INTERVALS = dict(
        last_member_query_interval=(1, 25, 'Last member query interval'),
        mrouter_timeout=(60, 600, 'Mrouter timeout'),
        port_purge_timeout=(130, 1225, 'Port purge timeout'),
        report_suppression_interval=(1, 25, 'Report suppression interval'),
    )

    def init_module(self):
        """Initialize the AnsibleModule with the igmp argument spec."""
        element_spec = dict(
            state=dict(choices=['enabled', 'disabled'], required=True),
            last_member_query_interval=dict(type='int'),
            mrouter_timeout=dict(type='int'),
            port_purge_timeout=dict(type='int'),
            proxy_reporting=dict(choices=['enabled', 'disabled']),
            report_suppression_interval=dict(type='int'),
            unregistered_multicast=dict(
                choices=['flood', 'forward-to-mrouter-ports']),
            default_version=dict(choices=['V2', 'V3']),
        )
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True)

    def _validate_key(self, param, key):
        """Range-check interval parameters; delegate others to the base."""
        # BUG FIX: the range table is _RANGE_INTERVALS; the previous code
        # referenced the undefined attribute _RANGE_VALIDATORS, which raised
        # AttributeError for every validated parameter.
        interval_params = self._RANGE_INTERVALS.get(key)
        if interval_params:
            min_val, max_val = interval_params[0], interval_params[1]
            value = param.get(key)
            self._validate_range(key, min_val, max_val, value)
        else:
            super(OnyxIgmpModule, self)._validate_key(param, key)

    def get_required_config(self):
        """Snapshot and validate the configuration requested by the task."""
        module_params = self._module.params
        self._required_config = dict(module_params)
        self.validate_param_values(self._required_config)

    def _set_igmp_config(self, igmp_config):
        """Populate self._current_config from parsed 'show' output."""
        igmp_config = igmp_config[0]
        if not igmp_config:
            return
        self._current_config['state'] = igmp_config.get(
            'IGMP snooping globally', 'disabled')
        self._current_config['proxy_reporting'] = igmp_config.get(
            'Proxy-reporting globally', 'disabled')
        self._current_config['default_version'] = igmp_config.get(
            'IGMP default version for new VLAN', 'V3')
        self._current_config['unregistered_multicast'] = igmp_config.get(
            'IGMP snooping unregistered multicast', 'flood')
        # Extract each interval as an int; None when the device did not
        # report it in the expected "<n> seconds" form.
        for interval_name, interval_params in iteritems(self._RANGE_INTERVALS):
            display_str = interval_params[2]
            value = igmp_config.get(display_str, '')
            match = self.TIME_INTERVAL_REGEX.match(value)
            if match:
                interval_value = int(match.group(1))
            else:
                interval_value = None
            self._current_config[interval_name] = interval_value

    def _show_igmp(self):
        """Run 'show ip igmp snooping' and return the parsed JSON output."""
        cmd = "show ip igmp snooping"
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def load_current_config(self):
        """Load the device's current IGMP configuration."""
        self._current_config = dict()
        igmp_config = self._show_igmp()
        if igmp_config:
            self._set_igmp_config(igmp_config)

    def generate_commands(self):
        """Produce the config commands needed to reach the desired state."""
        state = self._required_config['state']
        if state == 'enabled':
            self._generate_igmp_cmds()
        else:
            self._generate_no_igmp_cmds()

    def _generate_igmp_cmds(self):
        """Enable IGMP snooping and align every requested sub-setting with
        the device's current configuration (only emitting diffs)."""
        curr_state = self._current_config.get('state', 'disabled')
        if curr_state == 'disabled':
            self._commands.append('ip igmp snooping')
        for interval_name in self._RANGE_INTERVALS:
            req_val = self._required_config.get(interval_name)
            if not req_val:
                continue
            curr_value = self._current_config.get(interval_name)
            if curr_value == req_val:
                continue
            # CLI keyword uses dashes where the module option uses underscores.
            interval_cmd = interval_name.replace('_', '-')
            self._commands.append(
                'ip igmp snooping %s %s' % (interval_cmd, req_val))

        req_val = self._required_config.get('unregistered_multicast')
        if req_val:
            curr_value = self._current_config.get(
                'unregistered_multicast', 'flood')
            if req_val != curr_value:
                self._commands.append(
                    'ip igmp snooping unregistered multicast %s' % req_val)

        req_val = self._required_config.get('proxy_reporting')
        if req_val:
            curr_value = self._current_config.get(
                'proxy_reporting', 'disabled')
            if req_val != curr_value:
                cmd = 'ip igmp snooping proxy reporting'
                if req_val == 'disabled':
                    cmd = 'no %s' % cmd
                self._commands.append(cmd)

        req_val = self._required_config.get('default_version')
        if req_val:
            curr_value = self._current_config.get(
                'default_version', 'V3')
            if req_val != curr_value:
                version = req_val[1]  # remove the 'V' and take the number only
                self._commands.append(
                    'ip igmp snooping version %s' % version)

    def _generate_no_igmp_cmds(self):
        """Disable IGMP snooping if it is currently enabled."""
        curr_state = self._current_config.get('state', 'disabled')
        if curr_state != 'disabled':
            self._commands.append('no ip igmp snooping')
def main():
    """ main entry point for module execution
    """
    OnyxIgmpModule.main()


if __name__ == '__main__':
    main()
| gpl-3.0 |
petrus-v/odoo | addons/project/report/__init__.py | 444 | 1069 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ashleyholman/bitcoin | qa/rpc-tests/test_framework/socks5.py | 182 | 5701 | # Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Dummy Socks5 server for testing.
'''
from __future__ import print_function, division, unicode_literals
import socket, threading, Queue
import traceback, sys
### Protocol constants
class Command:
    """SOCKS5 command codes (RFC 1928, section 4)."""
    CONNECT = 0x01
class AddressType:
    """SOCKS5 address-type codes (RFC 1928, section 5)."""
    IPV4 = 0x01
    DOMAINNAME = 0x03
    IPV6 = 0x04
### Utility functions
def recvall(s, n):
    '''Receive exactly n bytes from a socket, raising IOError on EOF.'''
    buf = bytearray()
    remaining = n
    while remaining > 0:
        chunk = s.recv(remaining)
        if not chunk:
            raise IOError('Unexpected end of stream')
        buf.extend(chunk)
        remaining -= len(chunk)
    return buf
### Implementation classes
class Socks5Configuration(object):
    '''Proxy configuration for Socks5Server.'''
    def __init__(self):
        self.addr = None # Bind address (host, port) tuple (must be set)
        self.af = socket.AF_INET # Bind address family
        self.unauth = False # Support unauthenticated
        self.auth = False # Support authentication
class Socks5Command(object):
    '''Information about an incoming socks5 command'''

    def __init__(self, cmd, atyp, addr, port, username, password):
        self.cmd = cmd            # Command (one of Command.*)
        self.atyp = atyp          # Address type (one of AddressType.*)
        self.addr = addr          # Address
        self.port = port          # Port to connect to
        self.username = username  # auth user name, or None
        self.password = password  # auth password, or None

    def __repr__(self):
        fields = (self.cmd, self.atyp, self.addr, self.port,
                  self.username, self.password)
        return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % fields
class Socks5Connection(object):
    """Handles a single client connection to the dummy SOCKS5 server.

    Parses the handshake and connect request per RFC 1928, reports the
    parsed command on the server's queue and then closes the connection
    (it never actually connects onward).
    """
    def __init__(self, serv, conn, peer):
        self.serv = serv  # owning Socks5Server
        self.conn = conn  # connected client socket
        self.peer = peer  # client address

    def handle(self):
        '''
        Handle socks5 request according to RFC1928
        '''
        try:
            # Verify socks version
            ver = recvall(self.conn, 1)[0]
            if ver != 0x05:
                raise IOError('Invalid socks version %i' % ver)
            # Choose authentication method
            nmethods = recvall(self.conn, 1)[0]
            methods = bytearray(recvall(self.conn, nmethods))
            method = None
            if 0x02 in methods and self.serv.conf.auth:
                method = 0x02 # username/password
            elif 0x00 in methods and self.serv.conf.unauth:
                method = 0x00 # unauthenticated
            if method is None:
                raise IOError('No supported authentication method was offered')
            # Send response
            self.conn.sendall(bytearray([0x05, method]))
            # Read authentication (optional)
            username = None
            password = None
            if method == 0x02:
                ver = recvall(self.conn, 1)[0]
                if ver != 0x01:
                    raise IOError('Invalid auth packet version %i' % ver)
                ulen = recvall(self.conn, 1)[0]
                username = str(recvall(self.conn, ulen))
                plen = recvall(self.conn, 1)[0]
                password = str(recvall(self.conn, plen))
                # Send authentication response
                self.conn.sendall(bytearray([0x01, 0x00]))
            # Read connect request
            (ver,cmd,rsv,atyp) = recvall(self.conn, 4)
            if ver != 0x05:
                raise IOError('Invalid socks version %i in connect request' % ver)
            if cmd != Command.CONNECT:
                raise IOError('Unhandled command %i in connect request' % cmd)
            if atyp == AddressType.IPV4:
                addr = recvall(self.conn, 4)
            elif atyp == AddressType.DOMAINNAME:
                # Domain names are length-prefixed by one byte.
                n = recvall(self.conn, 1)[0]
                addr = str(recvall(self.conn, n))
            elif atyp == AddressType.IPV6:
                addr = recvall(self.conn, 16)
            else:
                raise IOError('Unknown address type %i' % atyp)
            port_hi,port_lo = recvall(self.conn, 2)
            port = (port_hi << 8) | port_lo
            # Send dummy response: always claim success (bound to 0.0.0.0:0),
            # this test proxy never connects to the requested destination.
            self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
            cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
            # Expose the parsed command (or the exception below) to the test.
            self.serv.queue.put(cmdin)
            print('Proxy: ', cmdin)
            # Fall through to disconnect
        except Exception,e:  # NOTE: Python 2 syntax; this file is Python 2
            traceback.print_exc(file=sys.stderr)
            self.serv.queue.put(e)
        finally:
            self.conn.close()
class Socks5Server(object):
    """Threaded dummy SOCKS5 server.

    Accepts connections and spawns a daemon Socks5Connection handler per
    client; parsed commands and handler exceptions are published on
    ``self.queue`` for the test to inspect.
    """
    def __init__(self, conf):
        self.conf = conf
        self.s = socket.socket(conf.af)
        # Allow quick re-binding of the test port between runs.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind(conf.addr)
        self.s.listen(5)
        self.running = False
        self.thread = None
        self.queue = Queue.Queue() # report connections and exceptions to client
    def run(self):
        """Accept loop; runs on the background thread until stop()."""
        while self.running:
            (sockconn, peer) = self.s.accept()
            if self.running:
                conn = Socks5Connection(self, sockconn, peer)
                thread = threading.Thread(None, conn.handle)
                thread.daemon = True
                thread.start()
    def start(self):
        """Start the accept loop on a daemon thread."""
        assert(not self.running)
        self.running = True
        self.thread = threading.Thread(None, self.run)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        """Stop the accept loop and join the server thread."""
        self.running = False
        # connect to self to end run loop
        s = socket.socket(self.conf.af)
        s.connect(self.conf.addr)
        s.close()
        self.thread.join()
seanli9jan/tensorflow | tensorflow/compiler/tests/scan_ops_test.py | 10 | 7707 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
  """Return `x` reversed along `axis` (negative axis values allowed)."""
  ndim = len(x.shape)
  if axis < 0:
    axis += ndim
  # BUG FIX: index with a tuple of slices; indexing an ndarray with a *list*
  # of slices is invalid in modern NumPy (removed after deprecation).
  index = tuple(
      slice(None, None, -1) if dim == axis else slice(None)
      for dim in range(ndim))
  return x[index]
def handle_options(func, x, axis, exclusive, reverse):
  """Apply `func` (np.cumsum or np.cumprod) along `axis`, honouring
  TensorFlow-style `exclusive` and `reverse` scan options.

  Raises ValueError for any other `func`.
  """
  ndim = len(x.shape)
  if axis < 0:
    axis += ndim

  if reverse:
    x = np.flip(x, axis)

  if exclusive:
    # BUG FIX: index with tuples of slices; lists of slices are rejected
    # by modern NumPy.
    ix_head = tuple(
        slice(0, 1) if i == axis else slice(None) for i in range(ndim))
    ix_init = tuple(
        slice(0, -1) if i == axis else slice(None) for i in range(ndim))
    if func == np.cumsum:
      init = np.zeros_like(x[ix_head])  # identity for addition
    elif func == np.cumprod:
      init = np.ones_like(x[ix_head])   # identity for multiplication
    else:
      raise ValueError("Unknown scan function.")
    # Shift by one: prepend the identity, drop the last scanned element.
    x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
  else:
    x = func(x, axis=axis)

  if reverse:
    x = np.flip(x, axis)
  return x
class CumsumTest(xla_test.XLATestCase):
  """Compares XLA-compiled tf.cumsum against the numpy reference above."""
  valid_dtypes = [np.float32]
  def axis_dtypes(self):
    # Axis dtypes accepted by the op, restricted to those this backend builds.
    return set(self.int_types).intersection([np.int32, np.int64])
  def _compare(self, x, axis, exclusive, reverse):
    # Golden value from numpy; TF result computed through a placeholder so
    # the op sees a fed tensor, not a constant.
    np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
    with self.cached_session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
          feed_dict={p: x})
    self.assertAllClose(np_out, tf_out)
  def _compareAll(self, x, axis):
    # Exercise every (exclusive, reverse) combination for one input/axis.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)
  def testEmpty(self):
    # Zero-length scan axis must not crash and must produce an empty result.
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)
  def testAxisType(self):
    # The axis argument may be an int32 or int64 scalar tensor.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.cached_session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          math_ops.cumsum(p, axis).eval(feed_dict={p: x})
  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)
  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)
  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)
  def test6D(self):
    # High-rank input; step 3 keeps the combinatorial cost down.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)
  def testInvalidAxis(self):
    # Out-of-range and non-scalar axis values must raise, with the exact
    # error text the kernel emits.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.cached_session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
  """Compares XLA-compiled tf.cumprod against the numpy reference above."""
  valid_dtypes = [np.float32]
  def axis_dtypes(self):
    # Axis dtypes accepted by the op, restricted to those this backend builds.
    return set(self.int_types).intersection([np.int32, np.int64])
  def _compare(self, x, axis, exclusive, reverse):
    # Golden value from numpy; TF result computed through a placeholder.
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.cached_session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      prod = math_ops.cumprod(p, axis, exclusive, reverse)
      tf_out = prod.eval(feed_dict={p: x})
    self.assertAllClose(np_out, tf_out)
  def _compareAll(self, x, axis):
    # Exercise every (exclusive, reverse) combination for one input/axis.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)
  def testEmpty(self):
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)
  def testAxisType(self):
    # The axis argument may be an int32 or int64 scalar tensor.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.cached_session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          # Bug fix: feed the placeholder `p` to the op. The previous code
          # passed the numpy array `x` directly, so `p` (and the feed) was
          # unused and the fed-tensor path was never exercised, unlike the
          # parallel test in CumsumTest.
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})
  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)
  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)
  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)
  def test6D(self):
    # High-rank input; step 3 keeps the combinatorial cost down.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)
  def testInvalidAxis(self):
    # Out-of-range and non-scalar axis values must raise, with the exact
    # error text the kernel emits.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.cached_session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumprod(input_tensor, [0]).eval()
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed as a script.
  test.main()
| apache-2.0 |
druuu/django | django/core/serializers/base.py | 273 | 7678 | """
Module for abstract serializer/unserializer base classes.
"""
from django.db import models
from django.utils import six
class SerializerDoesNotExist(KeyError):
    """Raised when a serializer with the requested name is not registered."""
class SerializationError(Exception):
    """Raised when something goes wrong during serialization."""
class DeserializationError(Exception):
    """Something bad happened during deserialization."""
    @classmethod
    def WithData(cls, original_exc, model, fk, field_value):
        """
        Factory method for creating a deserialization error which has a more
        explanatory message.
        """
        # Embeds the originating exception, the model label, the pk of the
        # offending row and the raw field value in one message string.
        return cls("%s: (%s:pk=%s) field_value was '%s'" % (original_exc, model, fk, field_value))
class ProgressBar(object):
    """Renders a fixed-width textual progress bar to an output stream.

    If ``output`` is falsy (e.g. None), every update is a silent no-op.
    """
    progress_width = 75
    def __init__(self, output, total_count):
        self.output = output
        self.total_count = total_count
        self.prev_done = 0
    def update(self, count):
        """Redraw the bar for ``count`` processed items, if it advanced."""
        if not self.output:
            return
        percent = count * 100 // self.total_count
        filled = percent * self.progress_width // 100
        # Only redraw when at least one more cell of the bar is filled.
        if filled <= self.prev_done:
            return
        self.prev_done = filled
        # A single object needs no carriage return to overwrite a prior draw.
        if self.total_count == 1:
            prefix = ''
        else:
            prefix = '\r'
        bar = '[' + '.' * filled + ' ' * (self.progress_width - filled) + ']'
        self.output.write(prefix + bar)
        if filled == self.progress_width:
            self.output.write('\n')
        self.output.flush()
class Serializer(object):
    """
    Abstract serializer base class.
    """
    # Indicates if the implemented serializer is only available for
    # internal Django use.
    internal_use_only = False
    progress_class = ProgressBar
    def serialize(self, queryset, **options):
        """
        Serialize a queryset.

        Recognized options (popped from **options; the rest are left for
        subclasses): stream, fields, use_natural_foreign_keys,
        use_natural_primary_keys, progress_output, object_count.
        """
        self.options = options
        self.stream = options.pop("stream", six.StringIO())
        self.selected_fields = options.pop("fields", None)
        self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False)
        self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
        progress_bar = self.progress_class(
            options.pop('progress_output', None), options.pop('object_count', 0)
        )
        self.start_serialization()
        self.first = True
        for count, obj in enumerate(queryset, start=1):
            self.start_object(obj)
            # Use the concrete parent class' _meta instead of the object's _meta
            # This is to avoid local_fields problems for proxy models. Refs #17717.
            concrete_model = obj._meta.concrete_model
            for field in concrete_model._meta.local_fields:
                if field.serialize:
                    if field.remote_field is None:
                        if self.selected_fields is None or field.attname in self.selected_fields:
                            self.handle_field(obj, field)
                    else:
                        # FK attnames end in "_id"; strip those 3 chars when
                        # matching against the user-selected field names.
                        if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
                            self.handle_fk_field(obj, field)
            for field in concrete_model._meta.many_to_many:
                if field.serialize:
                    if self.selected_fields is None or field.attname in self.selected_fields:
                        self.handle_m2m_field(obj, field)
            self.end_object(obj)
            progress_bar.update(count)
            if self.first:
                self.first = False
        self.end_serialization()
        return self.getvalue()
    def start_serialization(self):
        """
        Called when serializing of the queryset starts.
        """
        raise NotImplementedError('subclasses of Serializer must provide a start_serialization() method')
    def end_serialization(self):
        """
        Called when serializing of the queryset ends.
        """
        pass
    def start_object(self, obj):
        """
        Called when serializing of an object starts.
        """
        raise NotImplementedError('subclasses of Serializer must provide a start_object() method')
    def end_object(self, obj):
        """
        Called when serializing of an object ends.
        """
        pass
    def handle_field(self, obj, field):
        """
        Called to handle each individual (non-relational) field on an object.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_field() method')
    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey field.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_fk_field() method')
    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_m2m_field() method')
    def getvalue(self):
        """
        Return the fully serialized queryset (or None if the output stream is
        not seekable).
        """
        if callable(getattr(self.stream, 'getvalue', None)):
            return self.stream.getvalue()
class Deserializer(six.Iterator):
    """
    Abstract base deserializer class.

    Wraps the input so subclasses can always read from ``self.stream``:
    a plain string is wrapped in a StringIO, anything else is assumed to
    already be file-like.
    """
    def __init__(self, stream_or_string, **options):
        """
        Init this serializer given a stream or a string
        """
        self.options = options
        if not isinstance(stream_or_string, six.string_types):
            # Already a file-like object; use it as-is.
            self.stream = stream_or_string
        else:
            self.stream = six.StringIO(stream_or_string)
    def __iter__(self):
        # A deserializer is its own iterator.
        return self
    def __next__(self):
        """Iteration interface -- return the next item in the stream"""
        raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')
class DeserializedObject(object):
    """
    A deserialized model.
    Basically a container for holding the pre-saved deserialized data along
    with the many-to-many data saved with the object.
    Call ``save()`` to save the object (with the many-to-many data) to the
    database; call ``save(save_m2m=False)`` to save just the object fields
    (and not touch the many-to-many stuff.)
    """
    def __init__(self, obj, m2m_data=None):
        # obj: an unsaved model instance; m2m_data: {accessor_name: [values]}
        # to be assigned after the instance row exists.
        self.object = obj
        self.m2m_data = m2m_data
    def __repr__(self):
        return "<DeserializedObject: %s(pk=%s)>" % (
            self.object._meta.label, self.object.pk)
    def save(self, save_m2m=True, using=None, **kwargs):
        # Call save on the Model baseclass directly. This bypasses any
        # model-defined save. The save is also forced to be raw.
        # raw=True is passed to any pre/post_save signals.
        models.Model.save_base(self.object, using=using, raw=True, **kwargs)
        if self.m2m_data and save_m2m:
            for accessor_name, object_list in self.m2m_data.items():
                setattr(self.object, accessor_name, object_list)
        # prevent a second (possibly accidental) call to save() from saving
        # the m2m data twice.
        self.m2m_data = None
def build_instance(Model, data, db):
    """
    Build a model instance.
    If the model instance doesn't have a primary key and the model supports
    natural keys, try to retrieve it from the database.
    """
    instance = Model(**data)
    # A known pk means nothing to look up.
    if instance.pk is not None:
        return instance
    # Natural-key lookup is only possible when the model exposes both halves
    # of the protocol.
    if not (hasattr(Model, 'natural_key') and
            hasattr(Model._default_manager, 'get_by_natural_key')):
        return instance
    natural_key = instance.natural_key()
    try:
        instance.pk = (Model._default_manager.db_manager(db)
                       .get_by_natural_key(*natural_key).pk)
    except Model.DoesNotExist:
        # No existing row: leave pk unset so a save() will insert.
        pass
    return instance
| bsd-3-clause |
aspectron/jsx | extern/boost/libs/python/test/newtest.py | 46 | 3629 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from m1 import *
>>> from m2 import *
Prove that we get an appropriate error from trying to return a type
for which we have no registered to_python converter
>>> def check_unregistered(f, msgprefix):
... try:
... f(1)
... except TypeError, x:
... if not str(x).startswith(msgprefix):
... print str(x)
... else:
... print 'expected a TypeError'
...
>>> check_unregistered(make_unregistered, 'No to_python (by-value) converter found for C++ type')
>>> check_unregistered(make_unregistered2, 'No Python class registered for C++ class')
>>> n = new_noddy()
>>> s = new_simple()
>>> unwrap_int(n)
42
>>> unwrap_int_ref(n)
42
>>> unwrap_int_const_ref(n)
42
>>> unwrap_simple(s)
'hello, world'
>>> unwrap_simple_ref(s)
'hello, world'
>>> unwrap_simple_const_ref(s)
'hello, world'
>>> unwrap_int(5)
5
Can't get a non-const reference to a built-in integer object
>>> try:
... unwrap_int_ref(7)
... except: pass
... else: print 'no exception'
>>> unwrap_int_const_ref(9)
9
>>> wrap_int(n)
42
>>> try: wrap_int_ref(n)
... except: pass
... else: print 'no exception'
>>> wrap_int_const_ref(n)
42
>>> unwrap_simple_ref(wrap_simple(s))
'hello, world'
>>> unwrap_simple_ref(wrap_simple_ref(s))
'hello, world'
>>> unwrap_simple_ref(wrap_simple_const_ref(s))
'hello, world'
>>> f(s)
12
>>> unwrap_simple(g(s))
'hello, world'
>>> f(g(s))
12
>>> f_mutable_ref(g(s))
12
>>> f_const_ptr(g(s))
12
>>> f_mutable_ptr(g(s))
12
>>> f2(g(s))
12
Create an extension class which wraps "complicated" (init1 and get_n)
are a complicated constructor and member function, respectively.
>>> c1 = complicated(s, 99)
>>> c1.get_n()
99
>>> c2 = complicated(s)
>>> c2.get_n()
0
a quick regression test for a bug where None could be converted
to the target of any member function. To see it, we need to
access the __dict__ directly, to bypass the type check supplied
by the Method property which wraps the method when accessed as an
attribute.
>>> try: A.__dict__['name'](None)
... except TypeError: pass
... else: print 'expected an exception!'
>>> a = A()
>>> b = B()
>>> c = C()
>>> d = D()
>>> take_a(a).name()
'A'
>>> try:
... take_b(a)
... except: pass
... else: print 'no exception'
>>> try:
... take_c(a)
... except: pass
... else: print 'no exception'
>>> try:
... take_d(a)
... except: pass
... else: print 'no exception'
------
>>> take_a(b).name()
'A'
>>> take_b(b).name()
'B'
>>> try:
... take_c(b)
... except: pass
... else: print 'no exception'
>>> try:
... take_d(b)
... except: pass
... else: print 'no exception'
-------
>>> take_a(c).name()
'A'
>>> try:
... take_b(c)
... except: pass
... else: print 'no exception'
>>> take_c(c).name()
'C'
>>> try:
... take_d(c)
... except: pass
... else: print 'no exception'
-------
>>> take_a(d).name()
'A'
>>> take_b(d).name()
'B'
>>> take_c(d).name()
'C'
>>> take_d(d).name()
'D'
>>> take_d_shared_ptr(d).name()
'D'
>>> d_as_a = d_factory()
>>> dd = take_d(d_as_a)
>>> dd.name()
'D'
>>> print g.__doc__.splitlines()[1]
g( (Simple)arg1) -> Simple :
"""
def run(args = None):
    """Execute this module's doctests; return a (failures, attempts) pair."""
    import sys
    import doctest
    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
if __name__ == '__main__':
    # Python 2 script entry point: run the doctests and exit with the
    # failure count as the process status (0 on success).
    print "running..."
    import sys
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
| mit |
ElDeveloper/qiita | qiita_db/test/test_portal.py | 3 | 8644 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy.testing as npt
from qiita_core.util import qiita_test_checker
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
@qiita_test_checker()
class TestPortal(TestCase):
    """Exercises portal creation/deletion and study/analysis portal
    membership against the Qiita test database.

    Each test restores ``qiita_config.portal`` in tearDown so portal
    switches inside a test cannot leak into the next one.
    """
    def setUp(self):
        self.portal = qiita_config.portal
        self.study = qdb.study.Study(1)
        self.analysis = qdb.analysis.Analysis(1)
        self.qiita_portal = qdb.portal.Portal('QIITA')
        self.emp_portal = qdb.portal.Portal('EMP')
    def tearDown(self):
        # Restore whichever portal was selected before the test ran.
        qiita_config.portal = self.portal
    def test_list_portals(self):
        obs = qdb.portal.Portal.list_portals()
        exp = ['EMP']
        self.assertEqual(obs, exp)
    def test_add_portal(self):
        obs = qdb.portal.Portal.create("NEWPORTAL", "SOMEDESC")
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add("SELECT * FROM qiita.portal_type")
            obs = qdb.sql_connection.TRN.execute_fetchindex()
        exp = [[1, 'QIITA', 'QIITA portal. Access to all data stored '
                'in database.'],
               [2, 'EMP', 'EMP portal'],
               [4, 'NEWPORTAL', 'SOMEDESC']]
        self.assertCountEqual(obs, exp)
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add("SELECT * FROM qiita.analysis_portal")
            obs = qdb.sql_connection.TRN.execute_fetchindex()
        exp = [[1, 1], [2, 1], [3, 1], [4, 1], [5, 1], [6, 1], [7, 2], [8, 2],
               [9, 2], [10, 2], [11, 4], [12, 4], [13, 4], [14, 4]]
        self.assertCountEqual(obs, exp)
        # Duplicate portal names must be rejected.
        with self.assertRaises(qdb.exceptions.QiitaDBDuplicateError):
            qdb.portal.Portal.create("EMP", "DOESNTMATTERFORDESC")
        qdb.portal.Portal.delete('NEWPORTAL')
    def test_remove_portal(self):
        qdb.portal.Portal.create("NEWPORTAL", "SOMEDESC")
        # Select some samples on a default analysis
        qiita_config.portal = "NEWPORTAL"
        a = qdb.user.User("test@foo.bar").default_analysis
        a.add_samples({1: ['1.SKB8.640193', '1.SKD5.640186']})
        qdb.portal.Portal.delete("NEWPORTAL")
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add("SELECT * FROM qiita.portal_type")
            obs = qdb.sql_connection.TRN.execute_fetchindex()
        exp = [[1, 'QIITA', 'QIITA portal. Access to all data stored '
                'in database.'],
               [2, 'EMP', 'EMP portal']]
        self.assertCountEqual(obs, exp)
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add("SELECT * FROM qiita.analysis_portal")
            obs = qdb.sql_connection.TRN.execute_fetchindex()
        exp = [[1, 1], [2, 1], [3, 1], [4, 1], [5, 1], [6, 1], [7, 2], [8, 2],
               [9, 2], [10, 2]]
        self.assertCountEqual(obs, exp)
        # Unknown portals and the built-in QIITA portal cannot be deleted.
        with self.assertRaises(qdb.exceptions.QiitaDBLookupError):
            qdb.portal.Portal.delete("NOEXISTPORTAL")
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            qdb.portal.Portal.delete("QIITA")
        qdb.portal.Portal.create("NEWPORTAL2", "SOMEDESC")
        # Add study to this new portal and make sure error raised
        info = {
            "timeseries_type_id": 1,
            "metadata_complete": True,
            "mixs_compliant": True,
            "study_alias": "FCM",
            "study_description": "Microbiome of people who eat nothing but "
                                 "fried chicken",
            "study_abstract": "Exploring how a high fat diet changes the "
                              "gut microbiome",
            "principal_investigator_id": qdb.study.StudyPerson(3),
            "lab_person_id": qdb.study.StudyPerson(1)
        }
        qdb.portal.Portal.create("NEWPORTAL3", "SOMEDESC")
        qiita_config.portal = "NEWPORTAL3"
        qdb.study.Study.create(
            qdb.user.User('test@foo.bar'), "Fried chicken microbiome", info)
        qiita_config.portal = "QIITA"
        # A portal that still owns a study must refuse deletion.
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            qdb.portal.Portal.delete("NEWPORTAL3")
    def test_check_studies(self):
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.qiita_portal._check_studies([2000000000000, 122222222222222])
    def test_check_analyses(self):
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.qiita_portal._check_analyses([2000000000000, 122222222222222])
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.qiita_portal._check_analyses([8, 9])
    def test_get_studies_by_portal(self):
        obs = self.emp_portal.get_studies()
        self.assertEqual(obs, set())
        obs = self.qiita_portal.get_studies()
        self.assertEqual(obs, {qdb.study.Study(1)})
    def test_add_study_portals(self):
        obs = qdb.portal.Portal.create("NEWPORTAL4", "SOMEDESC")
        obs.add_studies([self.study.id])
        self.assertCountEqual(self.study._portals, ['NEWPORTAL4', 'QIITA'])
        # Re-adding an attached study only warns, it does not raise.
        npt.assert_warns(qdb.exceptions.QiitaDBWarning, obs.add_studies,
                         [self.study.id])
        obs.remove_studies([self.study.id])
        qdb.portal.Portal.delete("NEWPORTAL4")
    def test_remove_study_portals(self):
        with self.assertRaises(ValueError):
            self.qiita_portal.remove_studies([self.study.id])
        self.emp_portal.add_studies([1])
        # Set up the analysis in EMP portal
        self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertCountEqual(obs, ['QIITA', 'EMP'])
        # Test study removal failure
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.emp_portal.remove_studies([self.study.id])
        obs = self.study._portals
        self.assertCountEqual(obs, ['QIITA', 'EMP'])
        # Test study removal
        self.emp_portal.remove_analyses([self.analysis.id])
        self.emp_portal.remove_studies([self.study.id])
        obs = self.study._portals
        self.assertEqual(obs, ['QIITA'])
        obs = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, self.emp_portal.remove_studies,
            [self.study.id])
    def test_get_analyses_by_portal(self):
        qiita_config.portal = 'EMP'
        exp = {qdb.analysis.Analysis(7), qdb.analysis.Analysis(8),
               qdb.analysis.Analysis(9), qdb.analysis.Analysis(10)}
        obs = self.emp_portal.get_analyses()
        self.assertEqual(obs, exp)
        qiita_config.portal = 'QIITA'
        exp = {qdb.analysis.Analysis(1), qdb.analysis.Analysis(2),
               qdb.analysis.Analysis(3), qdb.analysis.Analysis(4),
               qdb.analysis.Analysis(5), qdb.analysis.Analysis(6)}
        obs = self.qiita_portal.get_analyses()
        self.assertEqual(obs, exp)
    def test_add_analysis_portals(self):
        obs = self.analysis._portals
        self.assertEqual(obs, ['QIITA'])
        # An analysis cannot join a portal its study does not belong to.
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertEqual(obs, ['QIITA'])
        self.emp_portal.add_studies([1])
        self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertEqual(obs, ['EMP', 'QIITA'])
        npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, self.emp_portal.add_analyses,
            [self.analysis.id])
        self.emp_portal.remove_analyses([self.analysis.id])
        self.emp_portal.remove_studies([1])
    def test_remove_analysis_portals(self):
        with self.assertRaises(ValueError):
            self.qiita_portal.remove_analyses([self.analysis.id])
        # set up the analysis in EMP portal
        self.emp_portal.add_studies([1])
        self.emp_portal.add_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertCountEqual(obs, ['QIITA', 'EMP'])
        # Test removal
        self.emp_portal.remove_analyses([self.analysis.id])
        obs = self.analysis._portals
        self.assertEqual(obs, ['QIITA'])
        obs = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, self.emp_portal.remove_analyses,
            [self.analysis.id])
        self.emp_portal.remove_studies([1])
self.emp_portal.remove_studies([1])
if __name__ == '__main__':
    # Run the portal test suite when executed directly.
    main()
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/containerregistry/azure-containerregistry/samples/async_samples/sample_create_client_async.py | 1 | 2454 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_create_client_async.py
DESCRIPTION:
These samples demonstrate creating a ContainerRegistryClient and a ContainerRepository
USAGE:
python sample_create_client_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONTAINERREGISTRY_URL - The URL of you Container Registry account
"""
import asyncio
from dotenv import find_dotenv, load_dotenv
import os
class CreateClients(object):
    """Demonstrates constructing and using an async ContainerRegistryClient."""
    def __init__(self):
        # Load CONTAINERREGISTRY_ENDPOINT (and friends) from a .env file.
        load_dotenv(find_dotenv())
    async def create_registry_client(self):
        """Show the minimal client construction snippet."""
        # Instantiate the ContainerRegistryClient
        # [START create_registry_client]
        from azure.containerregistry.aio import ContainerRegistryClient
        from azure.identity.aio import DefaultAzureCredential
        account_url = os.environ["CONTAINERREGISTRY_ENDPOINT"]
        client = ContainerRegistryClient(account_url, DefaultAzureCredential())
        # [END create_registry_client]
    async def basic_sample(self):
        """List repositories/tags and delete the 'hello-world' repository."""
        from azure.containerregistry.aio import ContainerRegistryClient
        from azure.identity.aio import DefaultAzureCredential
        account_url = os.environ["CONTAINERREGISTRY_ENDPOINT"]
        # Instantiate the client
        client = ContainerRegistryClient(account_url, DefaultAzureCredential())
        async with client:
            # Iterate through all the repositories
            async for repository_name in client.list_repository_names():
                if repository_name == "hello-world":
                    # Create a repository client from the registry client
                    async for tag in client.list_tag_properties(repository_name):
                        print(tag.digest)
                    # [START delete_repository]
                    await client.delete_repository(repository_name, "hello-world")
                    # [END delete_repository]
async def main():
    """Drive both samples sequentially on one event loop."""
    sample = CreateClients()
    await sample.create_registry_client()
    await sample.basic_sample()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| mit |
McNoggins/latgen | setup.py | 1 | 1433 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Prefer setuptools; fall back to distutils on minimal environments.
try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup
# Long description is assembled from the README plus the changelog.
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
# NOTE(review): this list is never passed to setup(); install_requires below
# is hard-coded to ['numpy'] — confirm which is intended.
requirements = [
    # TODO: put package requirements here
]
test_requirements = [
    # TODO: put package test requirements here
]
setup(
    name='latgen',
    version='0.1.0',
    description='Computational library to generate lattice points',
    long_description=readme + '\n\n' + history,
    author='Denis Gagnon',
    author_email='gagnon88@gmail.com',
    url='https://github.com/McNoggins/latgen',
    packages=['latgen'],
    package_dir={'latgen':
                 'latgen'},
    include_package_data=True,
    install_requires=['numpy'],
    # NOTE(review): license says GPL but the classifier below declares BSD —
    # confirm the actual license before release.
    license="GPL",
    zip_safe=False,
    keywords='latgen',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| gpl-2.0 |
bakhtout/odoo-educ | addons/openeducat_erp/report/exam_student_lable.py | 1 | 3860 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.report import report_sxw
from openerp.osv import osv
class exam_student_lable_report(report_sxw.rml_parse):
    """RML parser for the exam student label report.

    Exposes ``get_student_data`` to the report template via localcontext.
    """
    def __init__(self, cr, uid, name, context=None):
        super(exam_student_lable_report, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_student_data': self.get_student_data
        })
    def format_list(self, temp_list):
        """Split ``temp_list`` into label rows of at most three entries.

        Each row is a dict keyed '1'..'3' (string key = 1-based position in
        the row). Bug fix: the previous hand-rolled grouping crashed with an
        IndexError on an empty list (it unconditionally read
        ``temp_list[-3]`` when ``len(temp_list) % 3 == 0``); slicing handles
        the empty list and every remainder uniformly.
        """
        lst = []
        for start in range(0, len(temp_list), 3):
            row = temp_list[start:start + 3]
            lst.append(dict((str(pos + 1), item)
                            for pos, item in enumerate(row)))
        return lst
    def get_student_data(self, exam_session_ids):
        """Collect students per exam session, grouped in rows of three.

        Returns a list of dicts with keys 'course', 'standard' and 'line'
        (the formatted label rows) for each session line.
        """
        student_pool = self.pool.get('op.student')
        ret_list = []
        for line in exam_session_ids:
            student_ids = student_pool.search(self.cr, self.uid, [('course_id', '=', line.course_id.id),
                                                                  ('standard_id', '=', line.standard_id.id),
                                                                  ], order='id asc')
            temp_list = []
            for student in student_pool.browse(self.cr, self.uid, student_ids):
                res = {
                    'student': student.name,
                    'middle_name': student.middle_name,
                    'last_name': student.last_name,
                    'course': student.course_id.name,
                    'roll_number': student.roll_number,
                    'std': student.standard_id.name
                }
                temp_list.append(res)
            ret_list.append({'course': line.course_id.name, 'standard': line.standard_id.name, 'line': self.format_list(temp_list)})
        return ret_list
class report_exam_student_lable_report(osv.AbstractModel):
    # QWeb report shim: binds the legacy RML parser above to the QWeb
    # template 'openeducat_erp.report_exam_student_lable_report'.
    _name = 'report.openeducat_erp.report_exam_student_lable_report'
    _inherit = 'report.abstract_report'
    _template = 'openeducat_erp.report_exam_student_lable_report'
    _wrapped_report_class = exam_student_lable_report
#report_sxw.report_sxw('report.op.exam.student.lable','op.exam.res.allocation', 'addons/openeducat_erp/report/exam_student_lable.rml', parser=exam_student_lable_report, header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dmacvicar/spacewalk | proxy/proxy/redirect/rhnRedirect.py | 3 | 17780 | # Spacewalk Proxy Server SSL Redirect handler code.
#
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# language imports
import socket
import re
from urlparse import urlparse, urlunparse
# common module imports
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnTB import Traceback
from spacewalk.common import rhnFlags, rhnLib, apache
# local module imports
from proxy.rhnShared import SharedHandler
from proxy import rhnConstants
# rhnlib imports
from rhn import connections
# Main apache entry point for the proxy.
class RedirectHandler(SharedHandler):
""" Spacewalk Proxy SSL Redirect specific handler code called by rhnApache.
Workflow is:
Client -> Apache:Broker -> Squid -> Apache:Redirect -> Satellite
Redirect handler get all request for localhost:80 and they come
from Broker handler through Squid, which hadle caching.
Redirect module transform destination url to parent or http proxy.
Depend on what we have in CFG.
"""
def __init__(self, req):
SharedHandler.__init__(self, req)
self.componentType = 'proxy.redirect'
self._initConnectionVariables(req)
self.rhnParentXMLRPC = None
    def _initConnectionVariables(self, _req):
        """ set connection variables
        NOTE: self.{caChain,rhnParent,httpProxy*} are initialized
        in SharedHandler

        Rewrites self.rhnParent from a bare host into a full URL carrying
        the request's effective URI, and records the parent's /XMLRPC
        endpoint in self.rhnParentXMLRPC. Scheme follows CFG.USE_SSL.
        """
        effectiveURI = self._getEffectiveURI()
        effectiveURI_parts = urlparse(effectiveURI)
        scheme = 'http'
        if CFG.USE_SSL:
            scheme = 'https'
        else:
            # Plain-http parent: no CA chain will ever be used.
            self.caChain = ''
        self.rhnParentXMLRPC = urlunparse((scheme, self.rhnParent, '/XMLRPC', '', '', ''))
        self.rhnParent = urlunparse((scheme, self.rhnParent) + effectiveURI_parts[2:])
        log_debug(3, 'remapped self.rhnParent: %s' % self.rhnParent)
        log_debug(3, 'remapped self.rhnParentXMLRPC: %s' % self.rhnParentXMLRPC)
    def handler(self):
        """ Main handler for all requests pumped through this server.

        Returns an apache status code; anything other than OK or
        PARTIAL_CONTENT from the server leg is returned to apache as-is.
        """
        log_debug(4, 'In redirect handler')
        self._prepHandler()
        # Rebuild the X-Forwarded-For header so that it reflects the actual
        # path of the request. We must do this because squid is unable to
        # determine the "real" client, and will make each entry in the chain
        # 127.0.0.1.
        _oto = rhnFlags.get('outputTransportOptions')
        _oto['X-Forwarded-For'] = _oto['X-RHN-IP-Path']
        self.rhnParent = self.rhnParent or '' # paranoid
        log_debug(4, 'Connecting to parent...')
        self._connectToParent() # part 1
        log_debug(4, 'Initiating communication with server...')
        status = self._serverCommo() # part 2
        # PARTIAL_CONTENT (byte-range replies) is a success for our purposes.
        if (status != apache.OK) and (status != apache.HTTP_PARTIAL_CONTENT):
            log_debug(3, "Leaving handler with status code %s" % status)
            return status
        log_debug(4, 'Initiating communication with client...')
        # If we got this far, it has to be a good response
        return self._clientCommo(status)
def _handleServerResponse(self, status):
    """ Here, we'll override the default behavior for handling server responses
    so that we can adequately handle 302's.

    We will follow redirects unless it is redirect to (re)login page. In which
    case we change protocol to https and return redirect to user.
    """
    # In case of a 302, redirect the original request to the location
    # specified in the response.
    if status == apache.HTTP_MOVED_TEMPORARILY or \
       status == apache.HTTP_MOVED_PERMANENTLY:
        log_debug(1, "Received redirect response: ", status)
        # if we redirected to ssl version of login page, send redirect directly to user
        headers = self.responseContext.getHeaders()
        if headers is not None:
            for headerKey in headers.keys():
                # NOTE(review): only a lowercase 'location' key is matched
                # here -- presumably the response headers are normalized to
                # lowercase upstream; verify against responseContext.
                if headerKey == 'location':
                    location = self._get_header(headerKey)
                    relogin = re.compile(r'https?://.*(/rhn/(Re)?Login.do\?.*)')
                    m = relogin.match(location[0])
                    if m:
                        # pull server name out of
                        # "t:o:k:e:n:hostname1,t:o:k:e:n:hostname2,..."
                        proxy_auth = self.req.headers_in['X-RHN-Proxy-Auth']
                        last_auth = proxy_auth.split(',')[-1]
                        server_name = last_auth.split(':')[-1]
                        log_debug(1, "Redirecting to SSL version of login page")
                        rhnLib.setHeaderValue(self.req.headers_out, 'Location',
                                              "https://%s%s" % (server_name, m.group(1)))
                        return apache.HTTP_MOVED_PERMANENTLY
        redirectStatus = self.__redirectToNextLocation()
        # At this point, we've either:
        #
        # (a) successfully redirected to the 3rd party
        # (b) been told to redirect somewhere else from the 3rd party
        # (c) run out of retry attempts
        #
        # We'll keep redirecting until we've received HTTP_OK or an error.
        while redirectStatus == apache.HTTP_MOVED_PERMANENTLY or \
              redirectStatus == apache.HTTP_MOVED_TEMPORARILY:
            # We've been told to redirect again. We'll pass a special
            # argument to ensure that if we end up back at the server, we
            # won't be redirected again.
            log_debug(1, "Redirected again! Code=", redirectStatus)
            redirectStatus = self.__redirectToNextLocation(True)
        if (redirectStatus != apache.HTTP_OK) and (redirectStatus != apache.HTTP_PARTIAL_CONTENT):
            # We must have run out of retry attempts. Fail over to Hosted
            # to perform the request.
            log_debug(1, "Redirection failed; retries exhausted. " \
                         "Failing over. Code=", \
                      redirectStatus)
            redirectStatus = self.__redirectFailover()
        # Always hand the final status back through the shared handler so
        # the client gets a fully processed response.
        return SharedHandler._handleServerResponse(self, redirectStatus)
    else:
        # Otherwise, revert to default behavior.
        return SharedHandler._handleServerResponse(self, status)
def __redirectToNextLocation(self, loopProtection=False):
    """ This function will perform a redirection to the next location, as
    specified in the last response's "Location" header. This function will
    return an actual HTTP response status code. If successful, it will
    return apache.HTTP_OK, not apache.OK. If unsuccessful, this function
    will retry a configurable number of times, as defined in
    CFG.NETWORK_RETRIES. The following codes define "success".

        HTTP_OK
        HTTP_PARTIAL_CONTENT
        HTTP_MOVED_TEMPORARILY
        HTTP_MOVED_PERMANENTLY

    Upon successful completion of this function, the responseContext
    should be populated with the response.

    Arguments:

    loopProtection - If True, this function will insert a special
                     header into the new request that tells the RHN
                     server not to issue another redirect to us, in case
                     that's where we end up being redirected.

    Return:

    This function may return any valid HTTP_* response code. See
    __redirectToNextLocationNoRetry for more info.
    """
    successCodes = (apache.HTTP_OK,
                    apache.HTTP_PARTIAL_CONTENT,
                    apache.HTTP_MOVED_PERMANENTLY,
                    apache.HTTP_MOVED_TEMPORARILY)
    attemptsLeft = CFG.NETWORK_RETRIES
    status = self.__redirectToNextLocationNoRetry(loopProtection)
    # Keep retrying until we see one of the "success" codes or the
    # allowed number of attempts is exhausted.
    while status not in successCodes and attemptsLeft > 0:
        attemptsLeft = attemptsLeft - 1
        log_debug(1, "Redirection failed; trying again. "
                     "Retries left=", attemptsLeft, "Code=", status)
        # Pop the current response context and restore the state to the
        # last successful response; removing the context closes all of
        # its open connections.
        self.responseContext.remove()
        # XXX: Possibly sleep here for a second?
        status = self.__redirectToNextLocationNoRetry(loopProtection)
    return status
def __redirectToNextLocationNoRetry(self, loopProtection = False):
    """ This function will perform a redirection to the next location, as
    specified in the last response's "Location" header. This function will
    return an actual HTTP response status code. If successful, it will
    return apache.HTTP_OK, not apache.OK. If unsuccessful, this function
    will simply return; no retries will be performed. The following error
    codes can be returned:

    HTTP_OK,HTTP_PARTIAL_CONTENT - Redirect successful.
    HTTP_MOVED_TEMPORARILY      - Redirect was redirected again by 3rd party.
    HTTP_MOVED_PERMANENTLY      - Redirect was redirected again by 3rd party.
    HTTP_INTERNAL_SERVER_ERROR  - Error extracting redirect information
    HTTP_SERVICE_UNAVAILABLE    - Could not connect to 3rd party server,
                                  connection was reset, or a read error
                                  occurred during communication.
    HTTP_*                      - Any other HTTP status code may also be
                                  returned.

    Upon successful completion of this function, a new responseContext
    will be created and pushed onto the stack.
    """
    # Obtain the redirect location first before we replace the current
    # response context. It's contained in the Location header of the
    # previous response.
    redirectLocation = self._get_header(rhnConstants.HEADER_LOCATION)
    # We are about to redirect to a new location so now we'll push a new
    # response context before we return any errors.
    self.responseContext.add()
    # There should always be a redirect URL passed back to us. If not,
    # there's an error.
    if not redirectLocation or len(redirectLocation) == 0:
        log_error(" No redirect location specified!")
        Traceback(mail = 0)
        return apache.HTTP_INTERNAL_SERVER_ERROR
    # The _get_header function returns the value as a list. There should
    # always be exactly one location specified.
    redirectLocation = redirectLocation[0]
    log_debug(1, " Redirecting to: ", redirectLocation)
    # Tear apart the redirect URL. We need the scheme, the host, the
    # port (if not the default), and the URI.
    _scheme, host, port, uri = self._parse_url(redirectLocation)
    # Add any params onto the URI since _parse_url doesn't include them.
    if redirectLocation.find('?') > -1:
        uri += redirectLocation[redirectLocation.index('?'):]
    # Now create a new connection. We'll use SSL if configured to do
    # so.
    params = {
        'host' : host,
        'port' : port,
    }
    if CFG.has_key('timeout'):
        params['timeout'] = CFG.TIMEOUT
    if CFG.USE_SSL:
        log_debug(1, " Redirecting with SSL. Cert= ", self.caChain)
        params['trusted_certs'] = [self.caChain]
        connection = connections.HTTPSConnection(**params)
    else:
        log_debug(1, " Redirecting withOUT SSL.")
        connection = connections.HTTPConnection(**params)
    # Put the connection into the current response context so it is
    # closed when the caller pops the context.
    self.responseContext.setConnection(connection)
    # Now open the connection to the 3rd party server.
    log_debug(4, "Attempting to connect to 3rd party server...")
    try:
        connection.connect()
    except socket.error, e:
        log_error("Error opening redirect connection", redirectLocation, e)
        Traceback(mail = 0)
        return apache.HTTP_SERVICE_UNAVAILABLE
    log_debug(4, "Connected to 3rd party server:",
              connection.sock.getpeername())
    # Put the request out on the wire.
    response = None
    try:
        # We'll redirect to the URI made in the original request, but with
        # the new server instead.
        log_debug(4, "Making request: ", self.req.method, uri)
        connection.putrequest(self.req.method, uri)
        # Add some custom headers.
        if loopProtection:
            # Tell the RHN server not to redirect us again.
            connection.putheader(rhnConstants.HEADER_RHN_REDIRECT, '0')
        log_debug(4, " Adding original URL header: ", self.rhnParent)
        connection.putheader(rhnConstants.HEADER_RHN_ORIG_LOC,
                             self.rhnParent)
        # Add all the other headers in the original request in case we
        # need to re-authenticate with Hosted.
        for hdr in self.req.headers_in.keys():
            if hdr.lower().startswith("x-rhn"):
                connection.putheader(hdr, self.req.headers_in[hdr])
                log_debug(4, "Passing request header: ",
                          hdr,
                          self.req.headers_in[hdr])
        connection.endheaders()
        response = connection.getresponse()
    except IOError, ioe:
        # Raised by getresponse() if server closes connection on us.
        log_error("Redirect connection reset by peer.",
                  redirectLocation,
                  ioe)
        Traceback(mail = 0)
        # The connection is saved in the current response context, and
        # will be closed when the caller pops the context.
        return apache.HTTP_SERVICE_UNAVAILABLE
    except socket.error, se:
        # Some socket error occurred. Possibly a read error.
        log_error("Redirect request failed.", redirectLocation, se)
        Traceback(mail = 0)
        # The connection is saved in the current response context, and
        # will be closed when the caller pops the context.
        return apache.HTTP_SERVICE_UNAVAILABLE
    # Save the response headers and body FD in the current communication
    # context.
    self.responseContext.setBodyFd(response)
    self.responseContext.setHeaders(response.msg)
    log_debug(4, "Response headers: ",
              self.responseContext.getHeaders().items())
    log_debug(4, "Got redirect response. Status=", response.status)
    # Return the HTTP status to the caller.
    return response.status
def __redirectFailover(self):
    """ This routine resends the original request back to the satellite/hosted
    system if a redirect to a 3rd party failed. To prevent redirection loops
    from occurring, an "X-RHN-Redirect: 0" header is passed along with the
    request. This function will return apache.HTTP_OK if everything
    succeeded, otherwise it will return an appropriate HTTP error code.
    """
    # Special header telling the server not to send us any more redirects.
    transport_opts = rhnFlags.get('outputTransportOptions')
    transport_opts[rhnConstants.HEADER_RHN_REDIRECT] = '0'
    log_debug(4, "Added X-RHN-Redirect header to outputTransportOptions:",
              transport_opts)
    # Reset the existing connection and reconnect to the RHN parent server.
    self.responseContext.clear()
    self._connectToParent()
    # One more round-trip through serverCommo; the X-RHN-Redirect header
    # above keeps us out of an infinite loop. Only GETs are redirected, so
    # no request body needs to be resent.
    status = self._serverCommo()
    # Normalize apache.OK to HTTP_OK so callers see a single success code.
    return apache.HTTP_OK if status == apache.OK else status
#===============================================================================
| gpl-2.0 |
wskplho/sl4a | python-build/python-libs/gdata/src/gdata/base/__init__.py | 155 | 24048 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Base."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
# XML namespaces which are often used in Google Base entities.
# GBASE_* is the item-data namespace (the 'g:' prefix seen in feeds);
# GMETA_* is the metadata namespace used by the attributes/item-types feeds.
# The *_TEMPLATE forms build fully qualified ElementTree tags, e.g.
# GBASE_TEMPLATE % 'rating' -> '{http://base.google.com/ns/1.0}rating'.
GBASE_NAMESPACE = 'http://base.google.com/ns/1.0'
GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s'
GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0'
GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s'
class ItemAttributeContainer(object):
    """Provides methods for finding Google Base Item attributes.

    Google Base item attributes are child nodes in the gbase namespace. Google
    Base allows you to define your own item attributes and this class provides
    methods to interact with the custom attributes.
    """

    def GetItemAttributes(self, name):
        """Returns a list of all item attributes which have the desired name.

        Args:
          name: str The tag of the desired base attributes. For example, calling
              this method with 'rating' would return a list of ItemAttributes
              represented by a 'g:rating' tag.

        Returns:
          A list of matching ItemAttribute objects.
        """
        result = []
        for attrib in self.item_attributes:
            if attrib.name == name:
                result.append(attrib)
        return result

    def FindItemAttribute(self, name):
        """Get the contents of the first Base item attribute which matches name.

        This method is deprecated, please use GetItemAttributes instead.

        Args:
          name: str The tag of the desired base attribute. For example, calling
              this method with name = 'rating' would search for a tag rating
              in the GBase namespace in the item attributes.

        Returns:
          The text contents of the item attribute, or None if the attribute was
          not found.
        """
        for attrib in self.item_attributes:
            if attrib.name == name:
                return attrib.text
        return None

    def AddItemAttribute(self, name, value, value_type=None, access=None):
        """Adds a new item attribute tag containing the value.

        Creates a new extension element in the GBase namespace to represent a
        Google Base item attribute.

        Args:
          name: str The tag name for the new attribute. This must be a valid xml
              tag name. The tag will be placed in the GBase namespace.
          value: str Contents for the item attribute
          value_type: str (optional) The type of data in the value. Examples:
              text, float
          access: str (optional) Used to hide attributes. The attribute is not
              exposed in the snippets feed if access is set to 'private'.
        """
        new_attribute = ItemAttribute(name, text=value,
                                      text_type=value_type, access=access)
        self.item_attributes.append(new_attribute)

    def SetItemAttribute(self, name, value):
        """Changes the value of the first item attribute whose name matches."""
        for attrib in self.item_attributes:
            if attrib.name == name:
                attrib.text = value
                return

    def RemoveItemAttribute(self, name):
        """Deletes the first item attribute whose name matches."""
        for i in xrange(len(self.item_attributes)):
            if self.item_attributes[i].name == name:
                del self.item_attributes[i]
                return

    # We need to overwrite _ConvertElementTreeToMember to add special logic to
    # convert custom attributes to members
    def _ConvertElementTreeToMember(self, child_tree):
        """Parse one child XML node into a typed member or an ItemAttribute.

        Precedence: (1) tags declared in _children become typed members,
        (2) any other tag in the gbase namespace becomes an ItemAttribute,
        (3) everything else falls through to the generic extension handling.
        """
        # Find the element's tag in this class's list of child members
        if self.__class__._children.has_key(child_tree.tag):
            member_name = self.__class__._children[child_tree.tag][0]
            member_class = self.__class__._children[child_tree.tag][1]
            # If the class member is supposed to contain a list, make sure the
            # matching member is set to a list, then append the new member
            # instance to the list.
            if isinstance(member_class, list):
                if getattr(self, member_name) is None:
                    setattr(self, member_name, [])
                getattr(self, member_name).append(atom._CreateClassFromElementTree(
                    member_class[0], child_tree))
            else:
                setattr(self, member_name,
                        atom._CreateClassFromElementTree(member_class, child_tree))
        elif child_tree.tag.find('{%s}' % GBASE_NAMESPACE) == 0:
            # If this is in the gbase namespace, make it into an extension element.
            name = child_tree.tag[child_tree.tag.index('}')+1:]
            value = child_tree.text
            if child_tree.attrib.has_key('type'):
                value_type = child_tree.attrib['type']
            else:
                value_type = None
            self.AddItemAttribute(name, value, value_type)
        else:
            atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

    # We need to overwrite _AddMembersToElementTree to add special logic to
    # convert custom members to XML nodes.
    def _AddMembersToElementTree(self, tree):
        """Serialize typed members, XML attributes, and item attributes to tree."""
        # Convert the members of this class which are XML child nodes.
        # This uses the class's _children dictionary to find the members which
        # should become XML child nodes.
        member_node_names = [values[0] for tag, values in
                             self.__class__._children.iteritems()]
        for member_name in member_node_names:
            member = getattr(self, member_name)
            if member is None:
                pass
            elif isinstance(member, list):
                for instance in member:
                    instance._BecomeChildElement(tree)
            else:
                member._BecomeChildElement(tree)
        # Convert the members of this class which are XML attributes.
        for xml_attribute, member_name in self.__class__._attributes.iteritems():
            member = getattr(self, member_name)
            if member is not None:
                tree.attrib[xml_attribute] = member
        # Convert all special custom item attributes to nodes
        for attribute in self.item_attributes:
            attribute._BecomeChildElement(tree)
        # Lastly, call the ExtensionContainer's _AddMembersToElementTree to
        # convert any extension attributes.
        atom.ExtensionContainer._AddMembersToElementTree(self, tree)
class ItemAttribute(atom.Text):
    """An optional or user defined attribute for a GBase item.

    Google Base allows items to have custom attribute child nodes. These nodes
    have contents and a type attribute which tells Google Base whether the
    contents are text, a float value with units, etc. The Atom text class has
    the same structure, so this class inherits from Text.
    """
    _namespace = GBASE_NAMESPACE
    _children = atom.Text._children.copy()
    _attributes = atom.Text._attributes.copy()
    _attributes['access'] = 'access'

    def __init__(self, name, text_type=None, access=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Constructor for a GBase item attribute

        Args:
          name: str The name of the attribute. Examples include
              price, color, make, model, pages, salary, etc.
          text_type: str (optional) The type associated with the text contents
          access: str (optional) If the access attribute is set to 'private',
              the attribute will not be included in the item's description in
              the snippets feed
          text: str (optional) The text data in this element
          extension_elements: list (optional) A list of ExtensionElement
              instances
          extension_attributes: dict (optional) A dictionary of attribute
              value string pairs
        """
        self.name = name
        self.type = text_type
        self.access = access
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}

    def _BecomeChildElement(self, tree):
        """Append this attribute to *tree* as a child element.

        Unlike most atom classes this one has no fixed _tag; the tag is
        derived from self.name, so the element is created empty and its tag
        is patched after it has been appended.
        """
        new_child = ElementTree.Element('')
        tree.append(new_child)
        new_child.tag = '{%s}%s' % (self.__class__._namespace,
                                    self.name)
        self._AddMembersToElementTree(new_child)

    def _ToElementTree(self):
        """Return a new ElementTree element named after self.name."""
        new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
                                                   self.name))
        self._AddMembersToElementTree(new_tree)
        return new_tree
def ItemAttributeFromString(xml_string):
    """Parse an XML string into an ItemAttribute.

    Returns None when the root element is not in the gbase namespace.
    """
    return _ItemAttributeFromElementTree(ElementTree.fromstring(xml_string))
def _ItemAttributeFromElementTree(element_tree):
    """Convert a gbase-namespace ElementTree node into an ItemAttribute.

    Returns None when the node is not in the gbase namespace or carries an
    empty local name.
    """
    tag = element_tree.tag
    if tag.find(GBASE_TEMPLATE % '') != 0:
        return None
    attribute = ItemAttribute('')
    attribute._HarvestElementTree(element_tree)
    # The local name follows the '{namespace}' prefix in the qualified tag.
    attribute.name = tag[tag.index('}') + 1:]
    if attribute.name and attribute.name != '':
        return attribute
    return None
class Label(atom.AtomBase):
    """The gbase:label element used to tag Google Base items."""
    _tag = 'label'
    _namespace = GBASE_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create a Label.

        Args:
          text: str (optional) The label's text content.
          extension_elements: list (optional) ExtensionElement instances.
          extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
        self.text = text
def LabelFromString(xml_string):
    """Build a Label instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Label, xml_string)
class Thumbnail(atom.AtomBase):
    """The gm:thumbnail element: a thumbnail image with optional size."""
    _tag = 'thumbnail'
    _namespace = GMETA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['width'] = 'width'
    _attributes['height'] = 'height'

    def __init__(self, width=None, height=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Create a Thumbnail.

        Args:
          width: str (optional) The width XML attribute.
          height: str (optional) The height XML attribute.
          text: str (optional) Text content of the element.
          extension_elements: list (optional) ExtensionElement instances.
          extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.width = width
        self.height = height
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def ThumbnailFromString(xml_string):
    """Build a Thumbnail instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Thumbnail, xml_string)
class ImageLink(atom.Text):
    """The g:image_link element: an item image with gm:thumbnail children."""
    _tag = 'image_link'
    _namespace = GBASE_NAMESPACE
    _children = atom.Text._children.copy()
    _attributes = atom.Text._attributes.copy()
    _children['{%s}thumbnail' % GMETA_NAMESPACE] = ('thumbnail', [Thumbnail])

    def __init__(self, thumbnail=None, text=None, extension_elements=None,
                 text_type=None, extension_attributes=None):
        """Create an ImageLink.

        Args:
          thumbnail: list (optional) Thumbnail instances for this image.
          text: str (optional) Text content of the element.
          extension_elements: list (optional) ExtensionElement instances.
          text_type: str (optional) The type attribute of the text content.
          extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.thumbnail = thumbnail or []
        self.type = text_type
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def ImageLinkFromString(xml_string):
    """Build an ImageLink instance from its XML string representation."""
    return atom.CreateClassFromXMLString(ImageLink, xml_string)
class ItemType(atom.Text):
    """The g:item_type element naming the type of a Google Base item."""
    _tag = 'item_type'
    _namespace = GBASE_NAMESPACE
    _children = atom.Text._children.copy()
    _attributes = atom.Text._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 text_type=None, extension_attributes=None):
        """Create an ItemType.

        Args:
          text: str (optional) The item type name.
          extension_elements: list (optional) ExtensionElement instances.
          text_type: str (optional) The type attribute of the text content.
          extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.type = text_type
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def ItemTypeFromString(xml_string):
    """Build an ItemType instance from its XML string representation."""
    return atom.CreateClassFromXMLString(ItemType, xml_string)
class MetaItemType(ItemType):
    """An item_type element in the gm: (metadata) namespace.

    Identical to ItemType except for the namespace; item type entries in the
    item types feed use the metadata namespace rather than the gbase one.
    """
    _tag = 'item_type'
    _namespace = GMETA_NAMESPACE
    _children = ItemType._children.copy()
    _attributes = ItemType._attributes.copy()
def MetaItemTypeFromString(xml_string):
    """Build a MetaItemType instance from its XML string representation."""
    return atom.CreateClassFromXMLString(MetaItemType, xml_string)
class Value(atom.AtomBase):
    """Metadata about common values for a given attribute.

    A value is a child of an attribute which comes from the attributes feed.
    The value's text is a commonly used value paired with an attribute name
    and the value's count tells how often this value appears for the given
    attribute in the search results.
    """
    _tag = 'value'
    _namespace = GMETA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['count'] = 'count'

    def __init__(self, count=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create a Value metadata element.

        Args:
          count: str (optional) How many times the value in text is given
              for the parent attribute.
          text: str (optional) The value which appears in the search results.
          extension_elements: list (optional) ExtensionElement instances.
          extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.count = count
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def ValueFromString(xml_string):
    """Build a Value instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Value, xml_string)
class Attribute(atom.Text):
    """Metadata about an attribute from the attributes feed.

    An entry from the attributes feed contains a list of attributes. Each
    attribute describes the attribute's type and count of the items which
    use the attribute.
    """
    _tag = 'attribute'
    _namespace = GMETA_NAMESPACE
    _children = atom.Text._children.copy()
    _attributes = atom.Text._attributes.copy()
    _children['{%s}value' % GMETA_NAMESPACE] = ('value', [Value])
    _attributes['count'] = 'count'
    _attributes['name'] = 'name'

    def __init__(self, name=None, attribute_type=None, count=None, value=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Create an Attribute metadata element.

        Args:
          name: str (optional) The name of the attribute.
          attribute_type: str (optional) The type for the attribute. Examples:
              text, float, etc.
          count: str (optional) The number of times this attribute appears in
              the query results.
          value: list (optional) The values which are often used for this
              attribute.
          text: str (optional) The text contents of the XML for this attribute.
          extension_elements: list (optional) ExtensionElement instances.
          extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.name = name
        self.type = attribute_type
        self.count = count
        self.value = value or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def AttributeFromString(xml_string):
    """Build an Attribute instance from its XML string representation."""
    return atom.CreateClassFromXMLString(Attribute, xml_string)
class Attributes(atom.AtomBase):
    """A collection of Google Base metadata attributes (gm:attributes)."""
    _tag = 'attributes'
    _namespace = GMETA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

    def __init__(self, attribute=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Create an Attributes collection.

        Args:
          attribute: list (optional) Attribute instances in this collection.
          extension_elements: list (optional) ExtensionElement instances.
          extension_attributes: dict (optional) attribute/value string pairs.
          text: str (optional) Text content of the element.
        """
        self.attribute = attribute or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
class GBaseItem(ItemAttributeContainer, gdata.BatchEntry):
    """A Google Base flavor of an Atom Entry.

    Google Base items have required attributes, recommended attributes, and
    user defined attributes. The required attributes are stored as typed
    members; recommended and user defined attributes live in item_attributes
    and are manipulated through the inherited ItemAttributeContainer methods
    (AddItemAttribute, SetItemAttribute, FindItemAttribute,
    RemoveItemAttribute).
    """
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.BatchEntry._children.copy()
    _attributes = gdata.BatchEntry._attributes.copy()
    _children['{%s}label' % GBASE_NAMESPACE] = ('label', [Label])
    _children['{%s}item_type' % GBASE_NAMESPACE] = ('item_type', ItemType)

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None,
                 rights=None, source=None, summary=None, title=None,
                 updated=None, control=None, label=None, item_type=None,
                 item_attributes=None, batch_operation=None, batch_id=None,
                 batch_status=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Create a GBaseItem; all arguments are optional."""
        # Standard Atom entry members.
        self.author = author or []
        self.category = category or []
        self.content = content
        self.contributor = contributor or []
        self.id = atom_id
        self.link = link or []
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.title = title
        self.updated = updated
        self.control = control
        # Google Base specific members.
        self.label = label or []
        self.item_type = item_type
        self.item_attributes = item_attributes or []
        # Batch request members.
        self.batch_operation = batch_operation
        self.batch_id = batch_id
        self.batch_status = batch_status
        # Generic extension members.
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def GBaseItemFromString(xml_string):
    """Build a GBaseItem instance from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseItem, xml_string)
class GBaseSnippet(GBaseItem):
    """An entry from the read-only snippets feed; structurally a GBaseItem."""
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = GBaseItem._children.copy()
    _attributes = GBaseItem._attributes.copy()
def GBaseSnippetFromString(xml_string):
    """Build a GBaseSnippet instance from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseSnippet, xml_string)
class GBaseAttributeEntry(gdata.GDataEntry):
    """An Atom entry from the attributes feed; carries gm:attribute children."""
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None,
                 rights=None, source=None, summary=None, title=None,
                 updated=None, label=None, attribute=None, control=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Create a GBaseAttributeEntry; all arguments are optional."""
        # Standard Atom entry members.
        self.author = author or []
        self.category = category or []
        self.content = content
        self.contributor = contributor or []
        self.id = atom_id
        self.link = link or []
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.title = title
        self.updated = updated
        self.control = control
        # Google Base specific members.
        self.label = label or []
        self.attribute = attribute or []
        # Generic extension members.
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def GBaseAttributeEntryFromString(xml_string):
    """Build a GBaseAttributeEntry from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseAttributeEntry, xml_string)
class GBaseItemTypeEntry(gdata.GDataEntry):
    """An Atom entry from the item types feed.

    These entries contain a list of attributes which are stored in one
    XML node called attributes. This class simplifies the data structure
    by also treating attributes as a flat list of Attribute instances.

    Note that the item_type for an item type entry is in the Google Base meta
    namespace as opposed to item_types encountered in other feeds.
    """
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}attributes' % GMETA_NAMESPACE] = ('attributes', Attributes)
    _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])
    _children['{%s}item_type' % GMETA_NAMESPACE] = ('item_type', MetaItemType)

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None,
                 rights=None, source=None, summary=None, title=None,
                 updated=None, label=None, item_type=None, control=None,
                 attribute=None, attributes=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Create a GBaseItemTypeEntry; all arguments are optional."""
        # Standard Atom entry members.
        self.author = author or []
        self.category = category or []
        self.content = content
        self.contributor = contributor or []
        self.id = atom_id
        self.link = link or []
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.title = title
        self.updated = updated
        self.control = control
        # Google Base specific members. NOTE: unlike `attribute`, the
        # `attributes` container is deliberately left as-is (may be None).
        self.label = label or []
        self.item_type = item_type
        self.attributes = attributes
        self.attribute = attribute or []
        # Generic extension members.
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def GBaseItemTypeEntryFromString(xml_string):
    """Build a GBaseItemTypeEntry from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseItemTypeEntry, xml_string)
class GBaseItemFeed(gdata.BatchFeed):
    """A feed containing Google Base Items.

    Entries are parsed as GBaseItem instances; batch-request behavior comes
    from the gdata.BatchFeed base class.
    """
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.BatchFeed._children.copy()
    _attributes = gdata.BatchFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItem])
def GBaseItemFeedFromString(xml_string):
    """Build a GBaseItemFeed from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseItemFeed, xml_string)
class GBaseSnippetFeed(gdata.GDataFeed):
    """A feed containing Google Base Snippets; entries parse as GBaseSnippet."""
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseSnippet])
def GBaseSnippetFeedFromString(xml_string):
    """Build a GBaseSnippetFeed from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseSnippetFeed, xml_string)
class GBaseAttributesFeed(gdata.GDataFeed):
    """A feed containing Google Base Attributes.

    A query sent to the attributes feed will return a feed of
    attributes which are present in the items that match the
    query.
    """
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                    [GBaseAttributeEntry])
def GBaseAttributesFeedFromString(xml_string):
    """Build a GBaseAttributesFeed from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseAttributesFeed, xml_string)
class GBaseLocalesFeed(gdata.GDataFeed):
    """The locales feed from Google Base.

    This read-only feed defines the permitted locales for Google Base. The
    locale value identifies the language, currency, and date formats used in a
    feed. Entries use the generic GDataFeed parsing (no custom children).
    """
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
def GBaseLocalesFeedFromString(xml_string):
    """Build a GBaseLocalesFeed from its XML string representation."""
    return atom.CreateClassFromXMLString(GBaseLocalesFeed, xml_string)
class GBaseItemTypesFeed(gdata.GDataFeed):
    """A feed from the Google Base item types feed"""
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    # Copy the parent's parsing tables, then register GBaseItemTypeEntry
    # as the class used to materialize <atom:entry> children.
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItemTypeEntry])
def GBaseItemTypesFeedFromString(xml_string):
    """Deserialize *xml_string* into a GBaseItemTypesFeed instance."""
    feed = atom.CreateClassFromXMLString(GBaseItemTypesFeed, xml_string)
    return feed
| apache-2.0 |
912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/admin/models.py | 84 | 3028 | from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.utils import quote
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.encoding import smart_text
from django.utils.encoding import python_2_unicode_compatible
# Action flags stored in LogEntry.action_flag, recording which kind of
# change the admin performed on an object.
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
    def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
        """Create and persist a LogEntry describing one admin action.

        NOTE: the model is constructed positionally; the two leading ``None``
        values fill ``id`` and ``action_time`` (``action_time`` is populated
        automatically on save via ``auto_now``).  The argument order must
        therefore match the field declaration order on ``LogEntry``.
        """
        e = self.model(None, None, user_id, content_type_id, smart_text(object_id), object_repr[:200], action_flag, change_message)
        e.save()
@python_2_unicode_compatible
class LogEntry(models.Model):
    """Audit record of a single action performed through the Django admin."""
    action_time = models.DateTimeField(_('action time'), auto_now=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # content_type/object_id are nullable so entries survive model deletion.
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.TextField(_('object id'), blank=True, null=True)
    object_repr = models.CharField(_('object repr'), max_length=200)
    action_flag = models.PositiveSmallIntegerField(_('action flag'))
    change_message = models.TextField(_('change message'), blank=True)
    objects = LogEntryManager()
    class Meta:
        verbose_name = _('log entry')
        verbose_name_plural = _('log entries')
        db_table = 'django_admin_log'
        ordering = ('-action_time',)
    def __repr__(self):
        return smart_text(self.action_time)
    def __str__(self):
        # Human-readable summary keyed off the ADDITION/CHANGE/DELETION flag.
        if self.action_flag == ADDITION:
            return ugettext('Added "%(object)s".') % {'object': self.object_repr}
        elif self.action_flag == CHANGE:
            return ugettext('Changed "%(object)s" - %(changes)s') % {
                'object': self.object_repr,
                'changes': self.change_message,
            }
        elif self.action_flag == DELETION:
            return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
        return ugettext('LogEntry Object')
    def is_addition(self):
        return self.action_flag == ADDITION
    def is_change(self):
        return self.action_flag == CHANGE
    def is_deletion(self):
        return self.action_flag == DELETION
    def get_edited_object(self):
        "Returns the edited object represented by this log entry"
        return self.content_type.get_object_for_this_type(pk=self.object_id)
    def get_admin_url(self):
        """
        Returns the admin URL to edit the object represented by this log entry.
        This is relative to the Django admin index page.
        """
        if self.content_type and self.object_id:
            url_name = 'admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model)
            try:
                return reverse(url_name, args=(quote(self.object_id),))
            except NoReverseMatch:
                pass
        # No URL can be built (missing content type/id, or unregistered model).
        return None
| gpl-2.0 |
netsamir/dotfiles | dotbot/lib/pyyaml/tests/lib3/test_emitter.py | 62 | 4305 |
import yaml
def _compare_events(events1, events2):
    """Assert that two parsed YAML event streams are equivalent."""
    assert len(events1) == len(events2), (events1, events2)
    for left, right in zip(events1, events2):
        assert left.__class__ == right.__class__, (left, right)
        if isinstance(left, yaml.NodeEvent):
            assert left.anchor == right.anchor, (left, right)
        if isinstance(left, yaml.CollectionStartEvent):
            assert left.tag == right.tag, (left, right)
        if isinstance(left, yaml.ScalarEvent):
            # Tags only have to match when neither side is implicit.
            if True not in left.implicit + right.implicit:
                assert left.tag == right.tag, (left, right)
            assert left.value == right.value, (left, right)
def test_emitter_on_data(data_filename, canonical_filename, verbose=False):
    """Round-trip check: parse a data file, emit it, re-parse the output.

    The event stream after emission must match the original stream.
    """
    # Fix: use a context manager so the input file is closed deterministically
    # instead of leaking the handle until garbage collection.
    with open(data_filename, 'rb') as data_file:
        events = list(yaml.parse(data_file))
    output = yaml.emit(events)
    if verbose:
        print("OUTPUT:")
        print(output)
    new_events = list(yaml.parse(output))
    _compare_events(events, new_events)
test_emitter_on_data.unittest = ['.data', '.canonical']
def test_emitter_on_canonical(canonical_filename, verbose=False):
    """Re-emit a canonical file's events both with and without canonical style
    and verify the round trip each time."""
    # Fix: close the input file promptly instead of leaking the handle.
    with open(canonical_filename, 'rb') as canonical_file:
        events = list(yaml.parse(canonical_file))
    for canonical in [False, True]:
        output = yaml.emit(events, canonical=canonical)
        if verbose:
            print("OUTPUT (canonical=%s):" % canonical)
            print(output)
        new_events = list(yaml.parse(output))
        _compare_events(events, new_events)
test_emitter_on_canonical.unittest = ['.canonical']
def test_emitter_styles(data_filename, canonical_filename, verbose=False):
    """Re-emit every event stream under each scalar style and flow style,
    verifying the round trip for every combination."""
    for filename in [data_filename, canonical_filename]:
        # Fix: context manager closes each input file deterministically.
        with open(filename, 'rb') as input_file:
            events = list(yaml.parse(input_file))
        for flow_style in [False, True]:
            for style in ['|', '>', '"', '\'', '']:
                # Rebuild the stream with the forced style/flow settings;
                # events are immutable-ish, so replacements are constructed.
                styled_events = []
                for event in events:
                    if isinstance(event, yaml.ScalarEvent):
                        event = yaml.ScalarEvent(event.anchor, event.tag,
                                event.implicit, event.value, style=style)
                    elif isinstance(event, yaml.SequenceStartEvent):
                        event = yaml.SequenceStartEvent(event.anchor, event.tag,
                                event.implicit, flow_style=flow_style)
                    elif isinstance(event, yaml.MappingStartEvent):
                        event = yaml.MappingStartEvent(event.anchor, event.tag,
                                event.implicit, flow_style=flow_style)
                    styled_events.append(event)
                output = yaml.emit(styled_events)
                if verbose:
                    print("OUTPUT (filename=%r, flow_style=%r, style=%r)" % (filename, flow_style, style))
                    print(output)
                new_events = list(yaml.parse(output))
                _compare_events(events, new_events)
test_emitter_styles.unittest = ['.data', '.canonical']
class EventsLoader(yaml.Loader):
    """Loader that turns tagged YAML nodes into ``yaml.*Event`` objects."""
    def construct_event(self, node):
        """Instantiate the event class named by ``node.tag`` (minus '!')
        plus 'Event', filling in that class's default keyword arguments."""
        kwargs = {} if isinstance(node, yaml.ScalarNode) \
            else self.construct_mapping(node)
        event_name = str(node.tag[1:]) + 'Event'
        if event_name in ('AliasEvent', 'ScalarEvent',
                          'SequenceStartEvent', 'MappingStartEvent'):
            kwargs.setdefault('anchor', None)
        if event_name in ('ScalarEvent', 'SequenceStartEvent',
                          'MappingStartEvent'):
            kwargs.setdefault('tag', None)
        if event_name in ('SequenceStartEvent', 'MappingStartEvent'):
            kwargs.setdefault('implicit', True)
        if event_name == 'ScalarEvent':
            kwargs.setdefault('implicit', (False, True))
            kwargs.setdefault('value', '')
        return getattr(yaml, event_name)(**kwargs)
EventsLoader.add_constructor(None, EventsLoader.construct_event)
def test_emitter_events(events_filename, verbose=False):
    """Load an explicit event list via EventsLoader, emit it, and verify
    that re-parsing the output reproduces the same event stream."""
    # Fix: close the input file deterministically instead of leaking it.
    with open(events_filename, 'rb') as events_file:
        events = list(yaml.load(events_file, Loader=EventsLoader))
    output = yaml.emit(events)
    if verbose:
        print("OUTPUT:")
        print(output)
    new_events = list(yaml.parse(output))
    _compare_events(events, new_events)
if __name__ == '__main__':
    # Run through the shared PyYAML test harness when executed directly.
    import test_appliance
    test_appliance.run(globals())
| unlicense |
mmagnus/rna-pdb-tools | rna_tools/tools/rna_alignment/rna_align_seq_to_alignment.py | 2 | 1675 | #!/usr/bin/env python
"""
cmalign::
[mm] thf cmalign RF01831.cm 4lvv.seq
# STOCKHOLM 1.0
#=GF AU Infernal 1.1.2
4lvv -GGAGAGUA-GAUGAUUCGCGUUAAGUGUGUGUGA-AUGGGAUGUCG-UCACACAACGAAGC---GAGA---GCGCGGUGAAUCAUU-GCAUCCGCUCCA
#=GR 4lvv PP .********.******************9999998.***********.8999999******8...5555...8**************.************
#=GC SS_cons (((((----(((((((((((,,,,,<<-<<<<<<<<___________>>>>>>>>>>,,,<<<<______>>>>,,,)))))))))))-------)))))
#=GC RF ggcaGAGUAGggugccgugcGUuAAGUGccggcgggAcGGGgaGUUGcccgccggACGAAgggcaaaauugcccGCGguacggcaccCGCAUcCgCugcc
//
Reads seq files::
>4lvv
GGAGAGUAGAUGAUUCGCGUUAAGUGUGUGUGAAUGGGAUGUCGUCACACAACGAAGCGAGAGCGCGGUGAAUCAUUGCAUCCGCUCCA
"""
import rna_tools.utils.rna_alignment.rna_alignment as ra
import sys
import argparse
def get_parser():
    """Build the command-line parser for aligning a sequence to an alignment."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    p.add_argument('-f', '--file', help="cmalign output")
    p.add_argument('-a', '--alignment', help="alignment file", required=True)
    p.add_argument('-m', '--cm', help="cm model to run cmalign on")
    p.add_argument('-s', '--seq', help="seq fn, fasta!")
    return p
if __name__ == '__main__':
    args = get_parser().parse_args()
    # don't run cmalign
    if args.file:
        # A pre-computed cmalign output file was supplied; just parse it.
        cma = ra.CMAlign(outputfn=args.file)
    else:
        # Otherwise run cmalign ourselves on the given sequence and CM model.
        cma = ra.CMAlign()#(outputfn=args.file)
        cma.run_cmalign(args.seq, args.cm)
    seq = cma.get_seq()
    a = ra.RNAalignment(args.alignment)
    # Print the cmalign hit, the sequence mapped onto the alignment, and the
    # alignment's reference (RF) line for visual comparison.
    print('cma hit ' + seq)
    print('seq ' + a.align_seq(seq))
    print('a.rf ' + a.rf)
| gpl-3.0 |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/test/test_site.py | 8 | 15799 | """Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.test_support import run_unittest, TESTFN, EnvironmentVarGuard
from test.test_support import captured_output
import __builtin__
import os
import sys
import re
import encodings
import subprocess
import sysconfig
from copy import copy
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
if "site" in sys.modules:
    import site
else:
    # Interpreter was started with -S: nothing to test, so skip the module.
    raise unittest.SkipTest("importation of site.py suppressed")
if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
    # need to add user site directory for tests
    os.makedirs(site.USER_SITE)
    site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
    """Tests for helper functions.
    The setting of the encoding (set using sys.setdefaultencoding) used by
    the Unicode implementation is not tested.
    """
    def setUp(self):
        """Save a copy of sys.path"""
        self.sys_path = sys.path[:]
        self.old_base = site.USER_BASE
        self.old_site = site.USER_SITE
        self.old_prefixes = site.PREFIXES
        # sysconfig caches its config vars; snapshot so tests may mutate them.
        self.old_vars = copy(sysconfig._CONFIG_VARS)
    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path
        site.USER_BASE = self.old_base
        site.USER_SITE = self.old_site
        site.PREFIXES = self.old_prefixes
        sysconfig._CONFIG_VARS = self.old_vars
    def test_makepath(self):
        # Test makepath() have an absolute path for its first return value
        # and a case-normalized version of the absolute path for its
        # second value.
        path_parts = ("Beginning", "End")
        original_dir = os.path.join(*path_parts)
        abs_dir, norm_dir = site.makepath(*path_parts)
        self.assertEqual(os.path.abspath(original_dir), abs_dir)
        if original_dir == os.path.normcase(original_dir):
            self.assertEqual(abs_dir, norm_dir)
        else:
            self.assertEqual(os.path.normcase(abs_dir), norm_dir)
    def test_init_pathinfo(self):
        # Every existing directory on sys.path must appear in the set that
        # _init_pathinfo() returns.
        dir_set = site._init_pathinfo()
        for entry in [site.makepath(path)[1] for path in sys.path
                      if path and os.path.isdir(path)]:
            self.assertIn(entry, dir_set,
                          "%s from sys.path not found in set returned "
                          "by _init_pathinfo(): %s" % (entry, dir_set))
    def pth_file_tests(self, pth_file):
        """Contain common code for testing results of reading a .pth file"""
        self.assertIn(pth_file.imported, sys.modules,
                      "%s not in sys.modules" % pth_file.imported)
        self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path)
        self.assertFalse(os.path.exists(pth_file.bad_dir_path))
    def test_addpackage(self):
        # Make sure addpackage() imports if the line starts with 'import',
        # adds directories to sys.path for any line in the file that is not a
        # comment or import that is a valid directory name for where the .pth
        # file resides; invalid directories are not added
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # to make sure that nothing is
                                     # pre-existing that shouldn't be
        try:
            pth_file.create()
            site.addpackage(pth_file.base_dir, pth_file.filename, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()
    def make_pth(self, contents, pth_dir='.', pth_name=TESTFN):
        # Create a .pth file and return its (abspath, basename).
        pth_dir = os.path.abspath(pth_dir)
        pth_basename = pth_name + '.pth'
        pth_fn = os.path.join(pth_dir, pth_basename)
        pth_file = open(pth_fn, 'w')
        self.addCleanup(lambda: os.remove(pth_fn))
        pth_file.write(contents)
        pth_file.close()
        return pth_dir, pth_basename
    def test_addpackage_import_bad_syntax(self):
        # Issue 10642
        pth_dir, pth_fn = self.make_pth("import bad)syntax\n")
        with captured_output("stderr") as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegexpMatches(err_out.getvalue(), "line 1")
        self.assertRegexpMatches(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: the previous two should be independent checks so that the
        # order doesn't matter. The next three could be a single check
        # but my regex foo isn't good enough to write it.
        self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
        self.assertRegexpMatches(err_out.getvalue(), r'import bad\)syntax')
        self.assertRegexpMatches(err_out.getvalue(), 'SyntaxError')
    def test_addpackage_import_bad_exec(self):
        # Issue 10642
        pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
        with captured_output("stderr") as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegexpMatches(err_out.getvalue(), "line 2")
        self.assertRegexpMatches(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
        self.assertRegexpMatches(err_out.getvalue(), 'ImportError')
    @unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
                      "error for file paths containing null characters")
    def test_addpackage_import_bad_pth_file(self):
        # Issue 5258
        pth_dir, pth_fn = self.make_pth("abc\x00def\n")
        with captured_output("stderr") as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegexpMatches(err_out.getvalue(), "line 1")
        self.assertRegexpMatches(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
        self.assertRegexpMatches(err_out.getvalue(), 'TypeError')
    def test_addsitedir(self):
        # Same tests for test_addpackage since addsitedir() essentially just
        # calls addpackage() for every .pth file in the directory
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # Make sure that nothing is pre-existing
                                     # that is tested for
        try:
            pth_file.create()
            site.addsitedir(pth_file.base_dir, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()
    @unittest.skipUnless(site.ENABLE_USER_SITE, "requires access to PEP 370 "
                          "user-site (site.ENABLE_USER_SITE)")
    def test_s_option(self):
        # Spawn sub-interpreters to verify that -S / PYTHONNOUSERSITE /
        # PYTHONUSERBASE control whether the user site dir enters sys.path.
        usersite = site.USER_SITE
        self.assertIn(usersite, sys.path)
        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 1, "%r is not in sys.path (sys.exit returned %r)"
                % (usersite, rc))
        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-s', '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)
        env = os.environ.copy()
        env["PYTHONNOUSERSITE"] = "1"
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)
        env = os.environ.copy()
        env["PYTHONUSERBASE"] = "/tmp"
        rc = subprocess.call([sys.executable, '-c',
            'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
            env=env)
        self.assertEqual(rc, 1)
    def test_getuserbase(self):
        site.USER_BASE = None
        user_base = site.getuserbase()
        # the call sets site.USER_BASE
        self.assertEqual(site.USER_BASE, user_base)
        # let's set PYTHONUSERBASE and see if it uses it
        site.USER_BASE = None
        import sysconfig
        sysconfig._CONFIG_VARS = None
        with EnvironmentVarGuard() as environ:
            environ['PYTHONUSERBASE'] = 'xoxo'
            self.assertTrue(site.getuserbase().startswith('xoxo'),
                            site.getuserbase())
    def test_getusersitepackages(self):
        site.USER_SITE = None
        site.USER_BASE = None
        user_site = site.getusersitepackages()
        # the call sets USER_BASE *and* USER_SITE
        self.assertEqual(site.USER_SITE, user_site)
        self.assertTrue(user_site.startswith(site.USER_BASE), user_site)
    def test_getsitepackages(self):
        # Exercise every platform branch of site.getsitepackages().
        site.PREFIXES = ['xoxo']
        dirs = site.getsitepackages()
        if sys.platform in ('os2emx', 'riscos'):
            self.assertEqual(len(dirs), 1)
            wanted = os.path.join('xoxo', 'Lib', 'site-packages')
            self.assertEqual(dirs[0], wanted)
        elif '__pypy__' in sys.builtin_module_names:
            self.assertEquals(len(dirs), 1)
            wanted = os.path.join('xoxo', 'site-packages')
            self.assertEquals(dirs[0], wanted)
        elif (sys.platform == "darwin" and
            sysconfig.get_config_var("PYTHONFRAMEWORK")):
            # OS X framework builds
            site.PREFIXES = ['Python.framework']
            dirs = site.getsitepackages()
            self.assertEqual(len(dirs), 3)
            wanted = os.path.join('/Library',
                                  sysconfig.get_config_var("PYTHONFRAMEWORK"),
                                  sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[2], wanted)
        elif os.sep == '/':
            # OS X non-framework builds, Linux, FreeBSD, etc
            self.assertEqual(len(dirs), 2)
            wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[0], wanted)
            wanted = os.path.join('xoxo', 'lib', 'site-python')
            self.assertEqual(dirs[1], wanted)
        else:
            # other platforms
            self.assertEqual(len(dirs), 2)
            self.assertEqual(dirs[0], 'xoxo')
            wanted = os.path.join('xoxo', 'lib', 'site-packages')
            self.assertEqual(dirs[1], wanted)
class PthFile(object):
    """Helper class for handling testing of .pth files"""
    def __init__(self, filename_base=TESTFN, imported="time",
                 good_dirname="__testdir__", bad_dirname="__bad"):
        """Initialize instance variables"""
        self.filename = filename_base + ".pth"
        self.base_dir = os.path.abspath('')
        self.file_path = os.path.join(self.base_dir, self.filename)
        self.imported = imported
        self.good_dirname = good_dirname
        self.bad_dirname = bad_dirname
        self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
        self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
    def create(self):
        """Create a .pth file with a comment, blank lines, an ``import
        <self.imported>``, a line with self.good_dirname, and a line with
        self.bad_dirname.
        Creation of the directory for self.good_dir_path (based off of
        self.good_dirname) is also performed.
        Make sure to call self.cleanup() to undo anything done by this method.
        """
        FILE = open(self.file_path, 'w')
        try:
            print>>FILE, "#import @bad module name"
            print>>FILE, "\n"
            print>>FILE, "import %s" % self.imported
            print>>FILE, self.good_dirname
            print>>FILE, self.bad_dirname
        finally:
            FILE.close()
        # Only good_dirname actually exists on disk; bad_dirname must not.
        os.mkdir(self.good_dir_path)
    def cleanup(self, prep=False):
        """Make sure that the .pth file is deleted, self.imported is not in
        sys.modules, and that both self.good_dirname and self.bad_dirname are
        not existing directories."""
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if prep:
            # Preparation mode: stash any already-imported module so the test
            # can detect a fresh import, then restore it afterwards.
            self.imported_module = sys.modules.get(self.imported)
            if self.imported_module:
                del sys.modules[self.imported]
        else:
            if self.imported_module:
                sys.modules[self.imported] = self.imported_module
        if os.path.exists(self.good_dir_path):
            os.rmdir(self.good_dir_path)
        if os.path.exists(self.bad_dir_path):
            os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
    """Test side-effects from importing 'site'."""
    def setUp(self):
        """Make a copy of sys.path"""
        self.sys_path = sys.path[:]
    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path
    def test_abs__file__(self):
        # Make sure all imported modules have their __file__ attribute
        # as an absolute path.
        # Handled by abs__file__()
        site.abs__file__()
        for module in (sys, os, __builtin__):
            try:
                self.assertTrue(os.path.isabs(module.__file__), repr(module))
            except AttributeError:
                # Built-in modules may legitimately lack __file__.
                continue
        # We could try everything in sys.modules; however, when regrtest.py
        # runs something like test_frozen before test_site, then we will
        # be testing things loaded *after* test_site did path normalization
    def test_no_duplicate_paths(self):
        # No duplicate paths should exist in sys.path
        # Handled by removeduppaths()
        site.removeduppaths()
        seen_paths = set()
        for path in sys.path:
            self.assertNotIn(path, seen_paths)
            seen_paths.add(path)
    @unittest.skip('test not implemented')
    def test_add_build_dir(self):
        # Test that the build directory's Modules directory is used when it
        # should be.
        # XXX: implement
        pass
    def test_setting_quit(self):
        # 'quit' and 'exit' should be injected into __builtin__
        self.assertTrue(hasattr(__builtin__, "quit"))
        self.assertTrue(hasattr(__builtin__, "exit"))
    def test_setting_copyright(self):
        # 'copyright' and 'credits' should be in __builtin__
        self.assertTrue(hasattr(__builtin__, "copyright"))
        self.assertTrue(hasattr(__builtin__, "credits"))
    def test_setting_help(self):
        # 'help' should be set in __builtin__
        self.assertTrue(hasattr(__builtin__, "help"))
    def test_aliasing_mbcs(self):
        # On Windows with a cpXXX default locale, 'mbcs' must be registered
        # as an encoding alias by site.py.
        if sys.platform == "win32":
            import locale
            if locale.getdefaultlocale()[1].startswith('cp'):
                for value in encodings.aliases.aliases.itervalues():
                    if value == "mbcs":
                        break
                else:
                    self.fail("did not alias mbcs")
    def test_setdefaultencoding_removed(self):
        # Make sure sys.setdefaultencoding is gone
        self.assertTrue(not hasattr(sys, "setdefaultencoding"))
    def test_sitecustomize_executed(self):
        # If sitecustomize is available, it should have been imported.
        if "sitecustomize" not in sys.modules:
            try:
                import sitecustomize
            except ImportError:
                pass
            else:
                self.fail("sitecustomize not imported automatically")
def test_main():
    # Aggregate entry point used by regrtest.
    run_unittest(HelperFunctionsTests, ImportSideEffectTests)
if __name__ == "__main__":
    test_main()
| mit |
hgrif/incubator-airflow | tests/contrib/operators/test_spark_sql_operator.py | 16 | 2737 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.spark_sql_operator import SparkSqlOperator
# Fixed execution date used as the DAG start date in these tests.
DEFAULT_DATE = datetime.datetime(2017, 1, 1)
class TestSparkSqlOperator(unittest.TestCase):
    """Checks that SparkSqlOperator stores its configuration verbatim."""
    # One entry per constructor kwarg; each must land on the matching
    # underscore-prefixed operator attribute.
    _config = {
        'sql': 'SELECT 22',
        'conn_id': 'spark_special_conn_id',
        'total_executor_cores': 4,
        'executor_cores': 4,
        'executor_memory': '22g',
        'keytab': 'privileged_user.keytab',
        'principal': 'user/spark@airflow.org',
        'master': 'yarn-client',
        'name': 'special-application-name',
        'num_executors': 8,
        'yarn_queue': 'special-queue'
    }
    def setUp(self):
        """Create a minimal DAG the operator can be attached to."""
        configuration.load_test_config()
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG('test_dag_id', default_args=args)
    def test_execute(self):
        # Given / When
        operator = SparkSqlOperator(
            task_id='spark_sql_job',
            dag=self.dag,
            **self._config
        )
        # Then: every config value is forwarded unchanged.
        # (The original test asserted executor_memory/keytab/principal twice;
        # the redundant duplicate assertions were removed.)
        self.assertEqual(self._config['sql'], operator._sql)
        self.assertEqual(self._config['conn_id'], operator._conn_id)
        self.assertEqual(self._config['total_executor_cores'],
                         operator._total_executor_cores)
        self.assertEqual(self._config['executor_cores'], operator._executor_cores)
        self.assertEqual(self._config['executor_memory'], operator._executor_memory)
        self.assertEqual(self._config['keytab'], operator._keytab)
        self.assertEqual(self._config['principal'], operator._principal)
        self.assertEqual(self._config['master'], operator._master)
        self.assertEqual(self._config['name'], operator._name)
        self.assertEqual(self._config['num_executors'], operator._num_executors)
        self.assertEqual(self._config['yarn_queue'], operator._yarn_queue)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| apache-2.0 |
sdgdsffdsfff/detdup | detdup/services/task.py | 3 | 5544 | # -*- coding: utf-8 -*-
# TODO: multiprocessing suffers from a weakest-link ("shortest stave") effect here,
# and we currently have no way to share variables across processes.
import json
import math
import time
from ..utils import *
from ..core import DetDupCore
class DetDupTask(object):
    """Drives the full dedup pipeline: pull data, index features, dedupe."""
    default_attrs = [
        "process_count",
        "cache_dir",
        "original_model",
        "items_model",
        "features",
        "query_check_columns",
    ]
    def __init__(self, opts):
        # Copy the known options onto self (missing ones default to None).
        for key1 in DetDupTask.default_attrs:
            setattr(self, key1, opts.get(key1, None))
        self.process_count = self.process_count or max_process_count
        self.items_model = opts['items_model']
        self.items_model.cache_dir = self.cache_dir
        self.items_model.datadict_type = "sqlite"
        self.result_cPickle = os.path.join(self.cache_dir, "detdup.cPickle")
        self.result_json = os.path.join(self.cache_dir, "detdup.json")
    def new_detdup_core(self, storage_type='memory'):
        """Build a fresh DetDupCore query interface bound to self.items_model."""
        # Each call must construct a fresh instance; otherwise the worker
        # processes would all hit the same IO handle simultaneously and every
        # CPU would sit idle waiting on that IO.
        detdup = DetDupCore(self.cache_dir, self.items_model)
        detdup.is_inspect_detail = False
        detdup.storage_type = storage_type
        for feature1 in self.features:
            detdup.plug_features(feature1())
        # Make sure we never overwrite .original.db
        if (detdup.storage_type == 'memory') and detdup.feeded():
            detdup.load_features_from_db()
        self.items_model.core = detdup
        return detdup
    def extract(self):
        self.items_model.pull_data()
        # (Leftover note below: "to regenerate, delete the model.db and
        # features.db files".)
        """ 重新生成请全部删除 model.db 和 features.db 文件 """
        cprint("[建立 self.items_model 索引] ...", "blue")
        core = self.new_detdup_core()
        tmp_items = []
        # Feed items in batches of 10000 to bound memory usage.
        def write(tmp_items):
            core.feed_items(tmp_items)
            return []
        for item_id1, item1 in self.items_model.iteritems():
            tmp_items.append(item1)
            if len(tmp_items) >= 10000:
                tmp_items = write(tmp_items)
        tmp_items = write(tmp_items)
    def train(self):
        core = self.new_detdup_core()
        # Delete rows in chunks of 100 ids to keep SQL IN-lists small.
        def delete_item_ids(table, item_ids_1):
            step = 100
            for i1 in xrange(0, len(item_ids_1), step):
                table.delete().where(table.item_id << item_ids_1[i1:i1+step]).execute()
        pickle_filename = os.path.join(self.cache_dir, "detdup.cPickle")
        def load_result_func():
            core = self.new_detdup_core('memory')
            for feature1 in core.features:
                table = feature1.features_tree
                # 1. First rule out items that are definitely not duplicates
                candidate_list, uniq_list = feature1.divided_into_two_parts()
                delete_item_ids(table, uniq_list)
                # 2. Drop entries whose content is blank
                table.delete().where(table.uniq_chars__len == 0).execute()
                core.candidate_dup_count = table.select().count()
                # 3. Run the actual deduplication
                for item1 in process_notifier(feature1.features_tree.select()):
                    dup_ids = core.detect_duplicated_items_verbose(item1.item_id, verbose=True)
                    delete_item_ids(table, dup_ids)
            return core.result
        result = cpickle_cache(pickle_filename, load_result_func)
        json.dump(result.result_json(), open(self.result_json, 'wb'))
    # Dead code below: a disabled accuracy check ("pick a random duplicate
    # and compare against the whole corpus"), kept as string literals.
    # def data_check(self):
    """ 验证正确率,随机找个重复的,遍历全库比对。 """
    """
    from random import randrange
    from etl_utils import String
    cases_count = 10
    recall_program_count = 0
    recall_real_count = float(cases_count)
    for idx in xrange(cases_count):
        result = cpickle_cache(self.result_cPickle, lambda : True)
        core = self.new_detdup_core('memory')
        total_count = len(result.result)
        if not total_count:
            print 'NO DUPLICATION FOUND!'
            return
        program_similar_ids = result.result[randrange(total_count)]
        # 随机抽取的一个item
        current_item = self.items_model[program_similar_ids[0]]
        print "Begin to find %s's similar item_ids" % current_item.item_id
        real_similar_ids = set([current_item.item_id])
        program_similar_ids = set(program_similar_ids)
        table = core.select_feature(current_item).features_tree
        basic_query = table.item_id != str(current_item.item_id)
        for column1 in self.query_check_columns:
            basic_query = basic_query & (getattr(table, column1) == getattr(current_item, column1))
        scope = table.select().where(basic_query) #.dicts()
        for i1 in process_notifier(scope):
            rate1 = String.calculate_text_similarity(current_item.item_content, self.items_model[i1.item_id].item_content)['similarity_rate']
            if rate1 > core.similarity_rate:
                real_similar_ids.add(i1.item_id)
        print "real_similar_ids :", sorted([str(i1) for i1 in real_similar_ids])
        print "program_similar_ids:", sorted([str(i1) for i1 in program_similar_ids])
        print
        if real_similar_ids == program_similar_ids:
            recall_program_count += 1
    print "recall :", recall_program_count / recall_real_count
    """
| mit |
gram526/VTK | Filters/Core/Testing/Python/TestContourCases.py | 9 | 4313 | # This test requires Numpy.
import sys
import vtk
from vtk.test import Testing
# Abort gracefully (exit code 0 so the harness treats it as a skip) when
# numpy is unavailable; the test depends on it.
try:
    import numpy as np
except ImportError:
    print("WARNING: This test requires Numeric Python: http://numpy.sf.net")
    sys.exit(0)
def GenerateCell(cellType, points):
    """Build a vtkUnstructuredGrid containing a single cell of *cellType*
    whose connectivity is simply 0..len(points)-1 over *points*."""
    grid = vtk.vtkUnstructuredGrid()
    vtk_points = vtk.vtkPoints()
    for point in points:
        vtk_points.InsertNextPoint(point)
    grid.SetPoints(vtk_points)
    grid.Allocate(1, 1)
    id_list = vtk.vtkIdList()
    for index in range(len(points)):
        id_list.InsertId(index, index)
    grid.InsertNextCell(cellType, id_list)
    return grid
def Combination(sz, n):
    """Return the binary expansion of *n* as a float array of length *sz*,
    least-significant bit first (an IndexError signals n >= 2**sz)."""
    bits = np.zeros(sz)
    idx = 0
    while n > 0:
        n, remainder = divmod(n, 2)
        bits[idx] = remainder
        idx = idx + 1
    return bits
class CellTestBase:
    """Mixin: contour every non-trivial 0/1 scalar assignment on self.Cell
    and verify the generated surface normals point against the gradient."""
    def test_contours(self):
        cell = vtk.vtkUnstructuredGrid()
        cell.ShallowCopy(self.Cell)
        # Fix: the original named this local ``np``, shadowing the
        # module-level numpy alias; renamed for clarity.
        num_points = self.Cell.GetNumberOfPoints()
        ncomb = pow(2, num_points)
        scalar = vtk.vtkDoubleArray()
        scalar.SetName("scalar")
        scalar.SetNumberOfTuples(num_points)
        cell.GetPointData().SetScalars(scalar)
        incorrectCases = []
        # Skip the all-zero and all-one assignments: they yield no contour.
        for i in range(1, ncomb - 1):
            c = Combination(num_points, i)
            for p in range(num_points):
                scalar.SetTuple1(p, c[p])
            gradientFilter = vtk.vtkGradientFilter()
            gradientFilter.SetInputData(cell)
            gradientFilter.SetInputArrayToProcess(0, 0, 0, 0, 'scalar')
            gradientFilter.SetResultArrayName('grad')
            gradientFilter.Update()
            contourFilter = vtk.vtkContourFilter()
            contourFilter.SetInputConnection(gradientFilter.GetOutputPort())
            contourFilter.SetNumberOfContours(1)
            contourFilter.SetValue(0, 0.5)
            contourFilter.Update()
            normalsFilter = vtk.vtkPolyDataNormals()
            normalsFilter.SetInputConnection(contourFilter.GetOutputPort())
            normalsFilter.SetConsistency(0)
            normalsFilter.SetFlipNormals(0)
            normalsFilter.SetSplitting(0)
            # dir = grad . Normals; a positive value means a reversed normal.
            calcFilter = vtk.vtkArrayCalculator()
            calcFilter.SetInputConnection(normalsFilter.GetOutputPort())
            calcFilter.SetAttributeModeToUsePointData()
            calcFilter.AddVectorArrayName('grad')
            calcFilter.AddVectorArrayName('Normals')
            calcFilter.SetResultArrayName('dir')
            calcFilter.SetFunction('grad.Normals')
            calcFilter.Update()
            out = vtk.vtkUnstructuredGrid()
            out.ShallowCopy(calcFilter.GetOutput())
            numPts = out.GetNumberOfPoints()
            if numPts > 0:
                dirArray = out.GetPointData().GetArray('dir')
                for p in range(numPts):
                    if dirArray.GetTuple1(p) > 0.0:  # all normals are reversed
                        incorrectCases.append(i)
                        break
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(','.join([str(i) for i in incorrectCases]), '')
class TestTetra(Testing.vtkTest, CellTestBase):
    # Contour-direction test on a single linear tetrahedron.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_TETRA,
                                 [ ( 1.0, -1.0, -1.0),
                                   ( 1.0, 1.0, 1.0),
                                   (-1.0, 1.0, -1.0),
                                   (-1.0, -1.0, 1.0) ])
class TestHexahedron(Testing.vtkTest, CellTestBase):
    # Contour-direction test on a single linear hexahedron.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_HEXAHEDRON,
                                 [ (-1.0, -1.0, -1.0),
                                   ( 1.0, -1.0, -1.0),
                                   ( 1.0, 1.0, -1.0),
                                   (-1.0, 1.0, -1.0),
                                   (-1.0, -1.0, 1.0),
                                   ( 1.0, -1.0, 1.0),
                                   ( 1.0, 1.0, 1.0),
                                   (-1.0, 1.0, 1.0) ])
class TestWedge(Testing.vtkTest, CellTestBase):
    # Contour-direction test on a single linear wedge.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_WEDGE,
                                 [ (-1.0, -1.0, -1.0),
                                   ( 1.0, -1.0, -1.0),
                                   ( 0.0, -1.0, 1.0),
                                   (-1.0, 1.0, -1.0),
                                   ( 1.0, 1.0, -1.0),
                                   ( 0.0, 1.0, 0.0) ])
class TestPyramid(Testing.vtkTest, CellTestBase):
    # Contour-direction test on a single linear pyramid.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_PYRAMID,
                                 [ (-1.0, -1.0, -1.0),
                                   ( 1.0, -1.0, -1.0),
                                   ( 1.0, 1.0, -1.0),
                                   (-1.0, 1.0, -1.0),
                                   ( 0.0, 0.0, 1.0) ])
if __name__ == '__main__':
    # Register one (test class, method prefix) pair per cell type.
    Testing.main([(TestPyramid,'test'),(TestWedge,'test'),(TestTetra, 'test'),(TestHexahedron,'test')])
| bsd-3-clause |
ninneko/redash | redash/tasks/queries.py | 2 | 17974 | import json
import time
import logging
import signal
import redis
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client, settings, utils
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.query_runner import InterruptException
from .base import BaseTask
from .alerts import check_alerts_for_query
# Celery task logger for this module.
logger = get_task_logger(__name__)
def _job_lock_id(query_hash, data_source_id):
return "query_hash_job:%s:%s" % (data_source_id, query_hash)
def _unlock(query_hash, data_source_id):
    # Release the per-(data source, query hash) execution lock held in Redis.
    redis_connection.delete(_job_lock_id(query_hash, data_source_id))
# TODO:
# There is some duplication between this class and QueryTask, but I wanted to implement the monitoring features without
# much changes to the existing code, so ended up creating another object. In the future we can merge them.
class QueryTaskTracker(object):
    """Tracks the lifecycle of a query-execution task in Redis.

    Each tracker is a JSON blob stored under ``query_task_tracker:<task_id>``
    and indexed in exactly one of three sorted sets (waiting / in progress /
    done), scored by last-update time.
    """
    DONE_LIST = 'query_task_trackers:done'
    WAITING_LIST = 'query_task_trackers:waiting'
    IN_PROGRESS_LIST = 'query_task_trackers:in_progress'
    ALL_LISTS = (DONE_LIST, WAITING_LIST, IN_PROGRESS_LIST)

    def __init__(self, data):
        # data: plain dict of tracker fields; exposed read-only via __getattr__.
        self.data = data

    @classmethod
    def create(cls, task_id, state, query_hash, data_source_id, scheduled, metadata):
        """Build a fresh tracker dict for a newly enqueued task (not yet saved)."""
        data = dict(task_id=task_id, state=state,
                    query_hash=query_hash, data_source_id=data_source_id,
                    scheduled=scheduled,
                    username=metadata.get('Username', 'unknown'),
                    query_id=metadata.get('Query ID', 'unknown'),
                    retries=0,
                    scheduled_retries=0,
                    created_at=time.time(),
                    started_at=None,
                    run_time=None)
        return cls(data)

    def save(self, connection=None):
        """Persist the tracker and move it to the sorted set matching its state."""
        if connection is None:
            connection = redis_connection

        self.data['updated_at'] = time.time()
        key_name = self._key_name(self.data['task_id'])
        connection.set(key_name, utils.json_dumps(self.data))
        connection.zadd(self._get_list(), time.time(), key_name)

        # Keep the key in exactly one list: remove it from the others.
        for l in self.ALL_LISTS:
            if l != self._get_list():
                connection.zrem(l, key_name)

    # TOOD: this is not thread/concurrency safe. In current code this is not an issue, but better to fix this.
    def update(self, **kwargs):
        """Merge kwargs into the tracker data and persist immediately."""
        self.data.update(kwargs)
        self.save()

    @staticmethod
    def _key_name(task_id):
        return 'query_task_tracker:{}'.format(task_id)

    def _get_list(self):
        """Return the sorted-set name this tracker belongs to, based on state."""
        if self.state in ('finished', 'failed', 'cancelled'):
            return self.DONE_LIST

        # BUGFIX: was `in ('created')` -- parentheses around a single string
        # are not a tuple, so this performed a *substring* check against
        # "created" instead of membership in a one-element tuple.
        if self.state in ('created',):
            return self.WAITING_LIST

        return self.IN_PROGRESS_LIST

    @classmethod
    def get_by_task_id(cls, task_id, connection=None):
        """Load a tracker by celery task id; returns None if the key is missing."""
        if connection is None:
            connection = redis_connection

        key_name = cls._key_name(task_id)
        data = connection.get(key_name)
        return cls.create_from_data(data)

    @classmethod
    def create_from_data(cls, data):
        """Deserialize a tracker from its JSON blob; None/empty input yields None."""
        if data:
            data = json.loads(data)
            return cls(data)

        return None

    @classmethod
    def all(cls, list_name, offset=0, limit=-1):
        """Return trackers from `list_name`, newest first, using 1-based offset/limit."""
        if limit != -1:
            limit -= 1

        if offset != 0:
            offset -= 1

        ids = redis_connection.zrevrange(list_name, offset, limit)
        pipe = redis_connection.pipeline()
        for id in ids:
            pipe.get(id)

        tasks = [cls.create_from_data(data) for data in pipe.execute()]
        return tasks

    @classmethod
    def prune(cls, list_name, keep_count):
        """Trim `list_name` (and its tracker keys) down to `keep_count` entries.

        Returns the number of removed trackers.
        """
        count = redis_connection.zcard(list_name)
        if count <= keep_count:
            return 0

        remove_count = count - keep_count
        keys = redis_connection.zrange(list_name, 0, remove_count - 1)
        redis_connection.delete(*keys)
        redis_connection.zremrangebyrank(list_name, 0, remove_count - 1)

        return remove_count

    def __getattr__(self, item):
        # Expose tracker fields (state, task_id, ...) as attributes.
        return self.data[item]

    def __contains__(self, item):
        return item in self.data
class QueryTask(object):
    """Thin wrapper around a celery ``AsyncResult`` for a query-execution job.

    Presents the task in the legacy "Job" vocabulary the client still expects.
    """
    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        # Either wrap an existing AsyncResult or look one up by job id.
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        # The celery task id.
        return self._async_result.id

    def to_dict(self):
        """Serialize the task state for the API/client (legacy Job shape)."""
        if self._async_result.status == 'STARTED':
            # A STARTED task's result holds progress metadata (see track_started).
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        status = self.STATUSES[self._async_result.status]

        if isinstance(self._async_result.result, Exception):
            # NOTE: Exception.message is Python 2 only; this module predates py3.
            error = self._async_result.result.message
            status = 4
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        # Only a successful, error-free task carries a query result id.
        if self._async_result.successful() and not error:
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': status,
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        # True once the task was revoked (see cancel()).
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        """True when the task finished (successfully or not)."""
        return self._async_result.ready()

    def cancel(self):
        # SIGINT is trapped by the worker's signal_handler to abort the query.
        return self._async_result.revoke(terminate=True, signal='SIGINT')
def enqueue_query(query, data_source, user_id, scheduled=False, metadata=None):
    """Enqueue `query` for execution on `data_source`, deduplicating concurrent requests.

    A Redis lock keyed on (query hash, data source) ensures that while a job
    for the same query is still running, callers get the existing QueryTask
    instead of a new one. Uses WATCH/MULTI with up to 5 retries to resolve
    races on the lock key.

    Returns the QueryTask, or None if the job could not be created.
    """
    # Fix: `metadata={}` was a mutable default argument, shared across calls;
    # use the None sentinel instead.
    if metadata is None:
        metadata = {}

    query_hash = gen_query_hash(query)
    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
    try_count = 0
    job = None

    while try_count < 5:
        try_count += 1

        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source.id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
            if job_id:
                logging.info("[%s] Found existing job: %s", query_hash, job_id)

                job = QueryTask(job_id=job_id)

                if job.ready():
                    # Stale lock from a finished job -- clear it and enqueue anew.
                    logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                    redis_connection.delete(_job_lock_id(query_hash, data_source.id))
                    job = None

            if not job:
                pipe.multi()

                if scheduled:
                    queue_name = data_source.scheduled_queue_name
                else:
                    queue_name = data_source.queue_name

                result = execute_query.apply_async(args=(query, data_source.id, metadata, user_id), queue=queue_name)
                job = QueryTask(async_result=result)
                tracker = QueryTaskTracker.create(result.id, 'created', query_hash, data_source.id, scheduled, metadata)
                tracker.save(connection=pipe)

                logging.info("[%s] Created new job: %s", query_hash, job.id)
                pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                pipe.execute()
            break

        except redis.WatchError:
            # Someone else touched the lock key; retry.
            continue

    if not job:
        logging.error("[Manager][%s] Failed adding job for query.", query_hash)

    return job
@celery.task(name="redash.tasks.refresh_queries", base=BaseTask)
def refresh_queries():
    """Periodic task: enqueue every outdated scheduled query for re-execution.

    Also publishes refresh stats to statsd and the `redash:status` Redis hash.
    """
    logger.info("Refreshing queries...")

    outdated_queries_count = 0
    query_ids = []

    with statsd_client.timer('manager.outdated_queries_lookup'):
        for query in models.Query.outdated_queries():
            if settings.FEATURE_DISABLE_REFRESH_QUERIES:
                # Kill switch: still iterates, but enqueues nothing.
                logging.info("Disabled refresh queries.")
            elif query.data_source.paused:
                logging.info("Skipping refresh of %s because datasource - %s is paused (%s).", query.id, query.data_source.name, query.data_source.pause_reason)
            else:
                enqueue_query(query.query, query.data_source, query.user_id,
                              scheduled=True,
                              metadata={'Query ID': query.id, 'Username': 'Scheduled'})

            query_ids.append(query.id)
            outdated_queries_count += 1

    statsd_client.gauge('manager.outdated_queries', outdated_queries_count)

    logger.info("Done refreshing queries. Found %d outdated queries: %s" % (outdated_queries_count, query_ids))

    # Record refresh stats; `status` is read before the write so we can compute
    # the time elapsed since the previous refresh.
    status = redis_connection.hgetall('redash:status')
    now = time.time()

    redis_connection.hmset('redash:status', {
        'outdated_queries_count': outdated_queries_count,
        'last_refresh_at': now,
        'query_ids': json.dumps(query_ids)
    })

    statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
@celery.task(name="redash.tasks.cleanup_tasks", base=BaseTask)
def cleanup_tasks():
    """Periodic task: reconcile QueryTaskTracker lists with actual celery state.

    Cancels trackers whose celery task vanished, marks finished ones, releases
    their enqueue locks, and prunes the done-list to a fixed size.
    """
    in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST)
    for tracker in in_progress:
        result = AsyncResult(tracker.task_id)

        # If the AsyncResult status is PENDING it means there is no celery task object for this tracker, and we can
        # mark it as "dead":
        if result.status == 'PENDING':
            logging.info("In progress tracker for %s is no longer enqueued, cancelling (task: %s).",
                         tracker.query_hash, tracker.task_id)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='cancelled')

        # NOTE(review): not `elif` -- a PENDING result is never ready(), so the
        # two branches are mutually exclusive in practice.
        if result.ready():
            logging.info("in progress tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST)
    for tracker in waiting:
        result = AsyncResult(tracker.task_id)

        if result.ready():
            logging.info("waiting tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    # Maintain constant size of the finished tasks list:
    QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)
@celery.task(name="redash.tasks.cleanup_query_results", base=BaseTask)
def cleanup_query_results():
    """
    Job to cleanup unused query results -- such that no query links to them anymore, and older than
    settings.QUERY_RESULTS_MAX_AGE (a week by default, so it's less likely to be open in someone's browser and be used).

    Each time the job deletes only settings.QUERY_RESULTS_CLEANUP_COUNT (100 by default) query results so it won't choke
    the database in case of many such results.
    """

    logging.info("Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
                 settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)

    # Delete in a bounded batch; the total count is fetched only for logging.
    unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)
    total_unused_query_results = models.QueryResult.unused().count()
    deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()

    logger.info("Deleted %d unused query results out of total of %d." % (deleted_count, total_unused_query_results))
@celery.task(name="redash.tasks.refresh_schemas", base=BaseTask)
def refresh_schemas():
    """
    Refreshes the data sources schemas.

    Skips paused data sources and any id listed in the
    `data_sources:schema:blacklist` Redis set. Failures are logged per data
    source and do not abort the run.
    """
    blacklist = [int(ds_id) for ds_id in redis_connection.smembers('data_sources:schema:blacklist') if ds_id]
    global_start_time = time.time()

    logger.info(u"task=refresh_schemas state=start")

    for ds in models.DataSource.select():
        if ds.paused:
            logger.info(u"task=refresh_schema state=skip ds_id=%s reason=paused(%s)", ds.id, ds.pause_reason)
        elif ds.id in blacklist:
            logger.info(u"task=refresh_schema state=skip ds_id=%s reason=blacklist", ds.id)
        else:
            logger.info(u"task=refresh_schema state=start ds_id=%s", ds.id)
            start_time = time.time()
            try:
                ds.get_schema(refresh=True)
                logger.info(u"task=refresh_schema state=finished ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
            except Exception:
                # Keep refreshing the remaining data sources even if one fails.
                logger.exception(u"Failed refreshing schema for the data source: %s", ds.name)
                logger.info(u"task=refresh_schema state=failed ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)

    logger.info(u"task=refresh_schemas state=finish total_runtime=%.2f", time.time() - global_start_time)
def signal_handler(*args):
    """SIGINT handler installed while a query runs; aborts the query runner."""
    raise InterruptException()
class QueryExecutionError(Exception):
    """Wraps the error string a query runner returned for a failed execution."""
# We could have created this as a celery.Task derived class, and act as the task itself. But this might result in weird
# issues as the task class created once per process, so decided to have a plain object instead.
class QueryExecutor(object):
    """Runs a single query on behalf of the `execute_query` celery task.

    Owns the QueryTaskTracker state transitions (started -> saving_results ->
    finished/failed) and result persistence.
    """
    def __init__(self, task, query, data_source_id, user_id, metadata):
        self.task = task
        self.query = query
        self.data_source_id = data_source_id
        self.metadata = metadata
        self.data_source = self._load_data_source()
        if user_id is not None:
            self.user = models.User.get_by_id(user_id)
        else:
            self.user = None
        self.query_hash = gen_query_hash(self.query)
        # Load existing tracker or create a new one if the job was created before code update:
        self.tracker = QueryTaskTracker.get_by_task_id(task.request.id) or QueryTaskTracker.create(task.request.id,
                                                                                                   'created',
                                                                                                   self.query_hash,
                                                                                                   self.data_source_id,
                                                                                                   False, metadata)

    def run(self):
        """Execute the query and return the stored result id, or a QueryExecutionError."""
        # Allow cancellation via revoke(signal='SIGINT') -- see QueryTask.cancel().
        signal.signal(signal.SIGINT, signal_handler)
        self.tracker.update(started_at=time.time(), state='started')

        logger.debug("Executing query:\n%s", self.query)
        self._log_progress('executing_query')

        query_runner = self.data_source.query_runner
        annotated_query = self._annotate_query(query_runner)

        try:
            data, error = query_runner.run_query(annotated_query, self.user)
        except Exception as e:
            # NOTE: unicode() is Python 2; this module predates py3.
            error = unicode(e)
            data = None
            logging.warning('Unexpected error while running query:', exc_info=1)

        run_time = time.time() - self.tracker.started_at
        self.tracker.update(error=error, run_time=run_time, state='saving_results')

        logger.info(u"task=execute_query query_hash=%s data_length=%s error=[%s]", self.query_hash, data and len(data), error)

        # Release the enqueue lock so the same query can be scheduled again.
        _unlock(self.query_hash, self.data_source.id)

        if error:
            self.tracker.update(state='failed')
            result = QueryExecutionError(error)
        else:
            query_result, updated_query_ids = models.QueryResult.store_result(self.data_source.org_id, self.data_source.id,
                                                                              self.query_hash, self.query, data,
                                                                              run_time, utils.utcnow())
            self._log_progress('checking_alerts')
            # Any query now pointing at this result may have alerts to evaluate.
            for query_id in updated_query_ids:
                check_alerts_for_query.delay(query_id)
            self._log_progress('finished')

            result = query_result.id
        return result

    def _annotate_query(self, query_runner):
        """Prefix the query with a metadata comment when the runner supports it."""
        if query_runner.annotate_query():
            self.metadata['Task ID'] = self.task.request.id
            self.metadata['Query Hash'] = self.query_hash
            self.metadata['Queue'] = self.task.request.delivery_info['routing_key']

            # NOTE: iteritems() is Python 2 only.
            annotation = u", ".join([u"{}: {}".format(k, v) for k, v in self.metadata.iteritems()])

            annotated_query = u"/* {} */ {}".format(annotation, self.query)
        else:
            annotated_query = self.query
        return annotated_query

    def _log_progress(self, state):
        """Log a state transition and persist it on the tracker."""
        logger.info(u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d task_id=%s queue=%s query_id=%s username=%s",
                    state,
                    self.query_hash, self.data_source.type, self.data_source.id, self.task.request.id, self.task.request.delivery_info['routing_key'],
                    self.metadata.get('Query ID', 'unknown'), self.metadata.get('Username', 'unknown'))
        self.tracker.update(state=state)

    def _load_data_source(self):
        logger.info("task=execute_query state=load_ds ds_id=%d", self.data_source_id)
        return models.DataSource.get_by_id(self.data_source_id)
# user_id is added last as a keyword argument for backward compatability -- to support executing previously submitted
# jobs before the upgrade to this version.
@celery.task(name="redash.tasks.execute_query", bind=True, base=BaseTask, track_started=True)
def execute_query(self, query, data_source_id, metadata, user_id=None):
    """Celery entry point for query execution; all logic lives in QueryExecutor."""
    return QueryExecutor(self, query, data_source_id, user_id, metadata).run()
| bsd-2-clause |
popazerty/zde-e2 | lib/python/Plugins/SystemPlugins/HdmiCEC/plugin.py | 33 | 5647 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, getConfigListEntry
from Components.Sources.StaticText import StaticText
class HdmiCECSetupScreen(Screen, ConfigListScreen):
    """Setup screen for the HDMI-CEC options (config.hdmicec.*).

    Green/OK saves, red/cancel reverts; yellow pins the current physical CEC
    address as fixed, blue clears the fixed address.
    """
    # Skin XML: runtime layout data, consumed by the enigma2 skin engine.
    skin = """
        <screen position="c-300,c-250" size="600,500" title="HDMI CEC setup">
            <widget name="config" position="25,25" size="550,350" />
            <widget source="current_address" render="Label" position="25,375" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
            <widget source="fixed_address" render="Label" position="25,405" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
            <ePixmap pixmap="skin_default/buttons/red.png" position="20,e-45" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/green.png" position="160,e-45" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/yellow.png" position="300,e-45" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/blue.png" position="440,e-45" size="140,40" alphatest="on" />
            <widget source="key_red" render="Label" position="20,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
            <widget source="key_green" render="Label" position="160,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_yellow" render="Label" position="300,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
            <widget source="key_blue" render="Label" position="440,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
        </screen>"""

    def __init__(self, session):
        self.skin = HdmiCECSetupScreen.skin
        Screen.__init__(self, session)

        from Components.ActionMap import ActionMap
        from Components.Button import Button

        # Button labels and the two address status lines.
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("OK"))
        self["key_yellow"] = StaticText(_("Set fixed"))
        self["key_blue"] = StaticText(_("Clear fixed"))
        self["current_address"] = StaticText()
        self["fixed_address"] = StaticText()

        # "menu" maps to closeRecursive -- presumably inherited from Screen; confirm.
        self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
            {
                "ok": self.keyGo,
                "save": self.keyGo,
                "cancel": self.keyCancel,
                "green": self.keyGo,
                "red": self.keyCancel,
                "yellow": self.setFixedAddress,
                "blue": self.clearFixedAddress,
                "menu": self.closeRecursive,
            }, -2)

        # One config entry per HDMI-CEC setting.
        self.list = []
        ConfigListScreen.__init__(self, self.list, session = self.session)
        self.list.append(getConfigListEntry(_("Enabled"), config.hdmicec.enabled))
        self.list.append(getConfigListEntry(_("Put TV in standby"), config.hdmicec.control_tv_standby))
        self.list.append(getConfigListEntry(_("Wakeup TV from standby"), config.hdmicec.control_tv_wakeup))
        self.list.append(getConfigListEntry(_("Regard deep standby as standby"), config.hdmicec.handle_deepstandby_events))
        self.list.append(getConfigListEntry(_("Switch TV to correct input"), config.hdmicec.report_active_source))
        self.list.append(getConfigListEntry(_("Use TV remote control"), config.hdmicec.report_active_menu))
        self.list.append(getConfigListEntry(_("Handle standby from TV"), config.hdmicec.handle_tv_standby))
        self.list.append(getConfigListEntry(_("Handle wakeup from TV"), config.hdmicec.handle_tv_wakeup))
        self.list.append(getConfigListEntry(_("Wakeup signal from TV"), config.hdmicec.tv_wakeup_detection))
        self.list.append(getConfigListEntry(_("Forward volume keys"), config.hdmicec.volume_forwarding))
        self.list.append(getConfigListEntry(_("Put receiver in standby"), config.hdmicec.control_receiver_standby))
        self.list.append(getConfigListEntry(_("Wakeup receiver from standby"), config.hdmicec.control_receiver_wakeup))
        self.list.append(getConfigListEntry(_("Minimum send interval"), config.hdmicec.minimum_send_interval))
        self["config"].list = self.list
        self["config"].l.setList(self.list)
        self.updateAddress()

    def keyLeft(self):
        ConfigListScreen.keyLeft(self)

    def keyRight(self):
        ConfigListScreen.keyRight(self)

    def keyGo(self):
        # Persist every config entry, then close the screen.
        for x in self["config"].list:
            x[1].save()
        self.close()

    def keyCancel(self):
        # Revert every config entry, then close the screen.
        for x in self["config"].list:
            x[1].cancel()
        self.close()

    def setFixedAddress(self):
        # Pin the currently detected physical address as the fixed one.
        import Components.HdmiCec
        Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress(Components.HdmiCec.hdmi_cec.getPhysicalAddress())
        self.updateAddress()

    def clearFixedAddress(self):
        # "0.0.0.0" acts as the "no fixed address" sentinel (see updateAddress).
        import Components.HdmiCec
        Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress("0.0.0.0")
        self.updateAddress()

    def updateAddress(self):
        """Refresh the current/fixed CEC address labels."""
        import Components.HdmiCec
        self["current_address"].setText(_("Current CEC address") + ": " + Components.HdmiCec.hdmi_cec.getPhysicalAddress())
        if config.hdmicec.fixed_physical_address.value == "0.0.0.0":
            fixedaddresslabel = ""
        else:
            fixedaddresslabel = _("Using fixed address") + ": " + config.hdmicec.fixed_physical_address.value
        self["fixed_address"].setText(fixedaddresslabel)
def main(session, **kwargs):
    """Plugin entry point: open the HDMI-CEC setup screen."""
    session.open(HdmiCECSetupScreen)
def startSetup(menuid):
    """Menu hook: contribute the HDMI-CEC entry to the 'system' setup menu."""
    if menuid != "system":
        return []
    return [(_("HDMI-CEC setup"), main, "hdmi_cec_setup", 0)]
def Plugins(**kwargs):
    """Register the plugin only when an HDMI-CEC device node exists."""
    from os import path
    if path.exists("/dev/hdmi_cec") or path.exists("/dev/misc/hdmi_cec0"):
        # Importing Components.HdmiCec starts the CEC handling as a side effect.
        import Components.HdmiCec
        from Plugins.Plugin import PluginDescriptor
        return [PluginDescriptor(where = PluginDescriptor.WHERE_MENU, fnc = startSetup)]
    return []
| gpl-2.0 |
kittehcoin/p2pool | nattraverso/pynupnp/soap.py | 288 | 3547 | """
This module is a SOAP client using twisted's deferreds.
It uses the SOAPpy package.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
import SOAPpy, logging
from SOAPpy.Config import Config
from twisted.web import client, error
# General config: emit untyped SOAP values -- presumably because UPnP control
# points reject typed elements (see the payload munging in SoapProxy.call).
Config.typed = False
class SoapError(Exception):
    """
    A SOAP-level fault, as opposed to an HTTP transport error.

    The exception argument is the SOAPpy structure parsed from the SOAP
    fault message returned by the remote host.
    """
class SoapProxy:
    """
    Proxy for an url to which we send SOAP rpc calls.
    """

    def __init__(self, url, prefix):
        """
        Init the proxy, it will connect to the given url, using the
        given soap namespace.

        @param url: The url of the remote host to call
        @param prefix: The namespace prefix to use, eg.
            'urn:schemas-upnp-org:service:WANIPConnection:1'
        """
        logging.debug("Soap Proxy: '%s', prefix: '%s'", url, prefix)
        self._url = url
        self._prefix = prefix

    def call(self, method, **kwargs):
        """
        Call the given remote method with the given arguments, as keywords.

        Returns a deferred, called with SOAPpy structure representing
        the soap response.

        @param method: The method name to call, eg. 'GetExternalIP'
        @param kwargs: The parameters of the call, as keywords
        @return: A deferred called with the external ip address of this host
        @rtype: L{twisted.internet.defer.Deferred}
        """
        payload = SOAPpy.buildSOAP(method=method, config=Config, namespace=self._prefix, kw=kwargs)
        # Here begins the nasty hack: textual rewrites of the XML SOAPpy
        # produced, to match what UPnP devices expect. Do not reorder these
        # replacements -- they operate on the raw payload string.
        payload = payload.replace(
            # Upnp wants s: instead of SOAP-ENV
            'SOAP-ENV','s').replace(
            # Doesn't seem to like these encoding stuff
            'xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"', '').replace(
            'SOAP-ENC:root="1"', '').replace(
            # And it wants u: instead of ns1 namespace for arguments..
            'ns1','u')

        logging.debug("SOAP Payload:\n%s", payload)

        return client.getPage(self._url, postdata=payload, method="POST",
            headers={'content-type': 'text/xml', 'SOAPACTION': '%s#%s' % (self._prefix, method)}
        ).addCallbacks(self._got_page, self._got_error)

    def _got_page(self, result):
        """
        The http POST command was successful, we parse the SOAP
        answer, and return it.

        @param result: the xml content
        """
        parsed = SOAPpy.parseSOAPRPC(result)

        logging.debug("SOAP Answer:\n%s", result)
        logging.debug("SOAP Parsed Answer: %r", parsed)

        return parsed

    def _got_error(self, res):
        """
        The HTTP POST command did not succeed, depending on the error type:
            - it's a SOAP error, we parse it and return a L{SoapError}.
            - it's another type of error (http, other), we raise it as is
        """
        logging.debug("SOAP Error:\n%s", res)

        if isinstance(res.value, error.Error):
            try:
                logging.debug("SOAP Error content:\n%s", res.value.response)
                raise SoapError(SOAPpy.parseSOAPRPC(res.value.response)["detail"])
            except:
                # Re-raise whatever happened: either the SoapError above, or a
                # parsing failure of the fault body itself.
                raise
        raise Exception(res.value)
| gpl-3.0 |
colemanja91/PyEloqua-Examples | venv/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 515 | 5599 | from __future__ import absolute_import
# Base Exceptions
class HTTPError(Exception):
    """Base exception used by this module."""
class HTTPWarning(Warning):
    """Base warning used by this module."""
class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "{0}: {1}".format(pool, message))

    def __reduce__(self):
        # For pickling purposes: pools are dropped from the pickled form.
        return self.__class__, (None, None)
class RequestError(PoolError):
    """Base exception for PoolErrors that have an associated URL."""

    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling purposes: keep the URL, drop the pool.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    "Raised when SSL certificate fails in an HTTPS connection."
    pass


class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass


class DecodeError(HTTPError):
    "Raised when automatic decoding based on Content-Type fails."
    pass


class ProtocolError(HTTPError):
    "Raised when something unexpected happens mid-request/response."
    pass


#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError


# Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error
    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        message = "Max retries exceeded with url: {0} (Caused by {1!r})".format(url, reason)
        RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        RequestError.__init__(self, pool, url,
                              "Tried to open a foreign host with url: %s" % url)
        self.retries = retries
class TimeoutStateError(HTTPError):
    """ Raised when passing an invalid state to a timeout """
    pass


class TimeoutError(HTTPError):
    """ Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass


class ReadTimeoutError(TimeoutError, RequestError):
    "Raised when a socket timeout occurs while receiving data from a server"
    pass


# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    "Raised when a socket timeout occurs while connecting to a server"
    pass


class NewConnectionError(ConnectTimeoutError, PoolError):
    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
    pass


class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass


class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass
class LocationValueError(ValueError, HTTPError):
    "Raised when there is something wrong with a given URL input."
    pass


class LocationParseError(LocationValueError):
    "Raised when get_host or similar fails to parse the URL input."

    def __init__(self, location):
        message = "Failed to parse: %s" % location
        HTTPError.__init__(self, message)

        # Keep the offending input available to callers.
        self.location = location
class ResponseError(HTTPError):
    "Used as a container for an error reason supplied in a MaxRetryError."
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'


class SecurityWarning(HTTPWarning):
    "Warned when performing security reducing actions"
    pass


class SubjectAltNameWarning(SecurityWarning):
    "Warned when connecting to a host with a certificate missing a SAN."
    pass


class InsecureRequestWarning(SecurityWarning):
    "Warned when making an unverified HTTPS request."
    pass


class SystemTimeWarning(SecurityWarning):
    "Warned when system time is suspected to be wrong"
    pass


class InsecurePlatformWarning(SecurityWarning):
    "Warned when certain SSL configuration is not available on a platform."
    pass


class SNIMissingWarning(HTTPWarning):
    "Warned when making a HTTPS request without SNI available."
    pass


class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
    pass


class ResponseNotChunked(ProtocolError, ValueError):
    "Response needs to be chunked in order to read it as chunks."
    pass
class ProxySchemeUnknown(AssertionError, ValueError):
    """ProxyManager does not support the supplied scheme."""
    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme):
        super(ProxySchemeUnknown, self).__init__(
            "Not supported proxy scheme %s" % scheme)
class HeaderParsingError(HTTPError):
    """Raised by assert_header_parsing, but we convert it to a log.warning statement."""

    def __init__(self, defects, unparsed_data):
        details = defects or 'Unknown'
        super(HeaderParsingError, self).__init__(
            '%s, unparsed data: %r' % (details, unparsed_data))
| gpl-2.0 |
juanalfonsopr/odoo | addons/l10n_at/__init__.py | 438 | 1050 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RobSpectre/garfield | garfield/sms/models.py | 1 | 1262 | from django.db import models
from contacts.models import Contact
from phone_numbers.models import PhoneNumber
class SmsMessage(models.Model):
    """A single SMS message, linked to the contact and phone number involved."""
    # Bookkeeping timestamps; deleted is a soft-delete flag.
    date_created = models.DateTimeField(auto_now_add=True, db_index=True)
    date_modified = models.DateTimeField(auto_now=True)
    deleted = models.BooleanField(default=False)

    # Provider message identifier (presumably a Twilio SID -- confirm).
    sid = models.CharField(max_length=255, db_index=True)
    from_number = models.CharField(max_length=255, db_index=True)
    to_number = models.CharField(max_length=255, db_index=True)
    body = models.TextField(db_index=True)

    related_contact = models.ForeignKey(Contact,
                                        null=True,
                                        related_name="sms_messages",
                                        on_delete=models.CASCADE)
    related_phone_number = models.ForeignKey(PhoneNumber,
                                             null=True,
                                             related_name="sms_messages",
                                             on_delete=models.CASCADE)

    def __str__(self):
        # e.g. "2017-01-01 00:00:00: from +15551234567 to +15557654321"
        return "{0}: from {1} to {2}".format(self.date_created,
                                             self.from_number,
                                             self.to_number)
| mit |
sbusso/rethinkdb | external/v8_3.30.33.16/build/gyp/test/mac/gyptest-xctest.py | 221 | 1196 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['xcode'])

  # Ignore this test if Xcode 5 is not installed
  import subprocess
  job = subprocess.Popen(['xcodebuild', '-version'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
  out, err = job.communicate()
  if job.returncode != 0:
    raise Exception('Error %d running xcodebuild' % job.returncode)
  xcode_version, build_number = out.splitlines()
  # Convert the version string from 'Xcode 5.0' to ['5','0'].
  xcode_version = xcode_version.split()[-1].split('.')
  # NOTE(review): this compares lists of *strings* lexicographically, so e.g.
  # ['10', '0'] < ['5'] is True and Xcode 10+ would wrongly skip -- confirm
  # whether a numeric comparison was intended.
  if xcode_version < ['5']:
    test.pass_test()

  CHDIR = 'xctest'
  test.run_gyp('test.gyp', chdir=CHDIR)
  # Build the 'classes' scheme's test action and verify the bundled resource.
  test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
  test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
                             'foo\n', chdir=CHDIR)
  test.pass_test()
| agpl-3.0 |
pistruiatul/hartapoliticii | python/src/ro/vivi/youtube_crawler/gdata/apps/migration/__init__.py | 168 | 8177 | #!/usr/bin/python
#
# Copyright (C) 2008 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'google-apps-apis@googlegroups.com'
import atom
import gdata
# XML namespaces which are often used in Google Apps entity.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class Rfc822Msg(atom.AtomBase):
    """The Migration rfc822Msg element: carries the raw RFC 822 message text."""

    _tag = 'rfc822Msg'
    _namespace = APPS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['encoding'] = 'encoding'

    def __init__(self, extension_elements=None,
                 extension_attributes=None, text=None):
        self.text = text
        # The encoding attribute is always initialized to 'base64'.
        self.encoding = 'base64'
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}


def Rfc822MsgFromString(xml_string):
    """Parse in the Rfc822 message from the XML definition."""
    return atom.CreateClassFromXMLString(Rfc822Msg, xml_string)
class MailItemProperty(atom.AtomBase):
  """Represents the migration <apps:mailItemProperty> element.

  Carries a single mail-item property in its 'value' attribute.
  """

  _tag = 'mailItemProperty'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a MailItemProperty.

    Args:
      value: str (optional) The property value.
      extension_elements: list (optional) Extra XML child elements.
      extension_attributes: dict (optional) Extra XML attributes.
      text: str (optional) Text content of the element.
    """
    self.value = value
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (
        extension_attributes if extension_attributes else {})
def MailItemPropertyFromString(xml_string):
  """Parse a MailItemProperty element from its XML string representation.

  Args:
    xml_string: str XML source for an <apps:mailItemProperty> element.

  Returns:
    A MailItemProperty instance populated from the XML.
  """
  return atom.CreateClassFromXMLString(MailItemProperty, xml_string)
class Label(atom.AtomBase):
  """Represents the migration <apps:label> element.

  Carries a mailbox label name in its 'labelName' attribute.
  """

  _tag = 'label'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['labelName'] = 'label_name'

  def __init__(self, label_name=None,
               extension_elements=None, extension_attributes=None,
               text=None):
    """Constructs a Label.

    Args:
      label_name: str (optional) The label name.
      extension_elements: list (optional) Extra XML child elements.
      extension_attributes: dict (optional) Extra XML attributes.
      text: str (optional) Text content of the element.
    """
    self.label_name = label_name
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (
        extension_attributes if extension_attributes else {})
def LabelFromString(xml_string):
  """Parse a Label element from its XML string representation.

  Args:
    xml_string: str XML source for an <apps:label> element.

  Returns:
    A Label instance populated from the XML.
  """
  return atom.CreateClassFromXMLString(Label, xml_string)
class MailEntry(gdata.GDataEntry):
  """A GData entry carrying one mail message for migration.

  Extends gdata.GDataEntry with the migration-specific children:
  rfc822Msg (the raw message), mailItemProperty (repeated), and
  label (repeated).
  """

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
  _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
                                                        [MailItemProperty])
  _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               rfc822_msg=None, mail_item_property=None, label=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a MailEntry.

    Args:
      rfc822_msg: Rfc822Msg (optional) The raw message payload.
      mail_item_property: list (optional) MailItemProperty children.
      label: list (optional) Label children.
      Remaining arguments are forwarded to gdata.GDataEntry.__init__.
    """
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated)
    self.rfc822_msg = rfc822_msg
    self.mail_item_property = mail_item_property
    self.label = label
    self.extended_property = extended_property if extended_property else []
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (
        extension_attributes if extension_attributes else {})
def MailEntryFromString(xml_string):
  """Parse a MailEntry from its XML string representation.

  Args:
    xml_string: str XML source for an <atom:entry> migration entry.

  Returns:
    A MailEntry instance populated from the XML.
  """
  return atom.CreateClassFromXMLString(MailEntry, xml_string)
class BatchMailEntry(gdata.BatchEntry):
  """A batch-capable GData entry carrying one mail message.

  Same migration children as MailEntry (rfc822Msg, mailItemProperty,
  label) but based on gdata.BatchEntry so it also carries batch
  operation, id and status elements.
  """

  _tag = gdata.BatchEntry._tag
  _namespace = gdata.BatchEntry._namespace
  _children = gdata.BatchEntry._children.copy()
  _attributes = gdata.BatchEntry._attributes.copy()
  _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg)
  _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property',
                                                        [MailItemProperty])
  _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               rfc822_msg=None, mail_item_property=None, label=None,
               batch_operation=None, batch_id=None, batch_status=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a BatchMailEntry.

    Args:
      rfc822_msg: Rfc822Msg (optional) The raw message payload.
      mail_item_property: list (optional) MailItemProperty children.
      label: list (optional) Label children.
      Remaining arguments are forwarded to gdata.BatchEntry.__init__.
    """
    gdata.BatchEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published,
                              batch_operation=batch_operation,
                              batch_id=batch_id, batch_status=batch_status,
                              title=title, updated=updated)
    self.rfc822_msg = rfc822_msg if rfc822_msg else None
    self.mail_item_property = (
        mail_item_property if mail_item_property else [])
    self.label = label if label else []
    self.extended_property = extended_property if extended_property else []
    self.text = text
    self.extension_elements = extension_elements if extension_elements else []
    self.extension_attributes = (
        extension_attributes if extension_attributes else {})
def BatchMailEntryFromString(xml_string):
  """Parse a BatchMailEntry from its XML string representation.

  Args:
    xml_string: str XML source for a batch migration entry.

  Returns:
    A BatchMailEntry instance populated from the XML.
  """
  return atom.CreateClassFromXMLString(BatchMailEntry, xml_string)
class BatchMailEventFeed(gdata.BatchFeed):
  """A batch feed of migration mail entries.

  Identical to gdata.BatchFeed except that <atom:entry> children are
  parsed as BatchMailEntry objects.
  """

  _tag = gdata.BatchFeed._tag
  _namespace = gdata.BatchFeed._namespace
  _children = gdata.BatchFeed._children.copy()
  _attributes = gdata.BatchFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, interrupted=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a BatchMailEventFeed.

    All arguments are forwarded unchanged to gdata.BatchFeed.__init__.
    """
    gdata.BatchFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link,
        logo=logo, rights=rights, subtitle=subtitle, title=title,
        updated=updated, entry=entry, total_results=total_results,
        start_index=start_index, items_per_page=items_per_page,
        interrupted=interrupted, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def BatchMailEventFeedFromString(xml_string):
  """Parse a BatchMailEventFeed from its XML string representation.

  Args:
    xml_string: str XML source for a batch migration feed.

  Returns:
    A BatchMailEventFeed instance populated from the XML.
  """
  return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string)
| agpl-3.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/numpy/core/fromnumeric.py | 12 | 95796 | """Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
from . import _methods
# Short alias: map a scalar type to its single-character typecode.
_dt_ = nt.sctype2char

# Public names: each function below wraps the ndarray method of the
# same name so that plain array_likes (lists, tuples, ...) work too.
__all__ = [
    'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
    'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
    'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
    'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
    'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
    'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
    'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
    ]

# types.GeneratorType is missing on some interpreters; fall back to
# NoneType so isinstance checks against _gentype simply never match.
try:
    _gentype = types.GeneratorType
except AttributeError:
    _gentype = type(None)

# Save the builtin sum before this module's own 'sum' shadows it.
_sum_ = sum

# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def take(a, indices, axis=None, out=None, mode='raise'):
    """Take elements from an array along an axis.

    Equivalent to "fancy" indexing with `indices` along `axis`.

    Parameters
    ----------
    a : array_like
        The source array.
    indices : array_like
        The indices of the values to extract.
    axis : int, optional
        The axis over which to select values.  By default the
        flattened input array is used.
    out : ndarray, optional
        Output array of the appropriate shape and dtype.
    mode : {'raise', 'wrap', 'clip'}, optional
        How out-of-bounds indices are handled.

    Returns
    -------
    subarray : ndarray
        The returned array has the same type as `a`.
    """
    bound = getattr(a, 'take', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'take', indices, axis, out, mode)
    return bound(indices, axis, out, mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
    """Give a new shape to an array without changing its data.

    Parameters
    ----------
    a : array_like
        Array to be reshaped.
    newshape : int or tuple of ints
        The new shape; must be compatible with the original shape.
        One dimension may be -1, in which case it is inferred from
        the array size and the remaining dimensions.
    order : {'C', 'F', 'A'}, optional
        Index order used both to read the elements of `a` and to
        place them into the reshaped array.

    Returns
    -------
    reshaped_array : ndarray
        A new view object when possible; otherwise a copy.
    """
    bound = getattr(a, 'reshape', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'reshape', newshape, order=order)
    return bound(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
    """Construct an array from an index array and a set of choice arrays.

    After broadcasting `a` against every choice array, the output at
    each position is the element of ``choices[a[pos]]`` at that
    position.

    Parameters
    ----------
    a : int array
        Indices in ``[0, n-1]`` where ``n = len(choices)`` (any
        integer is allowed when `mode` is 'wrap' or 'clip').
    choices : sequence of arrays
        Choice arrays, broadcastable to a common shape with `a`.
    out : array, optional
        Output array of the appropriate shape and dtype.
    mode : {'raise', 'wrap', 'clip'}, optional
        Treatment of indices outside ``[0, n-1]``.

    Returns
    -------
    merged_array : array
        The merged result.

    Raises
    ------
    ValueError
        If `a` and each choice array are not all broadcastable to the
        same shape.
    """
    bound = getattr(a, 'choose', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'choose', choices, out=out, mode=mode)
    return bound(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
    """Repeat elements of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    repeats : int or array of ints
        The number of repetitions for each element; broadcast to fit
        the shape of the given axis.
    axis : int, optional
        The axis along which to repeat values.  By default the
        flattened input is used and a flat array is returned.

    Returns
    -------
    repeated_array : ndarray
        Same shape as `a` except along the given axis.
    """
    bound = getattr(a, 'repeat', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'repeat', repeats, axis)
    return bound(repeats, axis)
def put(a, ind, v, mode='raise'):
    """Replace specified elements of an array with given values.

    Indexes the flattened target array; roughly equivalent to
    ``a.flat[ind] = v``.

    Parameters
    ----------
    a : ndarray
        Target array (modified in place).
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a`; repeated as necessary if shorter
        than `ind`.
    mode : {'raise', 'wrap', 'clip'}, optional
        How out-of-bounds indices are handled.
    """
    # ndarray.put mutates in place and returns None.
    return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
    """Interchange two axes of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axis1 : int
        First axis.
    axis2 : int
        Second axis.

    Returns
    -------
    a_swapped : ndarray
        A view of `a` with the two axes interchanged when `a` is an
        ndarray; otherwise a new array.
    """
    bound = getattr(a, 'swapaxes', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'swapaxes', axis1, axis2)
    return bound(axis1, axis2)
def transpose(a, axes=None):
    """Permute the dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axes : list of ints, optional
        By default, reverse the dimensions; otherwise permute the
        axes according to the values given.

    Returns
    -------
    p : ndarray
        `a` with its axes permuted; a view whenever possible.
    """
    bound = getattr(a, 'transpose', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'transpose', axes)
    return bound(axes)
def partition(a, kth, axis=-1, kind='introselect', order=None):
    """Return a partitioned copy of an array.

    In the result, the element at each `kth` position is where it
    would be in a fully sorted array; all smaller elements come
    before it and all equal-or-greater elements after it, in
    unspecified order within each side.

    Parameters
    ----------
    a : array_like
        Array to be partitioned.
    kth : int or sequence of ints
        Element index (or indices) to partition by.
    axis : int or None, optional
        Axis along which to partition.  None flattens the array
        first; the default is -1 (the last axis).
    kind : {'introselect'}, optional
        Selection algorithm.
    order : str or list of str, optional
        For structured arrays, the field(s) to compare first.

    Returns
    -------
    partitioned_array : ndarray
        Array of the same type and shape as `a`.
    """
    arr = asanyarray(a)
    if axis is None:
        # flatten() always copies, so in-place partition is safe.
        arr = arr.flatten()
        axis = 0
    else:
        # Copy while preserving the input's memory layout.
        arr = arr.copy(order="K")
    arr.partition(kth, axis=axis, kind=kind, order=order)
    return arr
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
    """Perform an indirect partition along the given axis.

    Returns an array of indices of the same shape as `a` that index
    data along the given axis in partitioned order.

    Parameters
    ----------
    a : array_like
        Array to partition.
    kth : int or sequence of ints
        Element index (or indices) to partition by.
    axis : int or None, optional
        Axis along which to partition; None uses the flattened
        array.  Default is -1 (the last axis).
    kind : {'introselect'}, optional
        Selection algorithm.
    order : str or list of str, optional
        For structured arrays, the field(s) to compare first.

    Returns
    -------
    index_array : ndarray, int
        Indices that partition `a` along the specified axis.
    """
    bound = getattr(a, 'argpartition', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'argpartition', kth, axis, kind, order)
    return bound(kth, axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
    """Return a sorted copy of an array.

    Parameters
    ----------
    a : array_like
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort.  None flattens the array before
        sorting; the default is -1, the last axis.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    order : str or list of str, optional
        For structured arrays, the field(s) to compare first;
        unspecified fields still break ties in dtype order.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.
    """
    arr = asanyarray(a)
    if axis is None:
        # flatten() always copies, so the in-place sort is safe.
        arr = arr.flatten()
        axis = 0
    else:
        # Copy while preserving the input's memory layout.
        arr = arr.copy(order="K")
    arr.sort(axis, kind, order)
    return arr
def argsort(a, axis=-1, kind='quicksort', order=None):
    """Return the indices that would sort an array.

    Performs an indirect sort along the given axis and returns an
    array of indices of the same shape as `a` that index data along
    that axis in sorted order.

    Parameters
    ----------
    a : array_like
        Array to sort.
    axis : int or None, optional
        Axis along which to sort; None uses the flattened array.
        Default is -1 (the last axis).
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    order : str or list of str, optional
        For structured arrays, the field(s) to compare first.

    Returns
    -------
    index_array : ndarray, int
        Indices that sort `a` along the specified axis, i.e.
        ``a[index_array]`` yields a sorted `a`.
    """
    bound = getattr(a, 'argsort', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'argsort', axis, kind, order)
    return bound(axis, kind, order)
def argmax(a, axis=None, out=None):
    """Return the indices of the maximum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default the index is into the flattened array; otherwise
        along the specified axis.
    out : array, optional
        Output array of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of ints
        Same shape as ``a.shape`` with the `axis` dimension removed.

    Notes
    -----
    When the maximum occurs multiple times, the index of the first
    occurrence is returned.
    """
    bound = getattr(a, 'argmax', None)
    if bound is None:
        # Plain sequences lack the method; coerce and re-wrap.
        return _wrapit(a, 'argmax', axis, out)
    return bound(axis, out)
def argmin(a, axis=None, out=None):
    """
    Return the indices of the minimum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which to search.  By default the index is into the
        flattened array.
    out : array, optional
        If provided, the result will be inserted into this array.  It should
        be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of ints
        Array of indices into the array.  It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    See Also
    --------
    ndarray.argmin, argmax
    amin : The minimum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.

    Notes
    -----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3)
    >>> np.argmin(a)
    0
    >>> np.argmin(a, axis=0)
    array([0, 0, 0])
    >>> np.argmin(a, axis=1)
    array([0, 0])
    """
    try:
        bound = a.argmin
    except AttributeError:
        pass
    else:
        return bound(axis, out)
    # `a` has no argmin method of its own; convert and retry via the helper.
    return _wrapit(a, 'argmin', axis, out)
def searchsorted(a, v, side='left', sorter=None):
    """
    Find indices where elements should be inserted to maintain order.

    Find the indices into a sorted array `a` such that, if the
    corresponding elements in `v` were inserted before the indices, the
    order of `a` would be preserved.

    Parameters
    ----------
    a : 1-D array_like
        Input array.  If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        that sort it.
    v : array_like
        Values to insert into `a`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index.  If there is no suitable
        index, return either 0 or N (where N is the length of `a`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort `a` into ascending
        order.  Typically the result of argsort.

        .. versionadded:: 1.7.0

    Returns
    -------
    indices : array of ints
        Array of insertion points with the same shape as `v`.

    See Also
    --------
    sort : Return a sorted copy of an array.
    histogram : Produce histogram from 1-D data.

    Notes
    -----
    Binary search is used to find the required insertion points.

    Examples
    --------
    >>> np.searchsorted([1,2,3,4,5], 3)
    2
    >>> np.searchsorted([1,2,3,4,5], 3, side='right')
    3
    """
    try:
        bound = a.searchsorted
    except AttributeError:
        pass
    else:
        return bound(v, side, sorter)
    # No native searchsorted method; convert and retry via the helper.
    return _wrapit(a, 'searchsorted', v, side, sorter)
def resize(a, new_shape):
    """
    Return a new array with the specified shape.

    If the new array is larger than the original array, then the new
    array is filled with repeated copies of `a`.  Note that this behavior
    is different from ``a.resize(new_shape)`` which fills with zeros
    instead of repeated copies of `a`.

    Parameters
    ----------
    a : array_like
        Array to be resized.
    new_shape : int or tuple of int
        Shape of resized array.

    Returns
    -------
    reshaped_array : ndarray
        The new array is formed from the data in the old array, repeated
        if necessary to fill out the required number of elements.  The
        data are repeated in the order that they are stored in memory.

    See Also
    --------
    ndarray.resize : resize an array in-place.

    Examples
    --------
    >>> a = np.array([[0, 1], [2, 3]])
    >>> np.resize(a, (2, 3))
    array([[0, 1, 2],
           [3, 0, 1]])
    >>> np.resize(a, (2, 4))
    array([[0, 1, 2, 3],
           [0, 1, 2, 3]])
    """
    if isinstance(new_shape, (int, nt.integer)):
        new_shape = (new_shape,)
    a = ravel(a)
    Na = len(a)
    if not Na:
        # Nothing to repeat; the result can only be filled with zeros.
        return mu.zeros(new_shape, a.dtype.char)
    total_size = um.multiply.reduce(new_shape)
    if total_size == 0:
        return a[:0]
    # Integer floor division: the previous ``int(total_size / Na)`` went
    # through float true division, which can round incorrectly once the
    # sizes exceed 2**53.
    n_copies, remainder = divmod(total_size, Na)
    if remainder != 0:
        # A partial extra copy is needed; `extra` counts the surplus
        # trailing elements to trim after concatenation.
        n_copies += 1
        extra = Na - remainder
    else:
        extra = 0
    a = concatenate((a,) * n_copies)
    if extra > 0:
        a = a[:-extra]
    return reshape(a, new_shape)
def squeeze(a, axis=None):
    """
    Remove single-dimensional entries from the shape of an array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        .. versionadded:: 1.7.0

        Selects a subset of the single-dimensional entries in the
        shape.  If an axis is selected with shape entry greater than
        one, an error is raised.

    Returns
    -------
    squeezed : ndarray
        The input array, but with all or a subset of the
        dimensions of length 1 removed.  This is always `a` itself
        or a view into `a`.

    Examples
    --------
    >>> x = np.array([[[0], [1], [2]]])
    >>> x.shape
    (1, 3, 1)
    >>> np.squeeze(x).shape
    (3,)
    """
    try:
        method = a.squeeze
    except AttributeError:
        return _wrapit(a, 'squeeze')
    try:
        # Prefer the axis-aware signature introduced in NumPy 1.7.
        result = method(axis=axis)
    except TypeError:
        # Subclasses whose squeeze() predates the axis parameter.
        result = method()
    return result
def diagonal(a, offset=0, axis1=0, axis2=1):
    """
    Return specified diagonals.

    If `a` is 2-D, returns the diagonal of `a` with the given offset,
    i.e., the collection of elements of the form ``a[i, i+offset]``.  If
    `a` has more than two dimensions, then the axes specified by `axis1`
    and `axis2` are used to determine the 2-D sub-array whose diagonal is
    returned.  The shape of the resulting array can be determined by
    removing `axis1` and `axis2` and appending an index to the right equal
    to the size of the resulting diagonals.

    In versions of NumPy prior to 1.7, this function always returned a new,
    independent array containing a copy of the values in the diagonal.
    In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
    but depending on this fact is deprecated.  In NumPy 1.9 it returns a
    read-only view on the original array; attempting to write to it will
    produce an error.  In NumPy 1.10, it will return a read/write view and
    writing to the returned array will alter your original array.

    If you don't write to the array returned by this function, then you
    can ignore all of the above.  If you depend on the current behavior,
    use ``np.diagonal(a).copy()``, which works with both past and future
    versions of NumPy.

    Parameters
    ----------
    a : array_like
        Array from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal.  Can be positive or
        negative.  Defaults to main diagonal (0).
    axis1 : int, optional
        Axis to be used as the first axis of the 2-D sub-arrays from which
        the diagonals should be taken.  Defaults to first axis (0).
    axis2 : int, optional
        Axis to be used as the second axis of the 2-D sub-arrays from
        which the diagonals should be taken.  Defaults to second axis (1).

    Returns
    -------
    array_of_diagonals : ndarray
        If `a` is 2-D and not a matrix, a 1-D array of the same type as
        `a` containing the diagonal is returned.  If `a` is a matrix, a
        1-D array is returned in order to maintain backward compatibility.
        If the dimension of `a` is greater than two, then an array of
        diagonals is returned, "packed" from left-most dimension to
        right-most.

    Raises
    ------
    ValueError
        If the dimension of `a` is less than 2.

    See Also
    --------
    diag : MATLAB work-a-like for 1-D and 2-D arrays.
    diagflat : Create diagonal arrays.
    trace : Sum along diagonals.

    Examples
    --------
    >>> a = np.arange(4).reshape(2,2)
    >>> a.diagonal()
    array([0, 3])
    >>> a.diagonal(1)
    array([1])
    """
    # np.matrix must keep returning a 1-D diagonal for backward
    # compatibility, so route it through a plain ndarray.
    convert = asarray if isinstance(a, np.matrix) else asanyarray
    return convert(a).diagonal(offset, axis1, axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
    """
    Return the sum along diagonals of the array.

    If `a` is 2-D, the sum along its diagonal with the given offset
    is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
    If `a` has more than two dimensions, then the axes specified by
    `axis1` and `axis2` are used to determine the 2-D sub-arrays whose
    traces are returned.  The shape of the resulting array is the same as
    that of `a` with `axis1` and `axis2` removed.

    Parameters
    ----------
    a : array_like
        Input array, from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal.  Can be both
        positive and negative.  Defaults to 0.
    axis1, axis2 : int, optional
        Axes to be used as the first and second axis of the 2-D sub-arrays
        from which the diagonals should be taken.  Defaults are the first
        two axes of `a`.
    dtype : dtype, optional
        Determines the data-type of the returned array and of the
        accumulator where the elements are summed.  If dtype has the value
        None and `a` is of integer type of precision less than the default
        integer precision, then the default integer precision is used.
        Otherwise, the precision is the same as that of `a`.
    out : ndarray, optional
        Array into which the output is placed.  Its type is preserved and
        it must be of the right shape to hold the output.

    Returns
    -------
    sum_along_diagonals : ndarray
        If `a` is 2-D, the sum along the diagonal is returned.  If `a` has
        larger dimensions, then an array of sums along diagonals is
        returned.

    See Also
    --------
    diag, diagonal, diagflat

    Examples
    --------
    >>> np.trace(np.eye(3))
    3.0
    """
    # np.matrix is special-cased through a plain ndarray to preserve
    # backward-compatible return types.
    convert = asarray if isinstance(a, np.matrix) else asanyarray
    return convert(a).trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
    """
    Return a contiguous flattened array.

    A 1-D array, containing the elements of the input, is returned.  A
    copy is made only if needed.

    As of NumPy 1.10, the returned array will have the same type as the
    input array (for example, a masked array will be returned for a
    masked array input).

    Parameters
    ----------
    a : array_like
        Input array.  The elements in `a` are read in the order specified
        by `order`, and packed as a 1-D array.
    order : {'C', 'F', 'A', 'K'}, optional
        The elements of `a` are read using this index order.  'C' means
        row-major, C-style order, with the last axis index changing
        fastest.  'F' means column-major, Fortran-style order, with the
        first index changing fastest.  Note that 'C' and 'F' take no
        account of the memory layout of the underlying array, and only
        refer to the order of axis indexing.  'A' means Fortran-like
        index order if `a` is Fortran *contiguous* in memory, C-like
        order otherwise.  'K' means to read the elements in the order
        they occur in memory, except for reversing the data when strides
        are negative.  Default is 'C'.

    Returns
    -------
    y : array_like
        If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array
        of the same subtype as `a`.  The shape of the returned array is
        ``(a.size,)``.  Matrices are special cased for backward
        compatibility.

    See Also
    --------
    ndarray.flat : 1-D iterator over an array.
    ndarray.flatten : 1-D array copy of the elements of an array
                      in row-major order.
    ndarray.reshape : Change the shape of an array without changing its
                      data.

    Notes
    -----
    In row-major, C-style order, in two dimensions, the row index varies
    the slowest, and the column index the quickest.  When a view is
    desired in as many cases as possible, ``arr.reshape(-1)`` may be
    preferable.

    Examples
    --------
    It is equivalent to ``reshape(-1, order=order)``.

    >>> x = np.array([[1, 2, 3], [4, 5, 6]])
    >>> np.ravel(x)
    array([1, 2, 3, 4, 5, 6])
    >>> np.ravel(x, order='F')
    array([1, 4, 2, 5, 3, 6])
    """
    # np.matrix must flatten to a 1-D ndarray for backward compatibility.
    convert = asarray if isinstance(a, np.matrix) else asanyarray
    return convert(a).ravel(order)
def nonzero(a):
    """
    Return the indices of the elements that are non-zero.

    Returns a tuple of arrays, one for each dimension of `a`,
    containing the indices of the non-zero elements in that
    dimension.  The values in `a` are always tested and returned in
    row-major, C-style order.  The corresponding non-zero values can
    be obtained with::

        a[nonzero(a)]

    To group the indices by element, rather than dimension, use::

        transpose(nonzero(a))

    The result of this is always a 2-D array, with a row for
    each non-zero element.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    tuple_of_arrays : tuple
        Indices of elements that are non-zero.

    See Also
    --------
    flatnonzero :
        Return indices that are non-zero in the flattened version of the
        input array.
    ndarray.nonzero :
        Equivalent ndarray method.
    count_nonzero :
        Counts the number of non-zero elements in the input array.

    Examples
    --------
    >>> x = np.eye(3)
    >>> np.nonzero(x)
    (array([0, 1, 2]), array([0, 1, 2]))

    A common use for ``nonzero`` is to find the indices of an array where
    a condition is True:

    >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
    >>> np.nonzero(a > 3)
    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
    """
    try:
        method = a.nonzero
    except AttributeError:
        # No native nonzero method; convert and retry via the helper.
        return _wrapit(a, 'nonzero')
    return method()
def shape(a):
    """
    Return the shape of an array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    shape : tuple of ints
        The elements of the shape tuple give the lengths of the
        corresponding array dimensions.

    See Also
    --------
    alen
    ndarray.shape : Equivalent array method.

    Examples
    --------
    >>> np.shape(np.eye(3))
    (3, 3)
    >>> np.shape([[1, 2]])
    (1, 2)
    >>> np.shape(0)
    ()
    """
    try:
        return a.shape
    except AttributeError:
        # Not array-like enough to carry a .shape; coerce first.
        return asarray(a).shape
def compress(condition, a, axis=None, out=None):
    """
    Return selected slices of an array along given axis.

    When working along a given axis, a slice along that axis is returned
    in `output` for each index where `condition` evaluates to True.  When
    working on a 1-D array, `compress` is equivalent to `extract`.

    Parameters
    ----------
    condition : 1-D array of bools
        Array that selects which entries to return.  If len(condition)
        is less than the size of `a` along the given axis, then output is
        truncated to the length of the condition array.
    a : array_like
        Array from which to extract a part.
    axis : int, optional
        Axis along which to take slices.  If None (default), work on the
        flattened array.
    out : ndarray, optional
        Output array.  Its type is preserved and it must be of the right
        shape to hold the output.

    Returns
    -------
    compressed_array : ndarray
        A copy of `a` without the slices along axis for which `condition`
        is false.

    See Also
    --------
    take, choose, diag, diagonal, select
    ndarray.compress : Equivalent method in ndarray
    np.extract : Equivalent method when working on 1-D arrays
    numpy.doc.ufuncs : Section "Output arguments"

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4], [5, 6]])
    >>> np.compress([0, 1], a, axis=0)
    array([[3, 4]])
    >>> np.compress([False, True], a)
    array([2])
    """
    try:
        method = a.compress
    except AttributeError:
        # No native compress method; convert and retry via the helper.
        return _wrapit(a, 'compress', condition, axis, out)
    return method(condition, axis, out)
def clip(a, a_min, a_max, out=None):
    """
    Clip (limit) the values in an array.

    Given an interval, values outside the interval are clipped to
    the interval edges.  For example, if an interval of ``[0, 1]``
    is specified, values smaller than 0 become 0, and values larger
    than 1 become 1.

    Parameters
    ----------
    a : array_like
        Array containing elements to clip.
    a_min : scalar or array_like
        Minimum value.
    a_max : scalar or array_like
        Maximum value.  If `a_min` or `a_max` are array_like, then they
        will be broadcasted to the shape of `a`.
    out : ndarray, optional
        The results will be placed in this array.  It may be the input
        array for in-place clipping.  `out` must be of the right shape
        to hold the output.  Its type is preserved.

    Returns
    -------
    clipped_array : ndarray
        An array with the elements of `a`, but where values
        < `a_min` are replaced with `a_min`, and those > `a_max`
        with `a_max`.

    See Also
    --------
    numpy.doc.ufuncs : Section "Output arguments"

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.clip(a, 1, 8)
    array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
    >>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
    array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
    """
    try:
        method = a.clip
    except AttributeError:
        # No native clip method; convert and retry via the helper.
        return _wrapit(a, 'clip', a_min, a_max, out)
    return method(a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None, keepdims=False):
    """
    Sum of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Elements to sum.
    axis : None or int or tuple of ints, optional
        Axis or axes along which a sum is performed.  The default
        (``axis=None``) is to sum over all the dimensions of the input
        array.  `axis` may be negative, in which case it counts from the
        last to the first axis.

        .. versionadded:: 1.7.0

        If this is a tuple of ints, a sum is performed on multiple
        axes, instead of a single axis or all the axes as before.
    dtype : dtype, optional
        The type of the returned array and of the accumulator in which
        the elements are summed.  By default, the dtype of `a` is used.
        An exception is when `a` has an integer type with less precision
        than the default platform integer; in that case, the default
        platform integer is used instead.
    out : ndarray, optional
        Array into which the output is placed.  By default, a new array
        is created.  If `out` is given, it must be of the appropriate
        shape.  Its type is preserved.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one.  With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    sum_along_axis : ndarray
        An array with the same shape as `a`, with the specified
        axis removed.  If `a` is a 0-d array, or if `axis` is None, a
        scalar is returned.  If an output array is specified, a reference
        to `out` is returned.

    See Also
    --------
    ndarray.sum : Equivalent method.
    cumsum : Cumulative sum of array elements.
    trapz : Integration of array values using the composite trapezoidal
            rule.
    mean, average

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow.  The sum of an empty array is the neutral
    element 0.

    Examples
    --------
    >>> np.sum([0.5, 1.5])
    2.0
    >>> np.sum([[0, 1], [0, 5]], axis=0)
    array([0, 6])
    """
    if isinstance(a, _gentype):
        # Generators go through the builtin sum (saved as _sum_).
        res = _sum_(a)
        if out is not None:
            out[...] = res
            return out
        return res
    if type(a) is mu.ndarray:
        # Exactly an ndarray: use the internal reduction directly.
        return _methods._sum(a, axis=axis, dtype=dtype,
                             out=out, keepdims=keepdims)
    try:
        method = a.sum
    except AttributeError:
        return _methods._sum(a, axis=axis, dtype=dtype,
                             out=out, keepdims=keepdims)
    # NOTE: keepdims is dropped for duck-typed .sum implementations,
    # which may not accept it.
    return method(axis=axis, dtype=dtype, out=out)
def product(a, axis=None, dtype=None, out=None, keepdims=False):
    """
    Return the product of array elements over a given axis.

    See Also
    --------
    prod : equivalent function; see for details.
    """
    # Thin alias around the multiply ufunc's reduction.
    reduce_kwargs = dict(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    return um.multiply.reduce(a, **reduce_kwargs)
def sometrue(a, axis=None, out=None, keepdims=False):
    """
    Check whether some values are true.

    Refer to `any` for full documentation.

    See Also
    --------
    any : equivalent function
    """
    arr = asanyarray(a)
    kwargs = {'axis': axis, 'out': out}
    try:
        return arr.any(keepdims=keepdims, **kwargs)
    except TypeError:
        # Subclasses whose .any() predates the keepdims parameter.
        return arr.any(**kwargs)
def alltrue(a, axis=None, out=None, keepdims=False):
    """
    Check if all elements of input array are true.

    See Also
    --------
    numpy.all : Equivalent function; see for details.
    """
    arr = asanyarray(a)
    kwargs = {'axis': axis, 'out': out}
    try:
        return arr.all(keepdims=keepdims, **kwargs)
    except TypeError:
        # Subclasses whose .all() predates the keepdims parameter.
        return arr.all(**kwargs)
def any(a, axis=None, out=None, keepdims=False):
"""
Test whether any array element along a given axis evaluates to True.
Returns single boolean unless `axis` is not ``None``
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (`axis` = `None`) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
arr = asanyarray(a)
try:
return arr.any(axis=axis, out=out, keepdims=keepdims)
except TypeError:
return arr.any(axis=axis, out=out)
def all(a, axis=None, out=None, keepdims=False):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (`axis` = `None`) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
arr = asanyarray(a)
try:
return arr.all(axis=axis, out=out, keepdims=keepdims)
except TypeError:
return arr.all(axis=axis, out=out)
def cumsum(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative sum of the elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed.  The default
        (None) is to compute the cumsum over the flattened array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed.  If `dtype` is not specified, it defaults
        to the dtype of `a`, unless `a` has an integer dtype with a
        precision less than that of the default platform integer.  In
        that case, the default platform integer is used.
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output
        but the type will be cast if necessary.

    Returns
    -------
    cumsum_along_axis : ndarray
        A new array holding the result is returned unless `out` is
        specified, in which case a reference to `out` is returned.  The
        result has the same size as `a`, and the same shape as `a` if
        `axis` is not None or `a` is a 1-d array.

    See Also
    --------
    sum : Sum array elements.
    trapz : Integration of array values using the composite trapezoidal
            rule.
    diff : Calculate the n-th order discrete difference along given axis.

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow.

    Examples
    --------
    >>> a = np.array([[1,2,3], [4,5,6]])
    >>> np.cumsum(a)
    array([ 1,  3,  6, 10, 15, 21])
    >>> np.cumsum(a, axis=0)
    array([[1, 2, 3],
           [5, 7, 9]])
    """
    try:
        bound = a.cumsum
    except AttributeError:
        pass
    else:
        return bound(axis, dtype, out)
    # No native cumsum method; convert and retry via the helper.
    return _wrapit(a, 'cumsum', axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product over the given axis.

    See Also
    --------
    cumprod : equivalent function; see for details.
    """
    try:
        bound = a.cumprod
    except AttributeError:
        pass
    else:
        return bound(axis, dtype, out)
    # No native cumprod method; convert and retry via the helper.
    return _wrapit(a, 'cumprod', axis, dtype, out)
def ptp(a, axis=None, out=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for 'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks.  By default, flatten the
        array.
    out : array_like
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output,
        but the type of the output values will be cast if necessary.

    Returns
    -------
    ptp : ndarray
        A new array holding the result, unless `out` was
        specified, in which case a reference to `out` is returned.

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> np.ptp(x, axis=0)
    array([2, 2])
    >>> np.ptp(x, axis=1)
    array([1, 1])
    """
    try:
        bound = a.ptp
    except AttributeError:
        pass
    else:
        return bound(axis, out)
    # No native ptp method; convert and retry via the helper.
    return _wrapit(a, 'ptp', axis, out)
def amax(a, axis=None, out=None, keepdims=False):
    """
    Return the maximum of an array or maximum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to operate.  By default, flattened input
        is used.

        .. versionadded: 1.7.0

        If this is a tuple of ints, the maximum is selected over multiple
        axes, instead of a single axis or all the axes as before.
    out : ndarray, optional
        Alternative output array in which to place the result.  Must
        be of the same shape and buffer length as the expected output.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one.  With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    amax : ndarray or scalar
        Maximum of `a`.  If `axis` is None, the result is a scalar value.
        If `axis` is given, the result is an array of dimension
        ``a.ndim - 1``.

    See Also
    --------
    amin :
        The minimum value of an array along a given axis, propagating
        any NaNs.
    nanmax :
        The maximum value of an array along a given axis, ignoring any
        NaNs.
    maximum :
        Element-wise maximum of two arrays, propagating any NaNs.
    fmax :
        Element-wise maximum of two arrays, ignoring any NaNs.
    argmax :
        Return the indices of the maximum values.
    nanmin, minimum, fmin

    Notes
    -----
    NaN values are propagated, that is if at least one item is NaN, the
    corresponding max value will be NaN as well.  To ignore NaN values
    (MATLAB behavior), please use nanmax.

    Don't use `amax` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
    ``amax(a, axis=0)``.

    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> np.amax(a)
    3
    >>> np.amax(a, axis=0)
    array([2, 3])
    >>> np.amax(a, axis=1)
    array([1, 3])
    """
    if type(a) is mu.ndarray:
        # Exactly an ndarray: use the internal reduction directly.
        return _methods._amax(a, axis=axis,
                              out=out, keepdims=keepdims)
    try:
        method = a.max
    except AttributeError:
        return _methods._amax(a, axis=axis,
                              out=out, keepdims=keepdims)
    # NOTE: keepdims is dropped for duck-typed .max implementations,
    # which may not accept it.
    return method(axis=axis, out=out)
def amin(a, axis=None, out=None, keepdims=False):
    """
    Return the minimum of an array or the minimum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to operate; by default the flattened
        input is used.
    out : ndarray, optional
        Alternative output array of the expected shape and buffer length.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as size-one
        dimensions, so the result broadcasts against the original input.

    Returns
    -------
    amin : ndarray or scalar
        Minimum of `a`.  NaN values propagate; use `nanmin` to ignore
        them.  Scalar when `axis` is None, otherwise an array of
        dimension ``a.ndim - 1``.

    See Also
    --------
    amax, nanmin, minimum, fmin, argmin
    """
    if type(a) is mu.ndarray:
        # Genuine ndarray: dispatch straight to the C-level reduction.
        return _methods._amin(a, axis=axis,
                              out=out, keepdims=keepdims)
    try:
        min_method = a.min
    except AttributeError:
        # No .min attribute at all -- let the helper coerce the input.
        return _methods._amin(a, axis=axis,
                              out=out, keepdims=keepdims)
    # Duck-typed array object: call its own .min.  keepdims is dropped
    # here, mirroring the historical behaviour for non-ndarray inputs.
    return min_method(axis=axis, out=out)
def alen(a):
    """
    Return the length of the first dimension of the input array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    alen : int
        Length of the first dimension of `a`; scalars are promoted to
        one-dimensional arrays, so they report a length of 1.

    See Also
    --------
    shape, size

    Examples
    --------
    >>> np.alen(np.zeros((7, 4, 5)))
    7
    """
    try:
        return len(a)
    except TypeError:
        # len() is undefined for scalars and 0-d arrays; promote to at
        # least one dimension first so a sensible length exists.
        return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None, keepdims=False):
    """
    Return the product of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which a product is performed; the default
        multiplies over the flattened array.  May be negative (counted
        from the last axis).
    dtype : data-type, optional
        Type of the returned array and of the accumulator.  For integer
        input the default is the platform integer (unsigned if `a` is
        unsigned); otherwise the dtype of `a` is used.
    out : ndarray, optional
        Alternative output array of the expected shape; values are cast
        if necessary.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as size-one
        dimensions.

    Returns
    -------
    product_along_axis : ndarray or scalar
        `a` with the specified axis removed, or a reference to `out`.

    Notes
    -----
    Integer arithmetic is modular: overflow wraps silently.  The product
    of an empty array is the neutral element 1.

    See Also
    --------
    ndarray.prod : equivalent method
    """
    if type(a) is mu.ndarray:
        # Genuine ndarray: dispatch straight to the C-level reduction.
        return _methods._prod(a, axis=axis, dtype=dtype,
                              out=out, keepdims=keepdims)
    try:
        prod_method = a.prod
    except AttributeError:
        # No .prod attribute -- let the helper coerce the input itself.
        return _methods._prod(a, axis=axis, dtype=dtype,
                              out=out, keepdims=keepdims)
    # Duck-typed array object: call its own .prod (keepdims is not
    # forwarded, mirroring the historical non-ndarray behaviour).
    return prod_method(axis=axis, dtype=dtype, out=out)
def cumprod(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed; by default
        the input is flattened first.
    dtype : dtype, optional
        Type of the result and of the accumulator.  Defaults to the
        dtype of `a`, promoted to the platform integer for small
        integer dtypes.
    out : ndarray, optional
        Alternative output array of the expected shape and buffer
        length; values are cast if necessary.

    Returns
    -------
    cumprod : ndarray
        A new result array, or a reference to `out` when it was given.

    Notes
    -----
    Integer arithmetic is modular: overflow wraps without error.

    Examples
    --------
    >>> np.cumprod(np.array([1, 2, 3]))
    array([1, 2, 6])
    """
    try:
        cumprod_method = a.cumprod
    except AttributeError:
        # Not array-like enough to carry a .cumprod method; wrap and
        # retry through the generic fallback helper.
        return _wrapit(a, 'cumprod', axis, dtype, out)
    return cumprod_method(axis, dtype, out)
def ndim(a):
    """
    Return the number of dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array; converted to an ndarray if it is not one already.

    Returns
    -------
    number_of_dimensions : int
        The number of dimensions in `a`.  Scalars are zero-dimensional.

    See Also
    --------
    ndarray.ndim : equivalent method
    shape, ndarray.shape : dimensions of array

    Examples
    --------
    >>> np.ndim([[1, 2, 3], [4, 5, 6]])
    2
    >>> np.ndim(1)
    0
    """
    # Fast path: real ndarrays (and duck arrays) expose .ndim directly.
    if hasattr(a, 'ndim'):
        return a.ndim
    # Otherwise coerce to an ndarray first and read its rank.
    return asarray(a).ndim
def rank(a):
    """
    Return the number of dimensions of an array.

    .. note::
        Deprecated in NumPy 1.9 to avoid confusion with
        `numpy.linalg.matrix_rank`; use the ``ndim`` attribute or
        function instead.

    Parameters
    ----------
    a : array_like
        Array whose number of dimensions is desired; converted if it is
        not an array already.

    Returns
    -------
    number_of_dimensions : int
        The number of dimensions in the array; scalars are
        zero-dimensional.

    See Also
    --------
    ndim : equivalent function
    ndarray.ndim : equivalent property
    shape, ndarray.shape : dimensions of array

    Notes
    -----
    In the old Numeric package `rank` meant the number of dimensions;
    NumPy uses `ndim` for that concept.
    """
    # Deprecated 2014-04-12 for the 1.9 release; warn on every call.
    warnings.warn(
        "`rank` is deprecated; use the `ndim` attribute or function instead. "
        "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
        VisibleDeprecationWarning)
    if hasattr(a, 'ndim'):
        return a.ndim
    return asarray(a).ndim
def size(a, axis=None):
    """
    Return the number of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which the elements are counted.  By default the
        total number of elements is returned.

    Returns
    -------
    element_count : int
        Number of elements along the specified axis.

    See Also
    --------
    shape, ndarray.shape : dimensions of array
    ndarray.size : number of elements in array

    Examples
    --------
    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> np.size(a)
    6
    >>> np.size(a, 1)
    3
    """
    if axis is None:
        # Total element count across all axes.
        try:
            return a.size
        except AttributeError:
            return asarray(a).size
    # Count along a single axis: just the shape entry for that axis.
    try:
        return a.shape[axis]
    except AttributeError:
        return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
    """
    Evenly round to the given number of decimals.

    Parameters
    ----------
    a : array_like
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default 0).  Negative
        values round to positions left of the decimal point.
    out : ndarray, optional
        Alternative output array of the expected shape; output values
        are cast if necessary.

    Returns
    -------
    rounded_array : ndarray
        Array of the same type as `a` with rounded values; a new array
        unless `out` was given.  Real and imaginary parts of complex
        numbers are rounded separately; rounding a float yields a float.

    See Also
    --------
    ndarray.round : equivalent method
    ceil, fix, floor, rint, trunc

    Notes
    -----
    Values exactly halfway between rounded decimals go to the nearest
    *even* value (banker's rounding): 0.5 and -0.5 round to 0.0, 1.5
    and 2.5 both round to 2.0.  Results can also be surprising because
    decimal fractions are not exactly representable in IEEE floats.
    """
    try:
        round_method = a.round
    except AttributeError:
        # Not array-like enough to carry a .round method; wrap and
        # retry through the generic fallback helper.
        return _wrapit(a, 'round', decimals, out)
    return round_method(decimals, out)
def round_(a, decimals=0, out=None):
    """
    Round an array to the given number of decimals.

    This is an alias with the same contract as `around`; refer to
    `around` for full documentation.

    See Also
    --------
    around : equivalent function
    """
    try:
        round_method = a.round
    except AttributeError:
        # Fall back to the generic wrapper for non-array inputs.
        return _wrapit(a, 'round', decimals, out)
    return round_method(decimals, out)
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """
    Compute the arithmetic mean along the specified axis.

    Parameters
    ----------
    a : array_like
        Values to average; converted if not already an array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the means are computed; the default
        averages the flattened array.
    dtype : data-type, optional
        Type used for the accumulator and result.  Defaults to
        `float64` for integer input, the input dtype for floats.
    out : ndarray, optional
        Alternative output array of the expected shape; values are cast
        if necessary.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as size-one
        dimensions.

    Returns
    -------
    m : ndarray or scalar
        The mean value(s), or a reference to `out` when it was given.

    See Also
    --------
    average : weighted average
    std, var, nanmean, nanstd, nanvar

    Notes
    -----
    For floating-point input the mean accumulates in the input's own
    precision, which can be inaccurate for `float32`; pass a
    higher-precision `dtype` to compensate.
    """
    if type(a) is not mu.ndarray:
        # Duck-typed arrays (masked arrays, pandas objects, ...) supply
        # their own .mean; note it is called without keepdims, and any
        # AttributeError raised here falls through to the generic path.
        try:
            duck_mean = a.mean
            return duck_mean(axis=axis, dtype=dtype, out=out)
        except AttributeError:
            pass
    return _methods._mean(a, axis=axis, dtype=dtype,
                          out=out, keepdims=keepdims)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """
    Compute the standard deviation along the specified axis.

    Parameters
    ----------
    a : array_like
        Values whose spread is measured.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the standard deviation is computed;
        by default the flattened array is used.
    dtype : dtype, optional
        Accumulator/result type; `float64` for integer input, otherwise
        the input dtype.
    out : ndarray, optional
        Alternative output array of the expected shape; calculated
        values are cast if necessary.
    ddof : int, optional
        Delta degrees of freedom: the divisor is ``N - ddof`` where
        ``N`` is the number of elements.  Default 0.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as size-one
        dimensions.

    Returns
    -------
    standard_deviation : ndarray or scalar
        The standard deviation(s), or a reference to `out`.

    See Also
    --------
    var, mean, nanmean, nanstd, nanvar

    Notes
    -----
    Computed as ``sqrt(mean(abs(x - x.mean())**2))``.  With ``ddof=1``
    the *variance* estimate is unbiased, but its square root is still
    a biased estimate of the standard deviation.  Complex input takes
    the absolute value before squaring, so the result is real and
    nonnegative.  float32 accumulation can be inaccurate; pass a
    higher-precision `dtype` to compensate.
    """
    if type(a) is not mu.ndarray:
        # Duck-typed arrays supply their own .std; keepdims is not
        # forwarded, and an AttributeError from either the lookup or
        # the call falls through to the generic implementation.
        try:
            duck_std = a.std
            return duck_std(axis=axis, dtype=dtype, out=out, ddof=ddof)
        except AttributeError:
            pass
    return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                         keepdims=keepdims)
def var(a, axis=None, dtype=None, out=None, ddof=0,
        keepdims=False):
    """
    Compute the variance along the specified axis.

    Parameters
    ----------
    a : array_like
        Values whose variance is desired; converted if not an array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the variance is computed; by default
        the flattened array is used.
    dtype : data-type, optional
        Accumulator/result type; `float32` for integer input arrays,
        otherwise the input dtype.
    out : ndarray, optional
        Alternative output array of the expected shape; values are
        cast if necessary.
    ddof : int, optional
        Delta degrees of freedom: the divisor is ``N - ddof`` where
        ``N`` is the number of elements.  Default 0.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as size-one
        dimensions.

    Returns
    -------
    variance : ndarray or scalar
        The variance value(s), or a reference to `out`.

    See Also
    --------
    std, mean, nanmean, nanstd, nanvar

    Notes
    -----
    Computed as ``mean(abs(x - x.mean())**2)``.  ``ddof=1`` gives an
    unbiased estimator for a hypothetical infinite population;
    ``ddof=0`` is the maximum-likelihood estimate for normal data.
    Complex input takes the absolute value before squaring, so the
    result is real and nonnegative.  float32 accumulation can be
    inaccurate; pass a higher-precision `dtype` to compensate.
    """
    if type(a) is not mu.ndarray:
        # Duck-typed arrays supply their own .var; keepdims is not
        # forwarded, and an AttributeError from either the lookup or
        # the call falls through to the generic implementation.
        try:
            duck_var = a.var
            return duck_var(axis=axis, dtype=dtype, out=out, ddof=ddof)
        except AttributeError:
            pass
    return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                         keepdims=keepdims)
| apache-2.0 |
JulienMcJay/eclock | windows/Python27/Lib/encodings/iso8859_14.py | 593 | 13908 | """ Python Character Mapping Codec iso8859_14 generated from 'MAPPINGS/ISO8859/8859-14.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-14 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode a text string to ISO 8859-14 bytes via `encoding_table`."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode ISO 8859-14 bytes to a text string via `decoding_table`."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding is stateless, so chunks are independent."""

    def encode(self, input, final=False):
        # `final` needs no special handling: every chunk encodes completely.
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; single-byte charmaps never split characters across chunks."""

    def decode(self, input, final=False):
        # `final` needs no special handling: every chunk decodes completely.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer combining this codec's encode with the generic stream machinery."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader combining this codec's decode with the generic stream machinery."""
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register this codec with `codecs`."""
    # The codec is stateless, so a single instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-14',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# ISO 8859-14 (Latin-8, Celtic) maps every byte to the Unicode code point of
# the same value EXCEPT at exactly 31 positions, which carry the Welsh/Breton
# letters (dot-above, grave, acute, diaeresis, circumflex forms).  Build the
# 256-entry decode string as the identity map patched at those positions.
decoding_table = ''.join(
    {
        0xA1: '\u1e02',  # B with dot above
        0xA2: '\u1e03',  # b with dot above
        0xA4: '\u010a',  # C with dot above
        0xA5: '\u010b',  # c with dot above
        0xA6: '\u1e0a',  # D with dot above
        0xA8: '\u1e80',  # W with grave
        0xAA: '\u1e82',  # W with acute
        0xAB: '\u1e0b',  # d with dot above
        0xAC: '\u1ef2',  # Y with grave
        0xAF: '\u0178',  # Y with diaeresis
        0xB0: '\u1e1e',  # F with dot above
        0xB1: '\u1e1f',  # f with dot above
        0xB2: '\u0120',  # G with dot above
        0xB3: '\u0121',  # g with dot above
        0xB4: '\u1e40',  # M with dot above
        0xB5: '\u1e41',  # m with dot above
        0xB7: '\u1e56',  # P with dot above
        0xB8: '\u1e81',  # w with grave
        0xB9: '\u1e57',  # p with dot above
        0xBA: '\u1e83',  # w with acute
        0xBB: '\u1e60',  # S with dot above
        0xBC: '\u1ef3',  # y with grave
        0xBD: '\u1e84',  # W with diaeresis
        0xBE: '\u1e85',  # w with diaeresis
        0xBF: '\u1e61',  # s with dot above
        0xD0: '\u0174',  # W with circumflex
        0xD7: '\u1e6a',  # T with dot above
        0xDE: '\u0176',  # Y with circumflex
        0xF0: '\u0175',  # w with circumflex
        0xF7: '\u1e6b',  # t with dot above
        0xFE: '\u0177',  # y with circumflex
    }.get(byte, chr(byte))
    for byte in range(256)
)
### Encoding table
# Inverse of `decoding_table`, built once at import time: maps each decoded
# Unicode code point back to its single ISO 8859-14 byte value.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
tensorflow/tensorboard | tensorboard/compat/proto/proto_test.py | 1 | 6863 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Proto match tests between `tensorboard.compat.proto` and TensorFlow.
These tests verify that the local copy of TensorFlow protos are the same
as those available directly from TensorFlow. Local protos are used to
build `tensorboard-notf` without a TensorFlow dependency.
"""
import difflib
import importlib
import tensorflow as tf
from google.protobuf import descriptor_pb2
# Keep this list synced with BUILD in current directory
# TensorFlow proto modules that TensorBoard pins local copies of.  Every
# pinned copy lives at tensorboard.compat.proto.<module basename>, so the
# (tf, tb) import pairs below are derived from the TF module paths.
_TF_PROTO_MODULES = (
    "tensorflow.core.framework.allocation_description_pb2",
    "tensorflow.core.framework.api_def_pb2",
    "tensorflow.core.framework.attr_value_pb2",
    "tensorflow.core.protobuf.cluster_pb2",
    "tensorflow.core.protobuf.config_pb2",
    "tensorflow.core.framework.cost_graph_pb2",
    "tensorflow.python.framework.cpp_shape_inference_pb2",
    "tensorflow.core.protobuf.debug_pb2",
    "tensorflow.core.util.event_pb2",
    "tensorflow.core.framework.function_pb2",
    "tensorflow.core.framework.graph_pb2",
    "tensorflow.core.protobuf.meta_graph_pb2",
    "tensorflow.core.framework.node_def_pb2",
    "tensorflow.core.framework.op_def_pb2",
    "tensorflow.core.framework.resource_handle_pb2",
    "tensorflow.core.protobuf.rewriter_config_pb2",
    "tensorflow.core.protobuf.saved_object_graph_pb2",
    "tensorflow.core.protobuf.saver_pb2",
    "tensorflow.core.framework.step_stats_pb2",
    "tensorflow.core.protobuf.struct_pb2",
    "tensorflow.core.framework.summary_pb2",
    "tensorflow.core.framework.tensor_pb2",
    "tensorflow.core.framework.tensor_description_pb2",
    "tensorflow.core.framework.tensor_shape_pb2",
    "tensorflow.core.profiler.tfprof_log_pb2",
    "tensorflow.core.protobuf.trackable_object_graph_pb2",
    "tensorflow.core.framework.types_pb2",
    "tensorflow.core.framework.variable_pb2",
    "tensorflow.core.framework.versions_pb2",
)

# (tensorflow module path, tensorboard.compat module path) pairs, in the
# same order as the hand-written list this replaces.
PROTO_IMPORTS = [
    (tf_module, "tensorboard.compat.proto." + tf_module.rsplit(".", 1)[1])
    for tf_module in _TF_PROTO_MODULES
]
# Textual rewrites applied to the TensorFlow descriptor dump before diffing,
# mirroring what proto/update.sh does when vendoring the protos.
# Order matters: the more specific "tensorflow.tfprof" patterns must come
# before the generic "tensorflow" ones so they are not shadowed.
PROTO_REPLACEMENTS = [
    ("tensorflow/core/framework/", "tensorboard/compat/proto/"),
    ("tensorflow/core/protobuf/", "tensorboard/compat/proto/"),
    ("tensorflow/core/profiler/", "tensorboard/compat/proto/"),
    ("tensorflow/python/framework/", "tensorboard/compat/proto/"),
    ("tensorflow/core/util/", "tensorboard/compat/proto/"),
    ('package: "tensorflow.tfprof"', 'package: "tensorboard"'),
    ('package: "tensorflow"', 'package: "tensorboard"'),
    ('type_name: ".tensorflow.tfprof', 'type_name: ".tensorboard'),
    ('type_name: ".tensorflow', 'type_name: ".tensorboard'),
]
# Failure message shown when a pinned proto drifts from TensorFlow's copy;
# {} is filled with the concatenated unified diffs.
MATCH_FAIL_MESSAGE_TEMPLATE = """
{}
NOTE!
====
This is expected to happen when TensorFlow updates their proto definitions.
We pin copies of the protos, but TensorFlow can freely update them at any
time.
The proper fix is:
1. In your TensorFlow clone, check out the version of TensorFlow whose
protos you want to update (e.g., `git checkout v2.2.0-rc0`)
2. In your tensorboard repo, run:
./tensorboard/compat/proto/update.sh PATH_TO_TENSORFLOW_REPO
3. Review and commit any changes.
"""
class ProtoMatchTest(tf.test.TestCase):
    """Checks that each pinned TensorBoard proto matches its TF source."""

    def _descriptor_text(self, module_name):
        # Import the *_pb2 module and serialize its file descriptor to the
        # text form used for comparison.
        proto = descriptor_pb2.FileDescriptorProto()
        importlib.import_module(module_name).DESCRIPTOR.CopyToProto(proto)
        return str(proto)

    def test_each_proto_matches_tensorflow(self):
        failed_diffs = []
        for tf_path, tb_path in PROTO_IMPORTS:
            tb_text = self._descriptor_text(tb_path)
            tf_text = self._descriptor_text(tf_path)
            # Rewrite the TensorFlow side to the vendored namespace, matching
            # the replacements done in proto/update.sh, then diff.
            for before, after in PROTO_REPLACEMENTS:
                tf_text = tf_text.replace(before, after)
            delta = "".join(
                difflib.unified_diff(
                    tb_text.splitlines(1),
                    tf_text.splitlines(1),
                    fromfile=tb_path,
                    tofile=tf_path,
                )
            )
            if delta:
                failed_diffs.append(delta)
        if failed_diffs:
            self.fail(MATCH_FAIL_MESSAGE_TEMPLATE.format("".join(failed_diffs)))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
pyjs/pyjs | pyjs/lib_trans/pycompiler/ast.py | 6 | 38017 | """Python abstract syntax node definitions
This file is automatically generated by Tools/compiler/astgen.py
"""
from pycompiler.consts import CO_VARARGS, CO_VARKEYWORDS
def flatten(seq):
    """Return a flat list of the leaves of *seq*.

    Recurses only into objects that are exactly ``tuple`` or ``list``
    (subclasses are treated as leaves, matching the original behavior).
    """
    flat = []
    for item in seq:
        if type(item) in (tuple, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
def flatten_nodes(seq):
    """Flatten *seq* and keep only the ``Node`` instances."""
    result = []
    for item in flatten(seq):
        if isinstance(item, Node):
            result.append(item)
    return result
nodes = {}
class Node(object):
    """Abstract base class for ast nodes."""
    def getChildren(self):
        # All children, nodes and leaf values alike.
        pass # implemented by subclasses
    def __iter__(self):
        # Iterating a node yields its children in order.
        for n in self.getChildren():
            yield n
    def asList(self): # for backwards compatibility
        return self.getChildren()
    def getChildNodes(self):
        # Only the children that are themselves Node instances.
        pass # implemented by subclasses
    def _get_lineno(self):
        return self._lineno
    def _set_lineno(self, lineno):
        # A non-int, non-None value is kept as the parse context and the
        # actual line number is pulled out of it.
        # NOTE(review): assumes the context's [1][0] element is the line
        # number (tokenizer (type, (row, col), ...) shape) -- confirm
        # against the parser that produces it.
        if lineno is not None and not isinstance(lineno, int):
            self._context = lineno
            self._lineno = lineno[1][0]
        else:
            self._lineno = lineno
            self._context = None
    lineno = property(_get_lineno, _set_lineno)
    def __str__(self):
        return repr(self)
class EmptyNode(Node):
    """Placeholder AST node carrying no information."""
    pass
class Expression(Node):
    """Artificial root node wrapping a single expression (eval mode)."""
    # Expression is an artificial node class to support "eval"
    nodes["expression"] = "Expression"
    def __init__(self, node):
        Node.__init__(self)
        self.node = node
    def getChildren(self):
        return self.node,
    def getChildNodes(self):
        return self.node,
    def __repr__(self):
        return "Expression(%s)" % (repr(self.node))
class Add(Node):
    """Binary addition: left + right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "Add(%s, %s)" % (repr(self.left), repr(self.right))
class And(Node):
    """Boolean 'and' over a sequence of subexpressions."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "And(%s)" % (repr(self.nodes),)
class AssAttr(Node):
    """Attribute used as an assignment/deletion target: expr.attrname."""
    def __init__(self, expr, attrname, flags, lineno=None):
        self.expr = expr
        self.attrname = attrname
        self.flags = flags
        self.lineno = lineno
    def getChildren(self):
        return self.expr, self.attrname, self.flags
    def getChildNodes(self):
        return self.expr,
    def __repr__(self):
        return "AssAttr(%s, %s, %s)" % (repr(self.expr), repr(self.attrname), repr(self.flags))
class AssList(Node):
    """List pattern used as an assignment target."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "AssList(%s)" % (repr(self.nodes),)
class AssName(Node):
    """Bare name used as an assignment/deletion target."""
    def __init__(self, name, flags, lineno=None):
        self.name = name
        self.flags = flags
        self.lineno = lineno
    def getChildren(self):
        return self.name, self.flags
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "AssName(%s, %s)" % (repr(self.name), repr(self.flags))
class AssTuple(Node):
    """Tuple pattern used as an assignment target."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "AssTuple(%s)" % (repr(self.nodes),)
class Assert(Node):
    """assert statement: ``assert test`` or ``assert test, fail``."""
    def __init__(self, test, fail, lineno=None):
        self.test = test
        self.fail = fail
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.test)
        children.append(self.fail)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.test)
        # fail is optional (None when no message was given).
        if self.fail is not None:
            nodelist.append(self.fail)
        return tuple(nodelist)
    def __repr__(self):
        return "Assert(%s, %s)" % (repr(self.test), repr(self.fail))
class Assign(Node):
    """Assignment statement: one or more targets (nodes) = expr."""
    def __init__(self, nodes, expr, lineno=None):
        self.nodes = nodes
        self.expr = expr
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.extend(flatten(self.nodes))
        children.append(self.expr)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        nodelist.append(self.expr)
        return tuple(nodelist)
    def __repr__(self):
        return "Assign(%s, %s)" % (repr(self.nodes), repr(self.expr))
class AugAssign(Node):
    """Augmented assignment, e.g. ``node += expr`` (op is the operator text)."""
    def __init__(self, node, op, expr, lineno=None):
        self.node = node
        self.op = op
        self.expr = expr
        self.lineno = lineno
    def getChildren(self):
        return self.node, self.op, self.expr
    def getChildNodes(self):
        return self.node, self.expr
    def __repr__(self):
        return "AugAssign(%s, %s, %s)" % (repr(self.node), repr(self.op), repr(self.expr))
class Backquote(Node):
    """Backquote (repr) expression: `expr`."""
    def __init__(self, expr, lineno=None):
        self.expr = expr
        self.lineno = lineno
    def getChildren(self):
        return self.expr,
    def getChildNodes(self):
        return self.expr,
    def __repr__(self):
        return "Backquote(%s)" % (repr(self.expr),)
class Bitand(Node):
    """Bitwise AND over a sequence of subexpressions."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Bitand(%s)" % (repr(self.nodes),)
class Bitor(Node):
    """Bitwise OR over a sequence of subexpressions."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Bitor(%s)" % (repr(self.nodes),)
class Bitxor(Node):
    """Bitwise XOR over a sequence of subexpressions."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Bitxor(%s)" % (repr(self.nodes),)
class Break(Node):
    """break statement (leaf node, no children)."""
    def __init__(self, lineno=None):
        self.lineno = lineno
    def getChildren(self):
        return ()
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Break()"
class CallFunc(Node):
    """Function call: node(args, *star_args, **dstar_args)."""
    def __init__(self, node, args, star_args, dstar_args, lineno=None):
        self.node = node
        self.args = args
        self.star_args = star_args
        self.dstar_args = dstar_args
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.node)
        children.extend(flatten(self.args))
        children.append(self.star_args)
        children.append(self.dstar_args)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.node)
        nodelist.extend(flatten_nodes(self.args))
        # star_args/dstar_args are optional (None when absent).
        if self.star_args is not None:
            nodelist.append(self.star_args)
        if self.dstar_args is not None:
            nodelist.append(self.dstar_args)
        return tuple(nodelist)
    def __repr__(self):
        return "CallFunc(%s, %s, %s, %s)" % (repr(self.node), repr(self.args), repr(self.star_args), repr(self.dstar_args))
class Class(Node):
    """class definition: name, base classes, docstring, body, decorators."""
    def __init__(self, name, bases, doc, code, decorators, lineno=None):
        self.name = name
        self.bases = bases
        self.doc = doc
        self.code = code
        self.decorators = decorators
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.name)
        children.extend(flatten(self.bases))
        children.append(self.doc)
        children.append(self.code)
        children.append(self.decorators)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.bases))
        nodelist.append(self.code)
        if self.decorators is not None:
            nodelist.append(self.decorators)
        return tuple(nodelist)
    def __repr__(self):
        return "Class(%s, %s, %s, %s, %s)" % (repr(self.name), repr(self.bases), repr(self.doc), repr(self.code), repr(self.decorators))
class Compare(Node):
    """Comparison chain: expr followed by its operator/operand pairs in ops."""
    def __init__(self, expr, ops, lineno=None):
        self.expr = expr
        self.ops = ops
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr)
        children.extend(flatten(self.ops))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.expr)
        nodelist.extend(flatten_nodes(self.ops))
        return tuple(nodelist)
    def __repr__(self):
        return "Compare(%s, %s)" % (repr(self.expr), repr(self.ops))
class Const(Node):
    """Constant literal; value is a plain Python value, not a child node."""
    def __init__(self, value, lineno=None):
        self.value = value
        self.lineno = lineno
    def getChildren(self):
        return self.value,
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Const(%s)" % (repr(self.value),)
class Continue(Node):
    """continue statement (leaf node, no children)."""
    def __init__(self, lineno=None):
        self.lineno = lineno
    def getChildren(self):
        return ()
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Continue()"
class Decorators(Node):
    """Decorator list attached to a function or class definition."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Decorators(%s)" % (repr(self.nodes),)
class Dict(Node):
    """Dict literal; items holds the key/value entries."""
    def __init__(self, items, lineno=None):
        self.items = items
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.items))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.items))
        return tuple(nodelist)
    def __repr__(self):
        return "Dict(%s)" % (repr(self.items),)
class Discard(Node):
    """Expression statement whose value is discarded."""
    def __init__(self, expr, lineno=None):
        self.expr = expr
        self.lineno = lineno
    def getChildren(self):
        return self.expr,
    def getChildNodes(self):
        return self.expr,
    def __repr__(self):
        return "Discard(%s)" % (repr(self.expr),)
class Div(Node):
    """Binary division: left / right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "Div(%s, %s)" % (repr(self.left), repr(self.right))
class Ellipsis(Node):
    """Ellipsis (...) appearing in a subscript (leaf node)."""
    def __init__(self, lineno=None):
        self.lineno = lineno
    def getChildren(self):
        return ()
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Ellipsis()"
class Exec(Node):
    """exec statement; locals/globals are optional (None when absent)."""
    def __init__(self, expr, locals, globals, lineno=None):
        self.expr = expr
        self.locals = locals
        self.globals = globals
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr)
        children.append(self.locals)
        children.append(self.globals)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.expr)
        if self.locals is not None:
            nodelist.append(self.locals)
        if self.globals is not None:
            nodelist.append(self.globals)
        return tuple(nodelist)
    def __repr__(self):
        return "Exec(%s, %s, %s)" % (repr(self.expr), repr(self.locals), repr(self.globals))
class FloorDiv(Node):
    """Floor division: left // right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "FloorDiv(%s, %s)" % (repr(self.left), repr(self.right))
class For(Node):
    """for loop: ``for assign in list: body`` with optional else_ suite."""
    def __init__(self, assign, list, body, else_, lineno=None):
        self.assign = assign
        self.list = list
        self.body = body
        self.else_ = else_
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.assign)
        children.append(self.list)
        children.append(self.body)
        children.append(self.else_)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.assign)
        nodelist.append(self.list)
        nodelist.append(self.body)
        if self.else_ is not None:
            nodelist.append(self.else_)
        return tuple(nodelist)
    def __repr__(self):
        return "For(%s, %s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.body), repr(self.else_))
class From(Node):
    """from-import: ``from modname import names`` (level for relative imports)."""
    def __init__(self, modname, names, level, lineno=None):
        self.modname = modname
        self.names = names
        self.level = level
        self.lineno = lineno
    def getChildren(self):
        return self.modname, self.names, self.level
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "From(%s, %s, %s)" % (repr(self.modname), repr(self.names), repr(self.level))
class Function(Node):
    """Function definition (def): decorators, signature, docstring, body."""
    def __init__(self, decorators, name, argnames, defaults, varargs, kwargs, doc, code, lineno=None):
        self.decorators = decorators
        self.name = name
        self.argnames = argnames
        self.defaults = defaults
        self.varargs = varargs
        self.kwargs = kwargs
        self.doc = doc
        self.code = code
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.decorators)
        children.append(self.name)
        children.append(self.argnames)
        children.extend(flatten(self.defaults))
        children.append(self.varargs)
        children.append(self.kwargs)
        children.append(self.doc)
        children.append(self.code)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        if self.decorators is not None:
            nodelist.append(self.decorators)
        nodelist.extend(flatten_nodes(self.defaults))
        # NOTE(review): varargs/kwargs are signature flags, not Nodes, yet
        # they are appended here unconditionally; CPython's compiler.ast
        # omits them from getChildNodes -- confirm this divergence is
        # intentional in this fork.
        nodelist.append(self.varargs)
        nodelist.append(self.kwargs)
        nodelist.append(self.code)
        return tuple(nodelist)
    def __repr__(self):
        return "Function(%s, %s, %s, %s, %s, %s, %s, %s)" % (repr(self.decorators), repr(self.name), repr(self.argnames), repr(self.defaults), repr(self.varargs), repr(self.kwargs), repr(self.doc), repr(self.code))
_Function = Function
class GenExpr(Node):
    """Generator expression; wraps its inner body and mimics a function
    scope (argnames/varargs/kwargs are set to look like a one-arg function)."""
    def __init__(self, code, lineno=None):
        self.code = code
        self.lineno = lineno
        self.argnames = ['.0']
        self.varargs = self.kwargs = None
    def getChildren(self):
        return self.code,
    def getChildNodes(self):
        return self.code,
    def __repr__(self):
        return "GenExpr(%s)" % (repr(self.code),)
class GenExprFor(Node):
    """One ``for assign in iter`` clause of a generator expression, with
    its ``if`` filter clauses; is_outmost marks the first clause."""
    def __init__(self, assign, iter, ifs, lineno=None):
        self.assign = assign
        self.iter = iter
        self.ifs = ifs
        self.lineno = lineno
        self.is_outmost = False
    def getChildren(self):
        children = []
        children.append(self.assign)
        children.append(self.iter)
        children.extend(flatten(self.ifs))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.assign)
        nodelist.append(self.iter)
        nodelist.extend(flatten_nodes(self.ifs))
        return tuple(nodelist)
    def __repr__(self):
        return "GenExprFor(%s, %s, %s)" % (repr(self.assign), repr(self.iter), repr(self.ifs))
class GenExprIf(Node):
    """An ``if test`` filter clause inside a generator expression."""
    def __init__(self, test, lineno=None):
        self.test = test
        self.lineno = lineno
    def getChildren(self):
        return self.test,
    def getChildNodes(self):
        return self.test,
    def __repr__(self):
        return "GenExprIf(%s)" % (repr(self.test),)
class GenExprInner(Node):
    """Body of a generator expression: result expr plus for/if qualifiers."""
    def __init__(self, expr, quals, lineno=None):
        self.expr = expr
        self.quals = quals
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr)
        children.extend(flatten(self.quals))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.expr)
        nodelist.extend(flatten_nodes(self.quals))
        return tuple(nodelist)
    def __repr__(self):
        return "GenExprInner(%s, %s)" % (repr(self.expr), repr(self.quals))
class Getattr(Node):
    """Attribute access: expr.attrname."""
    def __init__(self, expr, attrname, lineno=None):
        self.expr = expr
        self.attrname = attrname
        self.lineno = lineno
    def getChildren(self):
        return self.expr, self.attrname
    def getChildNodes(self):
        return self.expr,
    def __repr__(self):
        return "Getattr(%s, %s)" % (repr(self.expr), repr(self.attrname))
class Global(Node):
    """global statement naming one or more identifiers."""
    def __init__(self, names, lineno=None):
        self.names = names
        self.lineno = lineno
    def getChildren(self):
        return self.names,
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Global(%s)" % (repr(self.names),)
class If(Node):
    """if/elif chain: tests pairs each condition with its suite; else_ is
    the optional else suite."""
    def __init__(self, tests, else_, lineno=None):
        self.tests = tests
        self.else_ = else_
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.extend(flatten(self.tests))
        children.append(self.else_)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.tests))
        if self.else_ is not None:
            nodelist.append(self.else_)
        return tuple(nodelist)
    def __repr__(self):
        return "If(%s, %s)" % (repr(self.tests), repr(self.else_))
class IfExp(Node):
    """Conditional expression: ``then if test else else_``."""
    def __init__(self, test, then, else_, lineno=None):
        self.test = test
        self.then = then
        self.else_ = else_
        self.lineno = lineno
    def getChildren(self):
        return self.test, self.then, self.else_
    def getChildNodes(self):
        return self.test, self.then, self.else_
    def __repr__(self):
        return "IfExp(%s, %s, %s)" % (repr(self.test), repr(self.then), repr(self.else_))
class Import(Node):
    """import statement; names lists the imported module names."""
    def __init__(self, names, lineno=None):
        self.names = names
        self.lineno = lineno
    def getChildren(self):
        return self.names,
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Import(%s)" % (repr(self.names),)
class Invert(Node):
    """Bitwise inversion: ~expr."""
    def __init__(self, expr, lineno=None):
        self.expr = expr
        self.lineno = lineno
    def getChildren(self):
        return self.expr,
    def getChildNodes(self):
        return self.expr,
    def __repr__(self):
        return "Invert(%s)" % (repr(self.expr),)
class Keyword(Node):
    """Keyword argument in a call: name=expr."""
    def __init__(self, name, expr, lineno=None):
        self.name = name
        self.expr = expr
        self.lineno = lineno
    def getChildren(self):
        return self.name, self.expr
    def getChildNodes(self):
        return self.expr,
    def __repr__(self):
        return "Keyword(%s, %s)" % (repr(self.name), repr(self.expr))
class Lambda(Node):
    """lambda expression: signature plus a single expression body."""
    def __init__(self, argnames, defaults, varargs, kwargs, code, lineno=None):
        self.argnames = argnames
        self.defaults = defaults
        self.varargs = varargs
        self.kwargs = kwargs
        self.code = code
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.argnames)
        children.extend(flatten(self.defaults))
        children.append(self.varargs)
        children.append(self.kwargs)
        children.append(self.code)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.defaults))
        # NOTE(review): varargs/kwargs are flags, not Nodes (same divergence
        # from CPython's compiler.ast as in Function.getChildNodes) --
        # confirm intentional.
        nodelist.append(self.varargs)
        nodelist.append(self.kwargs)
        nodelist.append(self.code)
        return tuple(nodelist)
    def __repr__(self):
        return "Lambda(%s, %s, %s, %s, %s)" % (repr(self.argnames), repr(self.defaults), repr(self.varargs), repr(self.kwargs), repr(self.code))
class LeftShift(Node):
    """Left shift: left << right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "LeftShift(%s, %s)" % (repr(self.left), repr(self.right))
class List(Node):
    """List literal."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "List(%s)" % (repr(self.nodes),)
class CollComp(Node):
    """Common base class for comprehension nodes (ListComp/SetComp/DictComp)."""
    pass
class ListComp(CollComp):
    """List comprehension: [expr for ... if ...]; quals are the for/if clauses."""
    def __init__(self, expr, quals, lineno=None):
        self.expr = expr
        self.quals = quals
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr)
        children.extend(flatten(self.quals))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.expr)
        nodelist.extend(flatten_nodes(self.quals))
        return tuple(nodelist)
    def __repr__(self):
        return "ListComp(%s, %s)" % (repr(self.expr), repr(self.quals))
class ListCompFor(Node):
    """One ``for assign in list`` clause of a list comprehension, with its
    ``if`` filter clauses."""
    def __init__(self, assign, list, ifs, lineno=None):
        self.assign = assign
        self.list = list
        self.ifs = ifs
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.assign)
        children.append(self.list)
        children.extend(flatten(self.ifs))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.assign)
        nodelist.append(self.list)
        nodelist.extend(flatten_nodes(self.ifs))
        return tuple(nodelist)
    def __repr__(self):
        return "ListCompFor(%s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.ifs))
class ListCompIf(Node):
    """An ``if test`` filter clause inside a list comprehension."""
    def __init__(self, test, lineno=None):
        self.test = test
        self.lineno = lineno
    def getChildren(self):
        return self.test,
    def getChildNodes(self):
        return self.test,
    def __repr__(self):
        return "ListCompIf(%s)" % (repr(self.test),)
class SetComp(CollComp):
    """Set comprehension: {expr for ... if ...}."""
    def __init__(self, expr, quals, lineno=None):
        self.expr = expr
        self.quals = quals
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr)
        children.extend(flatten(self.quals))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.expr)
        nodelist.extend(flatten_nodes(self.quals))
        return tuple(nodelist)
    def __repr__(self):
        return "SetComp(%s, %s)" % (repr(self.expr), repr(self.quals))
class DictComp(CollComp):
    """Dict comprehension: {key: value for ... if ...}."""
    def __init__(self, key, value, quals, lineno=None):
        self.key = key
        self.value = value
        self.quals = quals
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.key)
        children.append(self.value)
        children.extend(flatten(self.quals))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.key)
        nodelist.append(self.value)
        nodelist.extend(flatten_nodes(self.quals))
        return tuple(nodelist)
    def __repr__(self):
        return "DictComp(%s, %s, %s)" % (repr(self.key), repr(self.value), repr(self.quals))
class Mod(Node):
    """Modulo (and %-formatting): left % right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "Mod(%s, %s)" % (repr(self.left), repr(self.right))
class Module(Node):
    """Top-level module: docstring plus the module body."""
    def __init__(self, doc, node, lineno=None):
        self.doc = doc
        self.node = node
        self.lineno = lineno
    def getChildren(self):
        return self.doc, self.node
    def getChildNodes(self):
        return self.node,
    def __repr__(self):
        return "Module(%s, %s)" % (repr(self.doc), repr(self.node))
class Mul(Node):
    """Multiplication: left * right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "Mul(%s, %s)" % (repr(self.left), repr(self.right))
class Name(Node):
    """Name reference (load context); the name itself is a plain string."""
    def __init__(self, name, lineno=None):
        self.name = name
        self.lineno = lineno
    def getChildren(self):
        return self.name,
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Name(%s)" % (repr(self.name),)
class Not(Node):
    """Boolean negation: not expr."""
    def __init__(self, expr, lineno=None):
        self.expr = expr
        self.lineno = lineno
    def getChildren(self):
        return self.expr,
    def getChildNodes(self):
        return self.expr,
    def __repr__(self):
        return "Not(%s)" % (repr(self.expr),)
class Or(Node):
    """Boolean 'or' over a sequence of subexpressions."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Or(%s)" % (repr(self.nodes),)
class Pass(Node):
    """pass statement (leaf node, no children)."""
    def __init__(self, lineno=None):
        self.lineno = lineno
    def getChildren(self):
        return ()
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Pass()"
class Power(Node):
    """Exponentiation: left ** right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "Power(%s, %s)" % (repr(self.left), repr(self.right))
class Print(Node):
    """print statement without trailing newline; dest is the optional
    ``print >> dest`` target (None when absent)."""
    def __init__(self, nodes, dest, lineno=None):
        self.nodes = nodes
        self.dest = dest
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.extend(flatten(self.nodes))
        children.append(self.dest)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        if self.dest is not None:
            nodelist.append(self.dest)
        return tuple(nodelist)
    def __repr__(self):
        return "Print(%s, %s)" % (repr(self.nodes), repr(self.dest))
class Printnl(Node):
    """print statement with trailing newline; dest is the optional
    ``print >> dest`` target (None when absent)."""
    def __init__(self, nodes, dest, nl, lineno=None):
        self.nodes = nodes
        self.dest = dest
        self.nl = nl
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.extend(flatten(self.nodes))
        children.append(self.dest)
        children.append(self.nl)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        if self.dest is not None:
            nodelist.append(self.dest)
        nodelist.append(self.nl)
        return tuple(nodelist)
    def __repr__(self):
        return "Printnl(%s, %s, %s)" % (repr(self.nodes), repr(self.dest), repr(self.nl))
class Raise(Node):
    """raise statement with up to three expressions; each may be None."""
    def __init__(self, expr1, expr2, expr3, lineno=None):
        self.expr1 = expr1
        self.expr2 = expr2
        self.expr3 = expr3
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr1)
        children.append(self.expr2)
        children.append(self.expr3)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        if self.expr1 is not None:
            nodelist.append(self.expr1)
        if self.expr2 is not None:
            nodelist.append(self.expr2)
        if self.expr3 is not None:
            nodelist.append(self.expr3)
        return tuple(nodelist)
    def __repr__(self):
        return "Raise(%s, %s, %s)" % (repr(self.expr1), repr(self.expr2), repr(self.expr3))
class Return(Node):
    """return statement."""
    def __init__(self, value, lineno=None):
        self.value = value
        self.lineno = lineno
    def getChildren(self):
        return self.value,
    def getChildNodes(self):
        return self.value,
    def __repr__(self):
        return "Return(%s)" % (repr(self.value),)
class RightShift(Node):
    """Right shift: left >> right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "RightShift(%s, %s)" % (repr(self.left), repr(self.right))
class Set(Node):
    """Set literal."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Set(%s)" % (repr(self.nodes),)
class Slice(Node):
    """Simple slice: expr[lower:upper]; lower/upper may be None, flags
    gives the access context."""
    def __init__(self, expr, flags, lower, upper, lineno=None):
        self.expr = expr
        self.flags = flags
        self.lower = lower
        self.upper = upper
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr)
        children.append(self.flags)
        children.append(self.lower)
        children.append(self.upper)
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.expr)
        if self.lower is not None:
            nodelist.append(self.lower)
        if self.upper is not None:
            nodelist.append(self.upper)
        return tuple(nodelist)
    def __repr__(self):
        return "Slice(%s, %s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.lower), repr(self.upper))
class Sliceobj(Node):
    """Extended-slice component holding the start/stop/step expressions."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Sliceobj(%s)" % (repr(self.nodes),)
class Stmt(Node):
    """Sequence of statements forming a suite."""
    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        nodelist = []
        nodelist.extend(flatten_nodes(self.nodes))
        return tuple(nodelist)
    def __repr__(self):
        return "Stmt(%s)" % (repr(self.nodes),)
class Sub(Node):
    """Subtraction: left - right."""
    def __init__(self, left, right, lineno=None):
        self.left = left
        self.right = right
        self.lineno = lineno
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "Sub(%s, %s)" % (repr(self.left), repr(self.right))
class Subscript(Node):
    """Subscript: expr[subs]; flags gives the access context."""
    def __init__(self, expr, flags, subs, lineno=None):
        self.expr = expr
        self.flags = flags
        self.subs = subs
        self.lineno = lineno
    def getChildren(self):
        children = []
        children.append(self.expr)
        children.append(self.flags)
        children.extend(flatten(self.subs))
        return tuple(children)
    def getChildNodes(self):
        nodelist = []
        nodelist.append(self.expr)
        nodelist.extend(flatten_nodes(self.subs))
        return tuple(nodelist)
    def __repr__(self):
        return "Subscript(%s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.subs))
class TryExcept(Node):
    """AST node for a ``try``/``except`` statement with optional ``else``."""

    def __init__(self, body, handlers, else_, lineno=None):
        self.body = body
        self.handlers = handlers
        self.else_ = else_
        self.lineno = lineno

    def getChildren(self):
        return (self.body,) + tuple(flatten(self.handlers)) + (self.else_,)

    def getChildNodes(self):
        # The ``else`` clause is optional; omit it when absent.
        children = (self.body,) + tuple(flatten_nodes(self.handlers))
        if self.else_ is not None:
            children += (self.else_,)
        return children

    def __repr__(self):
        return "TryExcept(%r, %r, %r)" % (self.body, self.handlers, self.else_)
class TryFinally(Node):
    """AST node for a ``try``/``finally`` statement."""

    def __init__(self, body, final_, lineno=None):
        self.body = body
        self.final_ = final_
        self.lineno = lineno

    def getChildren(self):
        return (self.body, self.final_)

    def getChildNodes(self):
        return (self.body, self.final_)

    def __repr__(self):
        return "TryFinally(%r, %r)" % (self.body, self.final_)
class Tuple(Node):
    """AST node for a tuple display: ``(a, b, ...)``."""

    def __init__(self, nodes, lineno=None):
        self.nodes = nodes
        self.lineno = lineno

    def getChildren(self):
        return tuple(flatten(self.nodes))

    def getChildNodes(self):
        return tuple(flatten_nodes(self.nodes))

    def __repr__(self):
        return "Tuple(%r)" % (self.nodes,)
class UnaryAdd(Node):
    """AST node for unary plus: ``+expr``."""

    def __init__(self, expr, lineno=None):
        self.expr = expr
        self.lineno = lineno

    def getChildren(self):
        return (self.expr,)

    def getChildNodes(self):
        return (self.expr,)

    def __repr__(self):
        return "UnaryAdd(%r)" % (self.expr,)
class UnarySub(Node):
    """AST node for unary minus: ``-expr``."""

    def __init__(self, expr, lineno=None):
        self.expr = expr
        self.lineno = lineno

    def getChildren(self):
        return (self.expr,)

    def getChildNodes(self):
        return (self.expr,)

    def __repr__(self):
        return "UnarySub(%r)" % (self.expr,)
class While(Node):
    """AST node for a ``while`` loop with optional ``else`` clause."""

    def __init__(self, test, body, else_, lineno=None):
        self.test = test
        self.body = body
        self.else_ = else_
        self.lineno = lineno

    def getChildren(self):
        return (self.test, self.body, self.else_)

    def getChildNodes(self):
        # The ``else`` clause is optional; omit it when absent.
        if self.else_ is not None:
            return (self.test, self.body, self.else_)
        return (self.test, self.body)

    def __repr__(self):
        return "While(%r, %r, %r)" % (self.test, self.body, self.else_)
class With(Node):
    """AST node for a ``with`` statement: ``with expr as vars: body``."""

    def __init__(self, expr, vars, body, lineno=None):
        self.expr = expr
        self.vars = vars
        self.body = body
        self.lineno = lineno

    def getChildren(self):
        return (self.expr, self.vars, self.body)

    def getChildNodes(self):
        # ``vars`` is None when the ``as`` target is omitted.
        if self.vars is not None:
            return (self.expr, self.vars, self.body)
        return (self.expr, self.body)

    def __repr__(self):
        return "With(%r, %r, %r)" % (self.expr, self.vars, self.body)
class Yield(Node):
    """AST node for a ``yield`` expression."""

    def __init__(self, value, lineno=None):
        self.value = value
        self.lineno = lineno

    def getChildren(self):
        return (self.value,)

    def getChildNodes(self):
        return (self.value,)

    def __repr__(self):
        return "Yield(%r)" % (self.value,)
# Build the ``nodes`` registry: map every Node subclass defined in this
# module to its lowercased class name (e.g. "while" -> While).  ``name`` and
# ``obj`` intentionally remain bound at module level afterwards.
for name, obj in list(globals().items()):
    if isinstance(obj, type) and issubclass(obj, Node):
        nodes[name.lower()] = obj
| apache-2.0 |
samthetechie/pyFolia | venv/lib/python2.7/site-packages/requests/packages/urllib3/util/url.py | 304 | 4273 | from collections import namedtuple
from ..exceptions import LocationParseError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Bug fix: this was previously spelled ``slots = ()``, which only created
    # a useless class attribute.  ``__slots__ = ()`` is the namedtuple
    # subclassing idiom that actually suppresses the per-instance __dict__.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
        # All fields default to None so partially-parsed URLs can be built.
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example: ::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None

    # Track the earliest occurrence of any delimiter.
    for delim in delims:
        found = s.find(delim)
        if found < 0:
            continue
        if best_idx is None or found < best_idx:
            best_idx = found
            best_delim = delim

    if best_idx is None:
        # No delimiter present: the whole input is the first part.
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example: ::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.
    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None
    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)
    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])
    if delim:
        # Reassemble the path
        path = delim + path_
    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)
    # IPv6
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'
    # Port
    if ':' in url:
        _host, port = url.split(':', 1)
        if not host:
            # Host was not already captured by the IPv6 branch above.
            host = _host
        if port:
            # If given, ports must be integers.
            if not port.isdigit():
                raise LocationParseError(url)
            port = int(port)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None
    elif not host and url:
        host = url
    if not path:
        # Nothing after the authority: query/fragment cannot exist.
        return Url(scheme, auth, host, port, path, query, fragment)
    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)
    # Query
    if '?' in path:
        path, query = path.split('?', 1)
    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    parsed = parse_url(url)
    return parsed.scheme or 'http', parsed.hostname, parsed.port
| gpl-3.0 |
xzackli/isocurvature_2017 | analysis/plot_derived_parameters/OLD_make_beta_plots.py | 1 | 4054 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from scipy.stats import gaussian_kde
from pprint import pprint
import sys
import os
from astropy.io import ascii
from astropy.table import vstack
# chainfile = "/Users/zequnl/Installs/montepython_public/chains/example/2016-10-18_10000__1.txt"
# CONFIGURATION -------------
# chainfile = "chains/CDI_2/2016-11-02_1000000__1.txt"
# paramfile = "chains/CDI_2/2016-11-02_1000000_.paramnames"
chainfile = "chains/CDI_2/2016-11-02_1000000__1.txt"
paramfile = "chains/CDI_2/2016-11-02_1000000_.paramnames"
xname = 'P_{II}^1'
yname = 'P_{RI}^1'
options = ""
chainfolder = "chains/CDI_2/"

# Command-line usage:
#   script.py CHAINFILE PARAMFILE [XNAME YNAME [OPTIONS...]]
#   script.py info                 -> print parameter names and exit
if len(sys.argv) >= 3:
    chainfile = sys.argv[1]
    paramfile = sys.argv[2]
    print(paramfile)
if len(sys.argv) >= 5:
    xname = sys.argv[3]
    yname = sys.argv[4]
    options = sys.argv[5:]
elif len(sys.argv) == 2:
    if sys.argv[1] == "info":
        # "info" mode: list parameter names from the local 'params' file.
        params = np.array(ascii.read('params', delimiter="\t", format="no_header")['col1']).tolist()
        print(params)
        sys.exit(0)
# ---------------------------
# Read the parameter names (one per row) from the .paramnames file.
params = np.array(ascii.read(paramfile, delimiter="\t", format="no_header")['col1'])

# Concatenate every chain file in the folder, dropping the first 300
# rows of each chain as burn-in.
data_all = None
for filename in os.listdir(chainfolder):
    if filename.startswith("201") and filename.endswith(".txt"):
        chainfile = os.path.join(chainfolder, filename)
        print(chainfile)
        data = (ascii.read(chainfile, delimiter="\s"))[300:]

        # set up column names (read in from param file)
        data['col1'].name = 'acceptance'
        data['col2'].name = 'likelihood'
        for i in range(3, len(params) + 3):
            data['col' + str(i)].name = params[i - 3]

        # Bug fix: ``data_all == None`` performs an elementwise comparison
        # once data_all holds an astropy Table; an identity test is meant.
        if data_all is None:
            data_all = data
        else:
            data_all = vstack([data_all, data])
        print(len(data), len(data_all))

data = data_all
print(len(data), "rows")

x = data[xname]
y = data[yname]
t = np.array(range(len(x)))
# we look for the -s option, and then find the number afterwards.
# that's where we start
if "-s" in options:
    s = int(sys.argv[sys.argv.index("-s")+1])
    x = x[s:]
    y = y[s:]

# Spectrum amplitudes at the two pivot scales; pri2 is reconstructed from
# pri1 assuming a scale-independent correlation fraction (TODO confirm).
prr1 = data['P_{RR}^1']; pii1 = data['P_{II}^1']; pri1 = data['P_{RI}^1'];
prr2 = data['P_{RR}^2']; pii2 = data['P_{II}^2']; pri2 = pri1 * np.sqrt(pii2 * prr2 / (pii1 * prr1))
# make density plot
# sc(x,y)

# Isocurvature fraction at each pivot scale, and the correlation cosine.
beta_iso1 = pii1 / (prr1 + pii1)
beta_iso2 = pii2 / (prr2 + pii2)
alpha = pri1 / np.sqrt( pii1 * prr1 )

# Effective spectral indices between the two pivot scales:
# \frac{\log( P_{AB}^2 / P_{AB}^1 )}{\log ( k_2 / k_1 )
k1 = 0.002 # Mpc^{-1}
k2 = 0.1 # Mpc^{-1}
nRR = np.log(prr2/prr1) / np.log(k2/k1)
nRI = np.log(pri2/pri1) / np.log(k2/k1)
nII = np.log(pii2/pii1) / np.log(k2/k1)
def denplot(list_data, ax, name="data", lower=0.0, upper=0.25,
            nbins=20, extend=False, extent=0.1, cov=0.2):
    """
    Plot a smoothed (Gaussian-KDE) histogram of ``list_data`` on ``ax``.

    Parameters
    ----------
    list_data : array-like
        Samples whose density is estimated.
    ax : matplotlib axis
        Axis to draw on.
    name : str
        X-axis label.
    lower, upper : float
        Plot range on the x axis.
    nbins : int
        Unused; kept for backward compatibility with existing callers.
    extend : bool
        If True, mirror the samples about zero before the KDE so the
        density does not artificially fall off at a hard zero boundary.
    extent : float
        Unused; kept for backward compatibility with existing callers.
    cov : float
        Fixed covariance (bandwidth) factor for the KDE.
    """
    x = np.linspace(lower, upper, 150)
    if extend:
        # Mirror the samples about zero for a boundary-corrected estimate.
        # (Previously also computed unused ``bools``/``new_weights`` locals;
        # they had no effect and have been removed.)
        mirrored = np.hstack((list_data, -list_data))
        density = gaussian_kde(mirrored)
    else:
        density = gaussian_kde(list_data)
    # Pin the KDE bandwidth to a constant factor instead of the default rule.
    density.covariance_factor = lambda: cov
    density._compute_covariance()
    ax.plot(x, density(x), "k--")
    # NOTE(review): ``data`` is the module-level chain table; the histogram is
    # weighted by the MCMC acceptance column.  It is only used by the
    # commented-out overlay below, but is kept for easy re-enabling.
    counts, bins = np.histogram(list_data, bins=x, weights=data['acceptance'], density=True)
    # ax.plot( x[:-1], counts, "r." )
    ax.get_yaxis().set_ticks([])
    # ax.set_ylim( 0.0, counts.max() )
    ax.set_xlim(lower, upper)
    ax.set_xlabel(name)
# Four-panel summary figure: isocurvature fractions at both pivot scales,
# the correlation cosine, and the isocurvature spectral index.
fig = plt.figure(figsize=(12,3))
ax1 = fig.add_subplot(141)
ax2 = fig.add_subplot(142)
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)

denplot( beta_iso1, ax1, r"$\beta_{iso}(k_{low})$", 0.0, 0.25, extend=True )
denplot( beta_iso2, ax2, r"$\beta_{iso}(k_{high})$", 0.0, 0.8, extend=True)
denplot( alpha, ax3, r"$\cos \Delta$", -0.5, 0.5 )
denplot( nII, ax4, r"$n_{II}$", -1.0, 2.8 )
plt.tight_layout()
plt.savefig("../../figures/beta_planck.pdf")
plt.show()
## TESTING
| mit |
CaptainTrunky/googletest | test/gtest_break_on_failure_unittest.py | 2140 | 7339 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.

IS_WINDOWS = os.name == 'nt'

# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'

# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'

# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'

# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'

# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')

# Convenience aliases for the shared test-utility helpers.
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  child = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if child.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
        variable; None if the variable should be unset.
      flag_value: value of the --gtest_break_on_failure flag;
        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    # Describe the environment setting for the failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Translate the requested flag value into the actual command-line flag.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Restore the environment so later tests start from a clean state.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # Catch-exceptions mode only exists on Windows builds of Google Test.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
# Entry point: delegate to the shared gtest test-runner harness.
if __name__ == '__main__':
  gtest_test_utils.Main()
| bsd-3-clause |
TheProjecter/igatools | source/basis_functions/multi_patch_space.inst.py | 1 | 1764 | #-+--------------------------------------------------------------------
# Igatools a general purpose Isogeometric analysis library.
# Copyright (C) 2012-2015 by the igatools authors (see authors.txt).
#
# This file is part of the igatools library.
#
# The igatools library is free software: you can use it, redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-+--------------------------------------------------------------------
from init_instantiation_data import *
# Headers that must be #included by the generated instantiation file.
include_files = ['basis_functions/bspline_space.h',
                 'basis_functions/nurbs_space.h',
                 'geometry/push_forward.h',
                 'geometry/cartesian_grid_element_accessor.h',
                 'geometry/mapping_element_accessor.h',
                 'geometry/push_forward_element_accessor.h',
                 'basis_functions/bspline_element_accessor.h',
                 'basis_functions/nurbs_element_accessor.h',
                 'basis_functions/physical_space.h',
                 'basis_functions/physical_space_element_accessor.h']
data = Instantiation(include_files)
(f, inst) = (data.file_output, data.inst)

# Emit one explicit template instantiation per physical-space type known to
# the instantiation data.
for space in inst.PhysSpaces_v2:
    f.write( 'template class MultiPatchSpace<%s>;\n' %space.name)
| gpl-3.0 |
pedrobaeza/bank-payment | account_banking_payment_transfer/__openerp__.py | 7 | 1900 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
# (C) 2014 ACSONE SA/NV (<http://acsone.eu>).
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest for the payment-transfer-account addon.
{
    'name': 'Account Banking - Payments Transfer Account',
    'version': '0.2',
    'license': 'AGPL-3',
    'author': "Banking addons community,Odoo Community Association (OCA)",
    'website': 'https://github.com/OCA/banking',
    'category': 'Banking addons',
    # Function run once right after the module is installed.
    'post_init_hook': 'set_date_sent',
    'depends': [
        'account_banking_payment_export',
    ],
    # Views and workflow definitions loaded on install/update.
    'data': [
        'view/payment_mode.xml',
        'workflow/account_payment.xml',
        'view/account_payment.xml',
    ],
    # YAML test scenarios executed by the OpenERP test runner.
    'test': [
        'test/data.yml',
        'test/test_payment_method.yml',
        'test/test_partial_payment_refunded.yml',
        'test/test_partial_payment_transfer.yml',
    ],
    'auto_install': False,
    'installable': True,
}
| agpl-3.0 |
ryangallen/django | django/contrib/gis/admin/options.py | 379 | 5649 | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.db import models
from django.contrib.gis.gdal import HAS_GDAL, OGRGeomType
from django.core.exceptions import ImproperlyConfigured
# EPSG:3857, the spherical/web-mercator projection used by most tile servers.
spherical_mercator_srid = 3857


class GeoModelAdmin(ModelAdmin):
    """
    The administration options class for Geographic models. Map settings
    may be overloaded from their defaults to create custom maps.
    """

    # The default map settings that may be overloaded -- still subject
    # to API changes.
    default_lon = 0
    default_lat = 0
    default_zoom = 4
    display_wkt = False
    display_srid = False
    extra_js = []
    num_zoom = 18
    max_zoom = False
    min_zoom = False
    units = False
    max_resolution = False
    max_extent = False
    modifiable = True
    mouse_position = True
    scale_text = True
    layerswitcher = True
    scrollable = True
    map_width = 600
    map_height = 400
    map_srid = 4326
    map_template = 'gis/admin/openlayers.html'
    openlayers_url = 'http://openlayers.org/api/2.13/OpenLayers.js'
    point_zoom = num_zoom - 6
    wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
    wms_layer = 'basic'
    wms_name = 'OpenLayers WMS'
    wms_options = {'format': 'image/jpeg'}
    debug = False
    widget = OpenLayersWidget

    @property
    def media(self):
        "Injects OpenLayers JavaScript into the admin."
        media = super(GeoModelAdmin, self).media
        media.add_js([self.openlayers_url])
        media.add_js(self.extra_js)
        return media

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Overloaded from ModelAdmin so that an OpenLayersWidget is used
        for viewing/editing 2D GeometryFields (OpenLayers 2 does not support
        3D editing).
        """
        if isinstance(db_field, models.GeometryField) and db_field.dim < 3:
            # ``request`` is not a valid formfield kwarg; drop it if present.
            kwargs.pop('request', None)
            # Setting the widget with the newly defined widget.
            kwargs['widget'] = self.get_map_widget(db_field)
            return db_field.formfield(**kwargs)
        else:
            return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)

    def get_map_widget(self, db_field):
        """
        Returns a subclass of the OpenLayersWidget (or whatever was specified
        in the `widget` attribute) using the settings from the attributes set
        in this class.
        """
        is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
        if is_collection:
            if db_field.geom_type == 'GEOMETRYCOLLECTION':
                collection_type = 'Any'
            else:
                collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
        else:
            collection_type = 'None'

        # Dynamically build a widget subclass whose class attributes carry
        # the map configuration into the template context.
        class OLMap(self.widget):
            template = self.map_template
            geom_type = db_field.geom_type

            # Render the extra WMS options as a JavaScript object fragment.
            wms_options = ''
            if self.wms_options:
                wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
                wms_options = ', %s' % ', '.join(wms_options)

            params = {'default_lon': self.default_lon,
                      'default_lat': self.default_lat,
                      'default_zoom': self.default_zoom,
                      'display_wkt': self.debug or self.display_wkt,
                      'geom_type': OGRGeomType(db_field.geom_type),
                      'field_name': db_field.name,
                      'is_collection': is_collection,
                      'scrollable': self.scrollable,
                      'layerswitcher': self.layerswitcher,
                      'collection_type': collection_type,
                      'is_generic': db_field.geom_type == 'GEOMETRY',
                      'is_linestring': db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
                      'is_polygon': db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
                      'is_point': db_field.geom_type in ('POINT', 'MULTIPOINT'),
                      'num_zoom': self.num_zoom,
                      'max_zoom': self.max_zoom,
                      'min_zoom': self.min_zoom,
                      'units': self.units,  # likely should get from object
                      'max_resolution': self.max_resolution,
                      'max_extent': self.max_extent,
                      'modifiable': self.modifiable,
                      'mouse_position': self.mouse_position,
                      'scale_text': self.scale_text,
                      'map_width': self.map_width,
                      'map_height': self.map_height,
                      'point_zoom': self.point_zoom,
                      'srid': self.map_srid,
                      'display_srid': self.display_srid,
                      'wms_url': self.wms_url,
                      'wms_layer': self.wms_layer,
                      'wms_name': self.wms_name,
                      'wms_options': wms_options,
                      'debug': self.debug,
                      }
        return OLMap
class OSMGeoAdmin(GeoModelAdmin):
    # GeoModelAdmin preconfigured for OpenStreetMap tiles in web mercator.
    map_template = 'gis/admin/osm.html'
    num_zoom = 20
    map_srid = spherical_mercator_srid
    max_extent = '-20037508,-20037508,20037508,20037508'
    max_resolution = '156543.0339'
    point_zoom = num_zoom - 6
    units = 'm'

    def __init__(self, *args):
        # Refuse to instantiate without the GDAL libraries available.
        if not HAS_GDAL:
            raise ImproperlyConfigured("OSMGeoAdmin is not usable without GDAL libs installed")
        super(OSMGeoAdmin, self).__init__(*args)
| bsd-3-clause |
graphite-project/graphite-web | webapp/tests/test_readers_whisper.py | 4 | 7083 | from .base import TestCase
import os
import mock
import shutil
import time
from django.conf import settings
import whisper
import gzip
from graphite.readers import WhisperReader, GzippedWhisperReader
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
class WhisperReadersTests(TestCase):
    # Timestamp of the datapoints written by create_whisper_hosts().
    start_ts = 0

    # Create/wipe test whisper files
    hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')

    # worker1/worker2: valid whisper files; worker3: empty (corrupt) file;
    # worker4: gzipped copy of worker1.
    worker1 = hostcpu.replace('hostname', 'worker1')
    worker2 = hostcpu.replace('hostname', 'worker2')
    worker3 = hostcpu.replace('hostname', 'worker3')
    worker4 = hostcpu.replace('hostname', 'worker4')
    worker4 = worker4.replace('cpu.wsp', 'cpu.wsp.gz')

    def create_whisper_hosts(self):
        # Build the fixture files: 1-second resolution, 60-point archives.
        self.start_ts = int(time.time())
        try:
            os.makedirs(self.worker1.replace('cpu.wsp', ''))
            os.makedirs(self.worker2.replace('cpu.wsp', ''))
            os.makedirs(self.worker3.replace('cpu.wsp', ''))
            os.makedirs(self.worker4.replace('cpu.wsp.gz', ''))
        except OSError:
            pass

        whisper.create(self.worker1, [(1, 60)])
        whisper.create(self.worker2, [(1, 60)])
        open(self.worker3, 'a').close()
        whisper.update(self.worker1, 1, self.start_ts)
        whisper.update(self.worker2, 2, self.start_ts)

        # worker4 is a gzip-compressed copy of worker1's database.
        with open(self.worker1, 'rb') as f_in, gzip.open(self.worker4, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)

    def wipe_whisper_hosts(self):
        # Best-effort cleanup of all fixture files and directories.
        try:
            os.remove(self.worker1)
            os.remove(self.worker2)
            os.remove(self.worker3)
            os.remove(self.worker4)
            shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'hosts'))
        except OSError:
            pass

    #
    # GzippedWHisper Reader tests
    #

    # Confirm the reader object is not none
    def test_GzippedWhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    #  Because the intervals returned from Whisper are subsecond,
    #  we truncate to int for this comparison, otherwise it's impossible
    def test_GzippedWhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        ts = int(time.time())

        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start), ts - 60)
            # Allow a one-second race between fixture creation and the check.
            self.assertIn(int(interval.end), [ts, ts - 1])

        # read it again to validate cache works
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start),ts - 60)
            self.assertIn(int(interval.end), [ts, ts - 1])

    # Confirm fetch works.
    def test_GzippedWhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    #
    # WHisper Reader tests
    #

    # Confirm the reader object is not none
    def test_WhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    #  Because the intervals returned from Whisper are subsecond,
    #  we truncate to int for this comparison, otherwise it's impossible
    def test_WhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        ts = int(time.time())

        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start),ts - 60)
            self.assertIn(int(interval.end), [ts, ts - 1])

        # read it again to validate cache works
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start),ts - 60)
            self.assertIn(int(interval.end), [ts, ts - 1])

    # Confirm get_raw_step works
    def test_WhisperReader_get_raw_step(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        raw_step = reader.get_raw_step()
        self.assertEqual(int(raw_step),1)

        # read it again to validate cache works
        raw_step = reader.get_raw_step()
        self.assertEqual(int(raw_step),1)

    # Confirm fetch works.
    def test_WhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    # Whisper Reader returns None when the underlying fetch yields no data.
    @mock.patch('whisper.fetch')
    def test_WhisperReader_fetch_returns_no_data(self, whisper_fetch):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        whisper_fetch.return_value = None
        self.assertEqual(reader.fetch(self.start_ts-5, self.start_ts), None)

    # Whisper Reader broken file
    def test_WhisperReader_broken_file(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        # Test broken whisper file: overwrite part of the header.
        f = open(self.worker2, 'rb+')
        f.seek(10)
        f.write(b'Bad Data')
        f.close()

        reader = WhisperReader(self.worker2, 'hosts.worker2.cpu')
        with self.assertRaises(Exception):
            reader.fetch(self.start_ts-5, self.start_ts)

    # Whisper Reader missing file
    @mock.patch('graphite.logger.log.exception')
    def test_WhisperReader_missing_file(self, log_exception):
        path = 'missing/file.wsp'
        reader = WhisperReader(path, 'hosts.worker2.cpu')
        self.assertEqual(reader.fetch(self.start_ts-5, self.start_ts), None)
        log_exception.assert_called_with("Failed fetch of whisper file '%s'" % path)

    # Whisper Reader CarbonLink Query returns a dict
    @mock.patch('graphite.carbonlink.CarbonLinkPool.query')
    def test_WhisperReader_CarbonLinkQuery(self, carbonlink_query):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)

        # An empty CarbonLink result must not disturb the on-disk values.
        carbonlink_query.return_value = {}
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])
| apache-2.0 |
darmaa/odoo | addons/account/project/wizard/account_analytic_balance_report.py | 378 | 2136 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_balance(osv.osv_memory):
    """Wizard collecting a date range and options for the analytic balance report."""
    _name = 'account.analytic.balance'
    _description = 'Account Analytic Balance'
    _columns = {
        'date1': fields.date('Start of period', required=True),
        'date2': fields.date('End of period', required=True),
        'empty_acc': fields.boolean('Empty Accounts ? ', help='Check if you want to display Accounts with 0 balance too.'),
    }
    _defaults = {
        # Default period: January 1st of the current year through today.
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def check_report(self, cr, uid, ids, context=None):
        """Collect the wizard values and launch the analytic balance report.

        Returns the report action dict produced by the 'report' pool for
        'account.report_analyticbalance', scoped to the active analytic
        accounts from the context.
        """
        if context is None:
            context = {}
        # Pass the context to read() so translatable / context-dependent
        # field values are returned correctly (the original call dropped it).
        data = self.read(cr, uid, ids, context=context)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'account.analytic.account',
            'form': data,
        }
        datas['form']['active_ids'] = context.get('active_ids', False)
        return self.pool['report'].get_action(
            cr, uid, [], 'account.report_analyticbalance',
            data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
suutari-ai/shoop | shuup/admin/error_handlers.py | 2 | 1379 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.shortcuts import render
from shuup.core.error_handling import ErrorPageHandler
class AdminPageErrorHandler(ErrorPageHandler):
    """
    Page Error handler for Shuup Admin
    """

    @classmethod
    def can_handle_error(cls, request, error_status):
        # A 404 means no URL matched at all, so there is no elegant way to
        # tell whether the request was aimed at the admin.
        if error_status == 404:
            return False
        # Static and media assets are never admin pages either.
        if request.path.startswith((settings.STATIC_URL, settings.MEDIA_URL)):
            return False
        # A resolved URL lets us check whether the matched view belongs to
        # the Shuup admin application.
        if request.resolver_match:
            from shuup.admin import ShuupAdminAppConfig
            return request.resolver_match.app_name == ShuupAdminAppConfig.label
        return False

    @classmethod
    def handle_error(cls, request, error_status):
        # Render the admin error template that matches the status code.
        template_name = "shuup/admin/errors/{}.jinja".format(error_status)
        return render(request, template_name, status=error_status)
| agpl-3.0 |
fastly/sshuttle | Documentation/md2man.py | 20 | 7344 | #!/usr/bin/env python
import sys, os, markdown, re
from BeautifulSoup import BeautifulSoup
def _split_lines(s):
return re.findall(r'([^\n]*\n?)', s)
class Writer:
    """Stateful emitter for troff output on stdout.

    Tracks paragraph and bullet-indent state so that macros and text are
    separated by the right line breaks and .PP/.IP requests.
    """

    def __init__(self):
        self.started = False    # currently inside a paragraph?
        self.indent = 0         # current bullet indentation in columns
        self.last_wrote = '\n'  # last emitted text, to detect missing newlines

    def _write(self, s):
        if not s:
            return
        self.last_wrote = s
        sys.stdout.write(s)

    def writeln(self, s):
        if not s:
            return
        self.linebreak()
        self._write('%s\n' % s)

    def write(self, s):
        if not s:
            return
        self.para()
        for line in _split_lines(s):
            # Escape lines starting with '.' so troff does not treat
            # them as requests.
            escape = '\\&' if line.startswith('.') else ''
            self._write(escape + line)

    def linebreak(self):
        if not self.last_wrote.endswith('\n'):
            self._write('\n')

    def para(self, bullet=None):
        # Open a paragraph (.PP) or indented item (.IP) if none is open.
        if self.started:
            return
        if not bullet:
            bullet = ' '
        if self.indent:
            assert self.indent >= 2
            prefix = ' ' * (self.indent - 2) + bullet + ' '
            self.writeln('.IP "%s" %d' % (prefix, self.indent))
        else:
            self.writeln(_macro('.PP'))
        self.started = True

    def end_para(self):
        self.linebreak()
        self.started = False

    def start_bullet(self):
        self.indent += 3
        self.para(bullet='\\[bu]')

    def end_bullet(self):
        self.indent -= 3
        self.end_para()
# Single module-level writer instance shared by all emit helpers below.
w = Writer()
def _macro(name, *args):
if not name.startswith('.'):
raise ValueError('macro names must start with "."')
fixargs = []
for i in args:
i = str(i)
i = i.replace('\\', '')
i = i.replace('"', "'")
if (' ' in i) or not i:
i = '"%s"' % i
fixargs.append(i)
return ' '.join([name] + list(fixargs))
def macro(name, *args):
    # Emit a formatted troff macro line through the shared writer.
    w.writeln(_macro(name, *args))
def _force_string(owner, tag):
if tag.string:
return tag.string
else:
out = ''
for i in tag:
if not (i.string or i.name in ['a', 'br']):
raise ValueError('"%s" tags must contain only strings: '
'got %r: %r' % (owner.name, tag.name, tag))
out += _force_string(owner, i)
return out
def _clean(s):
s = s.replace('\\', '\\\\')
return s
def _bitlist(tag):
    """Yield (tagname, text) pairs for the contents of *tag*.

    Plain text is yielded line-by-line with a tagname of None; <a> and
    <br> children are also treated as plain text. All text is backslash
    escaped via _clean().
    """
    # Navigable strings have no .contents attribute; render them as raw
    # text lines. (Fixed: identity comparison with None, not '=='.)
    if getattr(tag, 'contents', None) is None:
        for line in _split_lines(str(tag)):
            yield None, _clean(line)
    else:
        for child in tag:
            name = getattr(child, 'name', None)
            if name in ['a', 'br']:
                name = None  # just treat as simple text
            s = _force_string(tag, child)
            if name:
                yield name, _clean(s)
            else:
                for line in _split_lines(s):
                    yield None, _clean(line)
def _bitlist_simple(tag):
    """Yield only the text of *tag*, allowing just inline formatting tags.

    Raises:
      ValueError: if a child tag other than em/strong/code is present.
    """
    for typ, text in _bitlist(tag):
        # Fixed idiom: 'typ not in' instead of 'not typ in'.
        if typ and typ not in ['em', 'strong', 'code']:
            raise ValueError('unexpected tag %r inside %r' % (typ, tag.name))
        yield text
def _text(bitlist):
out = ''
for typ,text in bitlist:
if not typ:
out += text
elif typ == 'em':
out += '\\fI%s\\fR' % text
elif typ in ['strong', 'code']:
out += '\\fB%s\\fR' % text
else:
raise ValueError('unexpected tag %r inside %r' % (typ, tag.name))
out = out.strip()
out = re.sub(re.compile(r'^\s+', re.M), '', out)
return out
def text(tag):
    # Render a tag's flattened contents as formatted troff paragraph text.
    w.write(_text(_bitlist(tag)))
# This is needed because .BI (and .BR, .RB, etc) are weird little state
# machines that alternate between two fonts. So if someone says something
# like foo<b>chicken</b><b>wicken</b>dicken we have to convert that to
# .BI foo chickenwicken dicken
def _boldline(l):
    """Emit a .BI macro line, merging adjacent runs of the same font."""
    out = ['']
    last_bold = False
    for typ, text in l:
        nonzero = not not typ  # coerce tag name to bool: bold vs. roman
        if nonzero != last_bold:
            # Font flipped: start a new argument for .BI's alternation.
            last_bold = not last_bold
            out.append('')
        # Collapse internal whitespace within each argument.
        out[-1] += re.sub(r'\s+', ' ', text)
    macro('.BI', *out)
def do_definition(tag):
    """Emit a definition-list entry (.TP): bold term, then its description.

    The tag's text is split at the first line whose stripped form starts
    with ': ' — everything before is the term (bold), everything after is
    the body.
    """
    w.end_para()
    macro('.TP')
    w.started = True
    split = 0        # becomes 1 once the ': ' separator has been seen
    pre = []         # (tag, text) pairs forming the term
    post = []        # (tag, text) pairs forming the description
    for typ, text in _bitlist(tag):
        if split:
            post.append((typ, text))
        elif text.lstrip().startswith(': '):
            # Separator line: drop the ': ' prefix, rest goes to the body.
            split = 1
            post.append((typ, text.lstrip()[2:].lstrip()))
        else:
            pre.append((typ, text))
    _boldline(pre)
    w.write(_text(post))
def do_list(tag):
    """Render a <ul> as a troff bullet list; only <li> children allowed.

    Whitespace-only text nodes between items are ignored.

    Raises:
      ValueError: if a non-<li> child tag is encountered.
    """
    for i in tag:
        name = getattr(i, 'name', '').lower()
        if not name and not str(i).strip():
            # Whitespace between list items - ignore.
            pass
        elif name != 'li':
            raise ValueError('only <li> is allowed inside <ul>: got %r' % i)
        else:
            w.start_bullet()
            for xi in i:
                do(xi)
            w.end_para()
            w.end_bullet()
def do(tag):
    """Dispatch one parsed HTML element to the matching troff emitter.

    Handles plain text, h1/h2 headers, <pre> code blocks, paragraphs
    (including faked-up definition lists), and <ul> lists.

    Raises:
      ValueError: for h3+ headers or any other unsupported tag.
    """
    name = getattr(tag, 'name', '').lower()
    if not name:
        # Bare text node.
        text(tag)
    elif name == 'h1':
        macro('.SH', _force_string(tag, tag).upper())
        w.started = True
    elif name == 'h2':
        macro('.SS', _force_string(tag, tag))
        w.started = True
    elif name.startswith('h') and len(name) == 2:
        raise ValueError('%r invalid - man page headers must be h1 or h2'
                         % name)
    elif name == 'pre':
        # Code block: indented, no-fill region.
        t = _force_string(tag.code, tag.code)
        if t.strip():
            macro('.RS', '+4n')
            macro('.nf')
            w.write(_clean(t).rstrip())
            macro('.fi')
            macro('.RE')
            w.end_para()
    elif name == 'p' or name == 'br':
        g = re.match(re.compile(r'([^\n]*)\n +: +(.*)', re.S), str(tag))
        if g:
            # it's a definition list (which some versions of python-markdown
            # don't support, including the one in Debian-lenny, so we can't
            # enable that markdown extension). Fake it up.
            do_definition(tag)
        else:
            text(tag)
            w.end_para()
    elif name == 'ul':
        do_list(tag)
    else:
        raise ValueError('non-man-compatible html tag %r' % name)
# Defaults for the man-page .TH header fields; overridden by the
# pandoc-style '%' header lines parsed below.
PROD='Untitled'
VENDOR='Vendor Name'
SECTION='9'
GROUPNAME='User Commands'
DATE=''
AUTHOR=''

# Read markdown input from the named files, or stdin if none given.
# NOTE: str.decode() here means this script targets Python 2.
lines = []
if len(sys.argv) > 1:
    for n in sys.argv[1:]:
        lines += open(n).read().decode('utf8').split('\n')
else:
    lines += sys.stdin.read().decode('utf8').split('\n')

# parse pandoc-style document headers (not part of markdown); each one is
# optional, and each depends on the previous having matched:
#   % prod(section) vendor
#   % author
#   % date
#   % groupname
g = re.match(r'^%\s+(.*?)\((.*?)\)\s+(.*)$', lines[0])
if g:
    PROD = g.group(1)
    SECTION = g.group(2)
    VENDOR = g.group(3)
    lines.pop(0)
    g = re.match(r'^%\s+(.*?)$', lines[0])
    if g:
        AUTHOR = g.group(1)
        lines.pop(0)
        g = re.match(r'^%\s+(.*?)$', lines[0])
        if g:
            DATE = g.group(1)
            lines.pop(0)
            g = re.match(r'^%\s+(.*?)$', lines[0])
            if g:
                GROUPNAME = g.group(1)
                lines.pop(0)

inp = '\n'.join(lines)
# Append an AUTHOR section, escaping '<' so email addresses survive troff.
if AUTHOR:
    inp += ('\n# AUTHOR\n\n%s\n' % AUTHOR).replace('<', '\\<')

# Convert markdown -> HTML, then walk the parsed HTML emitting troff.
html = markdown.markdown(inp)
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)

macro('.TH', PROD.upper(), SECTION, DATE, VENDOR, GROUPNAME)
macro('.ad', 'l')  # left justified
macro('.nh')  # disable hyphenation
for e in soup:
    do(e)
| lgpl-2.1 |
flgiordano/netcash | +/google-cloud-sdk/lib/googlecloudsdk/calliope/backend.py | 1 | 54356 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend stuff for the calliope.cli module.
Not to be used by mortals.
"""
import argparse
import os
import re
import sys
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import display
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.calliope import usage_text
from googlecloudsdk.core import cli as core_cli
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import remote_completion
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import pkg_resources
class ArgumentException(Exception):
  """Raised when the provided command-line arguments are invalid."""
class LayoutException(Exception):
  """Raised for problems with the module directory structure."""
class CommandLoadFailure(Exception):
  """Raised when a command or group module cannot be imported.

  Attributes:
    command: str, The dotted path of the command that failed to load.
    root_exception: Exception, The underlying import error.
  """

  def __init__(self, command, root_exception):
    self.command = command
    self.root_exception = root_exception
    message = 'Problem loading {command}: {issue}.'.format(
        command=command, issue=str(root_exception))
    super(CommandLoadFailure, self).__init__(message)
class ArgumentParser(argparse.ArgumentParser):
  """A custom subclass for arg parsing behavior.

  This overrides the default argparse parser. It only changes a few things,
  mostly around the printing of usage error messages.
  """

  def __init__(self, *args, **kwargs):
    # The calliope command/group backing this parser, and a shared list
    # collecting the flags/args actually supplied (see _get_values).
    self._calliope_command = kwargs.pop('calliope_command')
    self._flag_collection = kwargs.pop('flag_collection')
    # Groups get a different error() rendering than leaf commands.
    self._is_group = isinstance(self._calliope_command, CommandGroup)
    super(ArgumentParser, self).__init__(*args, **kwargs)

  # Namespace attribute used to smuggle a reference to the deepest parser
  # back out of parse_known_args.
  # Assume we will never have a flag called ----calliope-internal...
  CIDP = '__calliope_internal_deepest_parser'

  def GetFlagCollection(self):
    # The shared list of flag/arg names seen during parsing.
    return self._flag_collection

  def parse_known_args(self, args=None, namespace=None):
    """Override's argparse.ArgumentParser's .parse_known_args method."""
    args, argv = super(ArgumentParser, self).parse_known_args(args, namespace)
    # Pass back a reference to the deepest parser used in the parse
    # as part of the returned args.
    if not hasattr(args, self.CIDP):
      setattr(args, self.CIDP, self)
    return (args, argv)

  def parse_args(self, args=None, namespace=None):
    """Override's argparse.ArgumentParser's .parse_args method."""
    args, argv = self.parse_known_args(args, namespace)
    if not argv:
      # Everything parsed cleanly; nothing more to do.
      return args
    if hasattr(args, 'implementation_args'):
      # Workaround for argparse total botch of posix '--'. An
      # 'implementation_args' positional signals that the current command
      # expects 0 or more positionals which may be separated from the explicit
      # flags and positionals by '--'. The first '--' is consumed here. The
      # extra positionals, if any, are stuffed in args.implementation_args.
      # This is still not 100% correct. Incredibly, argparse recognizes the
      # leftmost '--' and does not consume it, and in addition recognizes and
      # consumes all subsequent '--' args. This is exactly opposite of the
      # POSIX spec. We would have to intercept more argparse innards here to
      # get that right, and then take about 100 showers afterwards. It
      # wouldn't be worth doing that unless someone files a bug. One scenario
      # where it might pop up is using ssh to run a command that also needs
      # '--' to work:
      #   gcloud compute ssh my-instance -- some-ssh-like-command -- args
      # Currently the second '--' will not be seen by some-ssh-like-command.
      start = 1 if argv[0] == '--' else 0
      args.implementation_args = argv[start:]
      return args
    # Content of these lines differs from argparser's parse_args().
    deepest_parser = getattr(args, self.CIDP, self)
    # Add a message for each unknown argument. For each, try to come up with
    # a suggestion based on text distance. If one is close enough, print a
    # 'did you mean' message along with that argument.
    messages = []
    suggester = usage_text.CommandChoiceSuggester()
    # pylint:disable=protected-access, This is an instance of this class.
    for flag in deepest_parser._calliope_command.GetAllAvailableFlags():
      options = flag.option_strings
      if options:
        # This is a flag, add all its names as choices.
        suggester.AddChoices(options)
        # Add any aliases as choices as well, but suggest the primary name.
        aliases = getattr(flag, 'suggestion_aliases', None)
        if aliases:
          suggester.AddAliases(aliases, options[0])
    for arg in argv:
      # Only do this for flag names.
      suggestion = suggester.GetSuggestion(arg) if arg.startswith('-') else None
      if suggestion:
        messages.append(arg + " (did you mean '{0}'?)".format(suggestion))
      else:
        messages.append(arg)
    # If there is a single arg, put it on the same line. If there are multiple
    # add each on it's own line for better clarity.
    separator = '\n ' if len(messages) > 1 else ' '
    deepest_parser.error('unrecognized arguments:{0}{1}'.format(
        separator, separator.join(messages)))

  def _check_value(self, action, value):
    """Override's argparse.ArgumentParser's ._check_value(action, value) method.

    Args:
      action: argparse.Action, The action being checked against this value.
      value: The command line argument provided that needs to correspond to
        this action.

    Raises:
      argparse.ArgumentError: If the action and value don't work together.
    """
    is_subparser = isinstance(action, CloudSDKSubParsersAction)
    # When using tab completion, argcomplete monkey patches various parts of
    # argparse and interferes with the normal argument parsing flow. Here, we
    # need to set self._orig_class because argcomplete compares this
    # directly to argparse._SubParsersAction to see if it should recursively
    # patch this parser. It should really check to see if it is a subclass
    # but alas, it does not. If we don't set this, argcomplete will not patch,
    # our subparser and completions below this point wont work. Normally we
    # would just set this in action.IsValidChoice() but sometimes this
    # sub-element has already been loaded and is already in action.choices. In
    # either case, we still need argcomplete to patch this subparser so it
    # can compute completions below this point.
    if is_subparser and '_ARGCOMPLETE' in os.environ:
      # pylint:disable=protected-access, Required by argcomplete.
      action._orig_class = argparse._SubParsersAction
    # This is copied from this method in argparse's version of this method.
    if action.choices is None or value in action.choices:
      return
    # We add this to check if we can lazy load the element.
    if is_subparser and action.IsValidChoice(value):
      return
    # Not something we know, raise an error.
    # pylint:disable=protected-access
    cli_generator = self._calliope_command._cli_generator
    missing_components = cli_generator.ComponentsForMissingCommand(
        self._calliope_command.GetPath() + [value])
    if missing_components:
      # The command exists but its component isn't installed; prompt to
      # install it and restart rather than failing outright.
      msg = ('You do not currently have this command group installed. Using '
             'it requires the installation of components: '
             '[{missing_components}]'.format(
                 missing_components=', '.join(missing_components)))
      update_manager.UpdateManager.EnsureInstalledAndRestart(
          missing_components, msg=msg)
    if is_subparser:
      # We are going to show the usage anyway, which requires loading
      # everything. Do this here so that choices gets populated.
      self._calliope_command.LoadAllSubElements()
    # Command is not valid, see what we can suggest as a fix...
    message = "Invalid choice: '{0}'.".format(value)
    # Determine if the requested command is available in another release track.
    existing_alternatives = self._ExistingAlternativeReleaseTracks(value)
    if existing_alternatives:
      message += ('\nThis command is available in one or more alternate '
                  'release tracks. Try:\n ')
      message += '\n '.join(existing_alternatives)
    # See if the spelling was close to something else that exists here.
    else:
      choices = sorted(action.choices)
      suggester = usage_text.CommandChoiceSuggester(choices)
      suggester.AddSynonyms()
      suggestion = suggester.GetSuggestion(value)
      if suggestion:
        message += " Did you mean '{0}'?".format(suggestion)
      else:
        message += '\n\nValid choices are [{0}].'.format(', '.join(choices))
    raise argparse.ArgumentError(action, message)

  def _ExistingAlternativeReleaseTracks(self, value):
    """Gets the path of alternatives for the command in other release tracks.

    Args:
      value: str, The value being parsed.

    Returns:
      [str]: The names of alternate commands that the user may have meant.
    """
    existing_alternatives = []
    # Get possible alternatives.
    # pylint:disable=protected-access
    cli_generator = self._calliope_command._cli_generator
    alternates = cli_generator.ReplicateCommandPathForAllOtherTracks(
        self._calliope_command.GetPath() + [value])
    # See if the command is actually enabled in any of those alternative
    # tracks.
    if alternates:
      top_element = self._calliope_command._TopCLIElement()
      # Sort by the release track prefix.
      for _, command_path in sorted(alternates.iteritems(),
                                    key=lambda x: x[0].prefix):
        if top_element.IsValidSubPath(command_path[1:]):
          existing_alternatives.append(' '.join(command_path))
    return existing_alternatives

  def error(self, message):
    """Override's argparse.ArgumentParser's .error(message) method.

    Specifically, it avoids reprinting the program name and the string
    "error:".

    Args:
      message: str, The error message to print.
    """
    if self._is_group:
      # Groups print their short help instead of a bare usage line.
      shorthelp = usage_text.ShortHelpText(
          self._calliope_command, self._calliope_command.ai)
      # pylint:disable=protected-access
      argparse._sys.stderr.write(shorthelp + '\n')
    else:
      self.usage = usage_text.GenerateUsage(
          self._calliope_command, self._calliope_command.ai)
      # pylint:disable=protected-access
      self.print_usage(argparse._sys.stderr)
    log.error('({prog}) {message}'.format(prog=self.prog, message=message))
    self.exit(2)

  def _parse_optional(self, arg_string):
    """Override's argparse.ArgumentParser's ._parse_optional method.

    This allows the parser to have leading flags included in the grabbed
    arguments and stored in the namespace.

    Args:
      arg_string: str, The argument string.

    Returns:
      The normal return value of argparse.ArgumentParser._parse_optional.
    """
    positional_actions = self._get_positional_actions()
    option_tuple = super(ArgumentParser, self)._parse_optional(arg_string)
    # If parse_optional finds an action for this arg_string, use that option.
    # Note: option_tuple = (action, option_string, explicit_arg) or None
    known_option = option_tuple and option_tuple[0]
    # With a single REMAINDER positional and no known flag, treat this
    # string as a positional so it lands in the remainder.
    if (len(positional_actions) == 1 and
        positional_actions[0].nargs == argparse.REMAINDER and
        not known_option):
      return None
    return option_tuple

  def _get_values(self, action, arg_strings):
    """Override's argparse.ArgumentParser's ._get_values method.

    This override does not actually change any behavior. We use this hook to
    grab the flags and arguments that are actually seen at parse time. The
    resulting namespace has entries for every argument (some with defaults)
    so we can't know which the user actually typed.

    Args:
      action: Action, the action that is being processed.
      arg_strings: [str], The values provided for this action.

    Returns:
      Whatever the parent method returns.
    """
    if action.dest != argparse.SUPPRESS:
      # Don't look at the action unless it is a real argument or flag. The
      # suppressed destination indicates that it is a SubParsers action.
      name = None
      if action.option_strings:
        # This is a flag, save the first declared name of the flag.
        name = action.option_strings[0]
      elif arg_strings:
        # This is a positional and there are arguments to consume. Optional
        # positionals will always get to this method, so we need to ignore
        # the ones for which a value was not actually provided. If it is
        # provided, save the metavar name or the destination name.
        name = action.metavar if action.metavar else action.dest
      if name:
        self._flag_collection.append(name)
    return super(ArgumentParser, self)._get_values(action, arg_strings)
# pylint:disable=protected-access
class CloudSDKSubParsersAction(argparse._SubParsersAction):
  """A custom subclass for arg parsing behavior.

  While the above ArgumentParser overrides behavior for parsing the flags
  associated with a specific group or command, this class overrides behavior
  for loading those sub parsers. We use this to intercept the parsing right
  before it needs to start parsing args for sub groups and we then load the
  specific sub group it needs.
  """

  def __init__(self, *args, **kwargs):
    # The calliope command/group this subparser dispatches into, plus the
    # shared list recording which flags/args were actually supplied.
    self._calliope_command = kwargs.pop('calliope_command')
    self._flag_collection = kwargs.pop('flag_collection')
    super(CloudSDKSubParsersAction, self).__init__(*args, **kwargs)

  def add_parser(self, name, **kwargs):
    # Pass the same flag collection down to any sub parsers that are created.
    kwargs['flag_collection'] = self._flag_collection
    return super(CloudSDKSubParsersAction, self).add_parser(name, **kwargs)

  def IsValidChoice(self, choice):
    """Determines if the given arg is a valid sub group or command.

    Args:
      choice: str, The name of the sub element to check.

    Returns:
      bool, True if the given item is a valid sub element, False otherwise.
    """
    # When using tab completion, argcomplete monkey patches various parts of
    # argparse and interferes with the normal argument parsing flow. Usually
    # it is sufficient to check if the given choice is valid here, but delay
    # the loading until __call__ is invoked later during the parsing process.
    # During completion time, argcomplete tries to patch the subparser before
    # __call__ is called, so nothing has been loaded yet. We need to force
    # load things here so that there will be something loaded for it to patch.
    if '_ARGCOMPLETE' in os.environ:
      self._calliope_command.LoadSubElement(choice)
    return self._calliope_command.IsValidSubElement(choice)

  def __call__(self, parser, namespace, values, option_string=None):
    # This is the name of the arg that is the sub element that needs to be
    # loaded.
    parser_name = values[0]
    # Load that element if it's there. If it's not valid, nothing will be
    # loaded and normal error handling will take over.
    if self._calliope_command:
      self._calliope_command.LoadSubElement(parser_name)
    super(CloudSDKSubParsersAction, self).__call__(
        parser, namespace, values, option_string=option_string)
class ArgumentInterceptor(object):
"""ArgumentInterceptor intercepts calls to argparse parsers.
The argparse module provides no public way to access a complete list of
all arguments, and we need to know these so we can do validation of arguments
when this library is used in the python interpreter mode. Argparse itself does
the validation when it is run from the command line.
Attributes:
parser: argparse.Parser, The parser whose methods are being intercepted.
allow_positional: bool, Whether or not to allow positional arguments.
defaults: {str:obj}, A dict of {dest: default} for all the arguments added.
required: [str], A list of the dests for all required arguments.
dests: [str], A list of the dests for all arguments.
positional_args: [argparse.Action], A list of the positional arguments.
flag_args: [argparse.Action], A list of the flag arguments.
Raises:
ArgumentException: if a positional argument is made when allow_positional
is false.
"""
class ParserData(object):
def __init__(self, command_name):
self.command_name = command_name
self.defaults = {}
self.required = []
self.dests = []
self.mutex_groups = {}
self.positional_args = []
self.flag_args = []
self.ancestor_flag_args = []
def __init__(self, parser, is_root, cli_generator, allow_positional,
data=None, mutex_group_id=None):
self.parser = parser
self.is_root = is_root
self.cli_generator = cli_generator
self.allow_positional = allow_positional
# If this is an argument group within a command, use the data from the
# parser for the entire command. If it is the command itself, create a new
# data object and extract the command name from the parser.
if data:
self.data = data
else:
self.data = ArgumentInterceptor.ParserData(
command_name=self.parser._calliope_command.GetPath())
self.mutex_group_id = mutex_group_id
@property
def defaults(self):
return self.data.defaults
@property
def required(self):
return self.data.required
@property
def dests(self):
return self.data.dests
@property
def mutex_groups(self):
return self.data.mutex_groups
@property
def positional_args(self):
return self.data.positional_args
@property
def flag_args(self):
return self.data.flag_args
@property
def ancestor_flag_args(self):
return self.data.ancestor_flag_args
# pylint: disable=g-bad-name
def add_argument(self, *args, **kwargs):
"""add_argument intercepts calls to the parser to track arguments."""
# TODO(user): do not allow short-options without long-options.
# we will choose the first option as the name
name = args[0]
dest = kwargs.get('dest')
if not dest:
# this is exactly what happens in argparse
dest = name.lstrip(self.parser.prefix_chars).replace('-', '_')
default = kwargs.get('default')
required = kwargs.get('required')
# A flag that can only be supplied where it is defined and not propagated to
# subcommands.
do_not_propagate = kwargs.pop('do_not_propagate', False)
# A global flag that is added at each level explicitly because each command
# has a different behavior (like -h).
is_replicated = kwargs.pop('is_replicated', False)
# This is used for help printing. A flag is considered global if it is
# added at the root of the CLI tree, or if it is explicitly added to every
# command level.
is_global = self.is_root or is_replicated
# True if this should be marked as a commonly used flag.
is_common = kwargs.pop('is_common', False)
# Any alias this flag has for the purposes of the "did you mean"
# suggestions.
suggestion_aliases = kwargs.pop('suggestion_aliases', [])
# The resource name for the purposes of doing remote completion.
completion_resource = kwargs.pop('completion_resource', None)
# An explicit command to run for remote completion instead of the default
# for this resource type.
list_command_path = kwargs.pop('list_command_path', None)
positional = not name.startswith('-')
if positional:
if not self.allow_positional:
# TODO(user): More informative error message here about which group
# the problem is in.
raise ArgumentException(
'Illegal positional argument [{0}] for command [{1}]'.format(
name, self.data.command_name))
if '-' in name:
raise ArgumentException(
"Positional arguments cannot contain a '-'. Illegal argument [{0}] "
'for command [{1}]'.format(name, self.data.command_name))
if is_common:
raise ArgumentException(
'Positional argument [{0}] cannot be marked as a common flag in '
'command [{1}]'.format(name, self.data.command_name))
if suggestion_aliases:
raise ArgumentException(
'Positional argument [{0}] cannot have suggestion aliases in '
'command [{1}]'.format(name, self.data.command_name))
self.defaults[dest] = default
if required:
self.required.append(dest)
self.dests.append(dest)
if self.mutex_group_id:
self.mutex_groups[dest] = self.mutex_group_id
if positional and 'metavar' not in kwargs:
kwargs['metavar'] = name.upper()
added_argument = self.parser.add_argument(*args, **kwargs)
self._AddRemoteCompleter(added_argument, completion_resource,
list_command_path)
if positional:
self.positional_args.append(added_argument)
else:
added_argument.do_not_propagate = do_not_propagate
added_argument.is_replicated = is_replicated
added_argument.is_global = is_global
added_argument.is_common = is_common
added_argument.suggestion_aliases = suggestion_aliases
self.flag_args.append(added_argument)
inverted_flag = self._AddInvertedBooleanFlagIfNecessary(
added_argument, name, dest, kwargs)
if inverted_flag:
inverted_flag.do_not_propagate = do_not_propagate
inverted_flag.is_replicated = is_replicated
inverted_flag.is_global = is_global
inverted_flag.is_common = is_common
# Don't add suggestion aliases for the inverted flag. It can only map
# to one or the other.
self.flag_args.append(inverted_flag)
return added_argument
# pylint: disable=redefined-builtin
def register(self, registry_name, value, object):
return self.parser.register(registry_name, value, object)
def set_defaults(self, **kwargs):
return self.parser.set_defaults(**kwargs)
def get_default(self, dest):
return self.parser.get_default(dest)
def add_argument_group(self, *args, **kwargs):
new_parser = self.parser.add_argument_group(*args, **kwargs)
return ArgumentInterceptor(parser=new_parser,
is_root=self.is_root,
cli_generator=self.cli_generator,
allow_positional=self.allow_positional,
data=self.data)
def add_mutually_exclusive_group(self, **kwargs):
new_parser = self.parser.add_mutually_exclusive_group(**kwargs)
return ArgumentInterceptor(parser=new_parser,
is_root=self.is_root,
cli_generator=self.cli_generator,
allow_positional=self.allow_positional,
data=self.data,
mutex_group_id=id(new_parser))
def AddFlagActionFromAncestors(self, action):
"""Add a flag action to this parser, but segregate it from the others.
Segregating the action allows automatically generated help text to ignore
this flag.
Args:
action: argparse.Action, The action for the flag being added.
"""
# pylint:disable=protected-access, simply no other way to do this.
self.parser._add_action(action)
# explicitly do this second, in case ._add_action() fails.
self.data.ancestor_flag_args.append(action)
def _AddInvertedBooleanFlagIfNecessary(self, added_argument, name, dest,
original_kwargs):
"""Determines whether to create the --no-* flag and adds it to the parser.
Args:
added_argument: The argparse argument that was previously created.
name: str, The name of the flag.
dest: str, The dest field of the flag.
original_kwargs: {str: object}, The original set of kwargs passed to the
ArgumentInterceptor.
Returns:
The new argument that was added to the parser or None, if it was not
necessary to create a new argument.
"""
action = original_kwargs.get('action')
# There are a few legitimate explicit --no-foo flags.
should_invert, prop = self._ShouldInvertBooleanFlag(name, action)
if not should_invert:
return
default = original_kwargs.get('default', False)
help_str = original_kwargs.get('help')
# Add hidden --no-foo for the --foo Boolean flag. The inverted flag will
# have the same dest and mutually exclusive group as the original flag.
inverted_name = '--no-' + name[2:]
# Explicit default=None yields the 'Use to disable.' text.
if prop or (default in (True, None) and help_str != argparse.SUPPRESS):
if prop:
inverted_help = (' Overrides the default *{0}* property value'
' for this command invocation. Use *{1}* to'
' disable.'.format(prop.name, inverted_name))
elif default:
inverted_help = ' Enabled by default, use *{0}* to disable.'.format(
inverted_name)
else:
inverted_help = ' Use *{0}* to disable.'.format(inverted_name)
# calliope.markdown.MarkdownGenerator._Details() checks and appends
# arg.inverted_help to the detailed help markdown. We can't do that
# here because detailed_help may not have been set yet.
setattr(added_argument, 'inverted_help', inverted_help)
kwargs = dict(original_kwargs)
if action == 'store_true':
action = 'store_false'
elif action == 'store_false':
action = 'store_true'
kwargs['action'] = action
if not kwargs.get('dest'):
kwargs['dest'] = dest
kwargs['help'] = argparse.SUPPRESS
return self.parser.add_argument(inverted_name, **kwargs)
def _ShouldInvertBooleanFlag(self, name, action):
"""Checks if flag name with action is a Boolean flag to invert.
Args:
name: str, The flag name.
action: argparse.Action, The argparse action.
Returns:
(False, None) if flag is not a Boolean flag or should not be inverted,
(True, property) if flag is a Boolean flag associated with a property,
otherwise (True, None) if flag is a pure Boolean flag.
"""
if not name.startswith('--'):
return False, None
if name.startswith('--no-'):
# --no-no-* is a no no.
return False, None
if '--no-' + name[2:] in self.parser._option_string_actions:
# Don't override explicit --no-* inverted flag.
return False, None
if action in ('store_true', 'store_false'):
return True, None
prop = getattr(action, 'boolean_property', None)
if prop:
return True, prop
# Not a Boolean flag.
return False, None
  def _AddRemoteCompleter(self, added_argument, completion_resource,
                          list_command_path):
    """Adds a remote completer to the given argument if necessary.

    Args:
      added_argument: The argparse argument that was previously created.
      completion_resource: str, The name of the resource that this argument
        corresponds to.
      list_command_path: str, The explicit calliope command to run to get the
        completions if you want to override the default for the given resource
        type.
    """
    if not completion_resource:
      return
    if not list_command_path:
      # Derive the list command from the resource name when not given.
      list_command_path = completion_resource
      # alpha and beta commands need to specify list_command_path
      if (list_command_path.startswith('alpha') or
          list_command_path.startswith('beta')):
        # if list_command_path not specified don't add the completer
        completion_resource = None
      else:
        list_command_path = self._LowerCaseWithDashes(completion_resource)
    if completion_resource:
      # add a remote completer
      added_argument.completer = (
          remote_completion.RemoteCompletion.GetCompleterForResource(
              completion_resource,
              self.cli_generator.Generate,
              command_line=list_command_path))
      added_argument.completion_resource = completion_resource
def _LowerCaseWithDashes(self, name):
# Uses two passes to handle all-upper initialisms, such as fooBARBaz
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', name)
s2 = re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1).lower()
return s2
class ConfigHooks(object):
  """This class holds function hooks for context and config loading/saving."""

  def __init__(self, load_context=None, context_filters=None):
    """Create a new object with the given hooks.

    Args:
      load_context: a function returns the context to be sent to commands.
        Defaults to a function producing an empty dict.
      context_filters: a list of functions that take (contex, args),
        that will be called in order before a command is run. They are
        described in the README under the heading GROUP SPECIFICATION.
        Defaults to an empty list.
    """
    self.load_context = load_context or (lambda: {})
    self.context_filters = context_filters or []

  def OverrideWithBase(self, group_base):
    """Get a new ConfigHooks object with overridden functions based on module.

    If module defines any of the function, they will be used instead of what
    is in this object. Anything that is not defined will use the existing
    behavior.

    Args:
      group_base: The base.Group class corresponding to the group.

    Returns:
      A new ConfigHooks object updated with any newly found hooks
    """
    def ContextFilter(context, http_func, args):
      # Instantiate the group and let it refine the shared context in place.
      group = group_base(http_func)
      group.Filter(context, args)
      return group
    # We want the new_context_filters to be a completely new list, if there is
    # a change.
    return ConfigHooks(load_context=self.load_context,
                       context_filters=self.context_filters + [ContextFilter])
class CommandCommon(object):
  """A base class for CommandGroup and Command.

  It is responsible for extracting arguments from the modules and does argument
  validation, since this is always the same for groups and commands.
  """

  def __init__(self, common_type, path, release_track, cli_generator,
               config_hooks, parser_group, allow_positional_args, parent_group):
    """Create a new CommandCommon.

    Args:
      common_type: base._Command, The actual loaded user written command or
        group class.
      path: [str], Similar to module_path, but is the path to this command or
        group with respect to the CLI itself. This path should be used for
        things like error reporting when a specific element in the tree needs
        to be referenced.
      release_track: base.ReleaseTrack, The release track (ga, beta, alpha)
        that this command group is in. This will apply to all commands under
        it.
      cli_generator: cli.CLILoader, The builder used to generate this CLI.
      config_hooks: a ConfigHooks object to use for loading context.
      parser_group: argparse.Parser, The parser that this command or group will
        live in.
      allow_positional_args: bool, True if this command can have positional
        arguments.
      parent_group: CommandGroup, The parent of this command or group. None if
        at the root.
    """
    self._config_hooks = config_hooks
    self._parent_group = parent_group
    self.name = path[-1]
    # For the purposes of argparse and the help, we should use dashes.
    self.cli_name = self.name.replace('_', '-')
    log.debug('Loaded Command Group: %s', path)
    # NOTE: this mutates the caller-supplied path list in place, swapping the
    # underscored module name for the dashed CLI name.
    path[-1] = self.cli_name
    self._path = path
    self.dotted_name = '.'.join(path)
    self._cli_generator = cli_generator
    self._common_type = common_type
    # Stash the generator and track on the user-written class itself so the
    # command implementation can reach them.
    self._common_type._cli_generator = cli_generator
    self._common_type._release_track = release_track
    if parent_group:
      # Propagate down the hidden attribute.
      if parent_group.IsHidden():
        self._common_type._is_hidden = True
      # TODO(user): This is going to go away once we remove the explicit
      # Alpha and Beta decorators for commands. Once the commands show up
      # under the correct track, the help will use the regular release track
      # for annotations (b/19406151).
      legacy_release_track = parent_group._common_type._legacy_release_track
      if legacy_release_track and not self._common_type._legacy_release_track:
        self._common_type._legacy_release_track = legacy_release_track
    self.detailed_help = getattr(self._common_type, 'detailed_help', {})
    self._ExtractHelpStrings(self._common_type.__doc__)
    self._AssignParser(
        parser_group=parser_group,
        allow_positional_args=allow_positional_args)

  def ReleaseTrack(self, for_help=False):
    """Gets the release track of this command or group."""
    return self._common_type.ReleaseTrack(for_help=for_help)

  def IsHidden(self):
    """Gets the hidden status of this command or group."""
    return self._common_type.IsHidden()

  def IsRoot(self):
    """Returns True if this is the root element in the CLI tree."""
    return not self._parent_group

  def _TopCLIElement(self):
    """Gets the top group of this CLI."""
    # Walk the parent links until the root is reached.
    if self.IsRoot():
      return self
    return self._parent_group._TopCLIElement()

  def _ExtractHelpStrings(self, docstring):
    """Extracts short help, long help and man page index from a docstring.

    Sets self.short_help, self.long_help and self.index_help and adds release
    track tags if needed.

    Args:
      docstring: The docstring from which short and long help are to be taken
    """
    self.short_help, self.long_help = usage_text.ExtractHelpStrings(docstring)
    # An explicit 'brief' entry in detailed_help overrides the docstring
    # summary line.
    if 'brief' in self.detailed_help:
      self.short_help = self.detailed_help['brief']
    if self.short_help and not self.short_help.endswith('.'):
      self.short_help += '.'
    self.index_help = self.short_help
    # The index entry is lower-cased (unless it starts with an initialism)
    # and loses its trailing period.
    if len(self.index_help) > 1:
      if self.index_help[0].isupper() and not self.index_help[1].isupper():
        self.index_help = self.index_help[0].lower() + self.index_help[1:]
      if self.index_help[-1] == '.':
        self.index_help = self.index_help[:-1]
    # Add an annotation to the help strings to mark the release stage.
    tag = self.ReleaseTrack(for_help=True).help_tag
    if tag:
      self.short_help = tag + self.short_help
      self.long_help = tag + self.long_help
      # TODO(user):b/21208128: Drop these 4 lines.
      prefix = self.ReleaseTrack(for_help=True).prefix
      if len(self._path) < 2 or self._path[1] != prefix:
        self.index_help = tag + self.index_help

  def _AssignParser(self, parser_group, allow_positional_args):
    """Assign a parser group to model this Command or CommandGroup.

    Args:
      parser_group: argparse._ArgumentGroup, the group that will model this
        command or group's arguments.
      allow_positional_args: bool, Whether to allow positional args for this
        group or not.
    """
    if not parser_group:
      # This is the root of the command tree, so we create the first parser.
      self._parser = ArgumentParser(
          description=self.long_help,
          add_help=False,
          prog=self.dotted_name,
          calliope_command=self,
          flag_collection=[])
    else:
      # This is a normal sub group, so just add a new subparser to the existing
      # one.
      self._parser = parser_group.add_parser(
          self.cli_name,
          help=self.short_help,
          description=self.long_help,
          add_help=False,
          prog=self.dotted_name,
          calliope_command=self)
    # Created lazily by CommandGroup.SubParser().
    self._sub_parser = None
    self.ai = ArgumentInterceptor(
        parser=self._parser,
        is_root=not parser_group,
        cli_generator=self._cli_generator,
        allow_positional=allow_positional_args)
    # Every command and group gets its own -h / --help / --document flags
    # (add_help=False above disables argparse's built-in ones).
    self.ai.add_argument(
        '-h', action=actions.ShortHelpAction(self),
        is_replicated=True,
        is_common=True,
        help='Print a summary help and exit.')
    self.ai.add_argument(
        '--help', action=actions.RenderDocumentAction(self, '--help'),
        is_replicated=True,
        is_common=True,
        help='Display detailed help.')
    self.ai.add_argument(
        '--document', action=actions.RenderDocumentAction(self),
        is_replicated=True,
        nargs=1,
        metavar='ATTRIBUTES',
        type=arg_parsers.ArgDict(),
        help=argparse.SUPPRESS)
    self._AcquireArgs()

  def IsValidSubPath(self, command_path):
    """Determines if the given sub command path is valid from this node.

    Args:
      command_path: [str], The pieces of the command path.

    Returns:
      True, if the given path parts exist under this command or group node.
      False, if the sub path does not lead to a valid command or group.
    """
    current = self
    for part in command_path:
      current = current.LoadSubElement(part)
      if not current:
        return False
    return True

  def AllSubElements(self):
    """Gets all the sub elements of this group.

    Returns:
      set(str), The names of all sub groups or commands under this group.
    """
    # Overridden by CommandGroup; a leaf Command has no sub elements.
    return []

  def LoadAllSubElements(self, recursive=False):
    """Load all the sub groups and commands of this group."""
    # Overridden by CommandGroup; a no-op for leaf commands.
    pass

  def LoadSubElement(self, name, allow_empty=False):
    """Load a specific sub group or command.

    Args:
      name: str, The name of the element to load.
      allow_empty: bool, True to allow creating this group as empty to start
        with.

    Returns:
      _CommandCommon, The loaded sub element, or None if it did not exist.
    """
    # Overridden by CommandGroup; a leaf Command never has sub elements.
    pass

  def GetPath(self):
    """Returns [str], the CLI path of this element."""
    return self._path

  def GetShortHelp(self):
    """Returns the rendered short help text for this element."""
    return usage_text.ShortHelpText(self, self.ai)

  def GetSubCommandHelps(self):
    """Returns {str: HelpInfo} for sub commands; empty for leaf commands."""
    return {}

  def GetSubGroupHelps(self):
    """Returns {str: HelpInfo} for sub groups; empty for leaf commands."""
    return {}

  def _GetModuleFromPath(self, module_dir, module_path, path, construction_id):
    """Import the module and dig into it to return the namespace we are after.

    Import the module relative to the top level directory. Then return the
    actual module corresponding to the last bit of the path.

    Args:
      module_dir: str, The path to the tools directory that this command or
        group lives within.
      module_path: [str], The command group names that brought us down to this
        command group or command from the top module directory.
      path: [str], The same as module_path but with the groups named as they
        will be in the CLI.
      construction_id: str, A unique identifier for the CLILoader that is
        being constructed.

    Returns:
      The imported module.
    """
    # Make sure this module name never collides with any real module name.
    # Use the CLI naming path, so values are always unique.
    name_to_give = '__calliope__command__.{construction_id}.{name}'.format(
        construction_id=construction_id,
        name='.'.join(path).replace('-', '_'))
    try:
      return pkg_resources.GetModuleFromPath(
          name_to_give, os.path.join(module_dir, *module_path))
    # pylint:disable=broad-except, We really do want to catch everything here,
    # because if any exceptions make it through for any single command or group
    # file, the whole CLI will not work. Instead, just log whatever it is.
    except Exception as e:
      _, _, exc_traceback = sys.exc_info()
      # Python 2 three-expression raise: re-raise as CommandLoadFailure while
      # preserving the original traceback.
      raise CommandLoadFailure('.'.join(path), e), None, exc_traceback

  def _AcquireArgs(self):
    """Calls the functions to register the arguments for this module."""
    # A Command subclass can define a _Flags() method.
    self._common_type._Flags(self.ai)
    # A command implementation can optionally define an Args() method.
    self._common_type.Args(self.ai)
    if self._parent_group:
      # Add parent flags to children, if they aren't represented already
      for flag in self._parent_group.GetAllAvailableFlags():
        if flag.is_replicated:
          # Each command or group gets its own unique help flags.
          continue
        if flag.do_not_propagate:
          # Don't propagate down flags that only apply to the group but not to
          # subcommands.
          continue
        if flag.required:
          # It is not easy to replicate required flags to subgroups and
          # subcommands, since then there would be two+ identical required
          # flags, and we'd want only one of them to be necessary.
          continue
        try:
          self.ai.AddFlagActionFromAncestors(flag)
        except argparse.ArgumentError:
          raise ArgumentException(
              'repeated flag in {command}: {flag}'.format(
                  command=self.dotted_name,
                  flag=flag.option_strings))

  def GetAllAvailableFlags(self):
    """Returns this element's own flags plus the inherited ancestor flags."""
    return self.ai.flag_args + self.ai.ancestor_flag_args

  def GetSpecificFlags(self):
    """Returns only the flags declared directly on this element."""
    return self.ai.flag_args
class CommandGroup(CommandCommon):
  """A class to encapsulate a group of commands."""

  def __init__(self, module_dir, module_path, path, release_track,
               construction_id, cli_generator, parser_group, config_hooks,
               parent_group=None, allow_empty=False):
    """Create a new command group.

    Args:
      module_dir: always the root of the whole command tree
      module_path: a list of command group names that brought us down to this
        command group from the top module directory
      path: similar to module_path, but is the path to this command group
        with respect to the CLI itself. This path should be used for things
        like error reporting when a specific element in the tree needs to be
        referenced
      release_track: base.ReleaseTrack, The release track (ga, beta, alpha)
        that this command group is in. This will apply to all commands under
        it.
      construction_id: str, A unique identifier for the CLILoader that is
        being constructed.
      cli_generator: cli.CLILoader, The builder used to generate this CLI.
      parser_group: the current argparse parser, or None if this is the root
        command group. The root command group will allocate the initial
        top level argparse parser.
      config_hooks: a ConfigHooks object to use for loading context
      parent_group: CommandGroup, The parent of this group. None if at the
        root.
      allow_empty: bool, True to allow creating this group as empty to start
        with.

    Raises:
      LayoutException: if the module has no sub groups or commands
    """
    # pylint:disable=protected-access, The base module is effectively an
    # extension of calliope, and we want to leave _Common private so people
    # don't extend it directly.
    common_type = base._Common.FromModule(
        self._GetModuleFromPath(module_dir, module_path, path, construction_id),
        release_track,
        is_command=False)
    super(CommandGroup, self).__init__(
        common_type,
        path=path,
        release_track=release_track,
        cli_generator=cli_generator,
        config_hooks=config_hooks,
        allow_positional_args=False,
        parser_group=parser_group,
        parent_group=parent_group)
    self._module_dir = module_dir
    self._module_path = module_path
    self._construction_id = construction_id
    # Let the group module override context/config hooks for its subtree.
    self._config_hooks = self._config_hooks.OverrideWithBase(self._common_type)
    # find sub groups and commands
    self.groups = {}
    self.commands = {}
    self._groups_to_load = {}
    self._commands_to_load = {}
    self._unloadable_elements = set()
    self._FindSubElements()
    if (not allow_empty and
        not self._groups_to_load and not self._commands_to_load):
      raise LayoutException('Group %s has no subgroups or commands'
                            % self.dotted_name)
    # Initialize the sub-parser so sub groups can be found.
    self.SubParser()

  def _FindSubElements(self):
    """Find all the sub groups and commands under this group.

    Raises:
      LayoutException: if there is a command or group with an illegal name.
    """
    location = os.path.join(self._module_dir, *self._module_path)
    groups, commands = pkg_resources.ListPackage(location)
    for collection in [groups, commands]:
      for name in collection:
        if re.search('[A-Z]', name):
          raise LayoutException('Commands and groups cannot have capital '
                                'letters: %s.' % name)
    # Register everything for lazy loading; nothing is imported yet.
    for group_info in self._GetSubPathForNames(groups):
      self.AddSubGroup(group_info)
    for command_info in self._GetSubPathForNames(commands):
      self.AddSubCommand(command_info)

  def _GetSubPathForNames(self, names):
    """Gets a list of (module path, path) for the sub names.

    Args:
      names: [str], The names of the sub groups or commands the paths are for.

    Returns:
      A list of tuples of (module_dir, module_path, name, release_track) for
      the given names. These terms are that as used by the constructor of
      CommandGroup and Command.
    """
    return [(self._module_dir, self._module_path + [name], name,
             self.ReleaseTrack(for_help=False))
            for name in names]

  def AddSubGroup(self, group_info):
    """Merges another command group under this one.

    If we load command groups for alternate locations, this method is used to
    make those extra sub groups fall under this main group in the CLI.

    Args:
      group_info: A tuple of (module_dir, module_path, name, release_track).
        The arguments used by the LoadSubElement() method for lazy loading
        this group.
    """
    name = group_info[2]
    self._groups_to_load[name] = group_info

  def AddSubCommand(self, command_info):
    """Merges another command under this group.

    If we load commands for alternate locations, this method is used to
    make those extra sub commands fall under this main group in the CLI.

    Args:
      command_info: A tuple of (module_dir, module_path, name, release_track).
        The arguments used by the LoadSubElement() method for lazy loading
        this command.
    """
    name = command_info[2]
    self._commands_to_load[name] = command_info

  def CopyAllSubElementsTo(self, other_group, ignore):
    """Copies all the sub groups and commands from this group to the other.

    Args:
      other_group: CommandGroup, The other group to populate.
      ignore: set(str), Names of elements not to copy.
    """
    collections_to_update = [
        (self._groups_to_load, other_group._groups_to_load),
        (self._commands_to_load, other_group._commands_to_load)]
    for src, dst in collections_to_update:
      # NOTE: iteritems() keeps this file Python 2 only.
      for name, info in src.iteritems():
        if name in ignore:
          continue
        # The copied element adopts the destination group's release track.
        (module_dir, module_path, name, unused_track) = info
        dst[name] = (module_dir, module_path, name,
                     other_group.ReleaseTrack(for_help=False))

  def SubParser(self):
    """Gets or creates the argparse sub parser for this group.

    Returns:
      The argparse subparser that children of this group should register with.
      If a sub parser has not been allocated, it is created now.
    """
    if not self._sub_parser:
      self._sub_parser = self._parser.add_subparsers(
          action=CloudSDKSubParsersAction, calliope_command=self,
          flag_collection=self._parser._flag_collection)
    return self._sub_parser

  def AllSubElements(self):
    """Gets all the sub elements of this group.

    Returns:
      set(str), The names of all sub groups or commands under this group.
    """
    return (set(self._groups_to_load.keys()) |
            set(self._commands_to_load.keys()))

  def IsValidSubElement(self, name):
    """Determines if the given name is a valid sub group or command.

    Args:
      name: str, The name of the possible sub element.

    Returns:
      bool, True if the name is a valid sub element of this group.
    """
    return bool(self.LoadSubElement(name))

  def LoadAllSubElements(self, recursive=False):
    """Load all the sub groups and commands of this group."""
    for name in self.AllSubElements():
      element = self.LoadSubElement(name)
      if element and recursive:
        element.LoadAllSubElements(recursive=recursive)

  def LoadSubElement(self, name, allow_empty=False):
    """Load a specific sub group or command.

    Args:
      name: str, The name of the element to load.
      allow_empty: bool, True to allow creating this group as empty to start
        with.

    Returns:
      _CommandCommon, The loaded sub element, or None if it did not exist.
    """
    name = name.replace('-', '_')
    # See if this element has already been loaded.
    existing = self.groups.get(name, None)
    if not existing:
      existing = self.commands.get(name, None)
    if existing:
      return existing
    if name in self._unloadable_elements:
      return None
    element = None
    try:
      if name in self._groups_to_load:
        (module_dir, module_path, name, track) = self._groups_to_load[name]
        element = CommandGroup(
            module_dir, module_path, self._path + [name], track,
            self._construction_id, self._cli_generator, self.SubParser(),
            self._config_hooks, parent_group=self, allow_empty=allow_empty)
        self.groups[element.name] = element
      elif name in self._commands_to_load:
        (module_dir, module_path, name, track) = self._commands_to_load[name]
        element = Command(
            module_dir, module_path, self._path + [name], track,
            self._construction_id, self._cli_generator, self._config_hooks,
            self.SubParser(), parent_group=self)
        self.commands[element.name] = element
    except base.ReleaseTrackNotImplementedException as e:
      # Remember the failure so we don't retry the import on every lookup.
      self._unloadable_elements.add(name)
      log.debug(e)
    return element

  def GetSubCommandHelps(self):
    """Returns {str: HelpInfo} for all loaded sub commands, keyed by name."""
    return dict(
        (item.cli_name,
         usage_text.HelpInfo(help_text=item.short_help,
                             is_hidden=item.IsHidden(),
                             # BUG FIX: previously the bound method
                             # item.ReleaseTrack was passed instead of its
                             # result; call it like GetSubGroupHelps() does.
                             release_track=item.ReleaseTrack(for_help=True)))
        for item in self.commands.values())

  def GetSubGroupHelps(self):
    """Returns {str: HelpInfo} for all loaded sub groups, keyed by name."""
    return dict(
        (item.cli_name,
         usage_text.HelpInfo(help_text=item.short_help,
                             is_hidden=item.IsHidden(),
                             release_track=item.ReleaseTrack(for_help=True)))
        for item in self.groups.values())
class Command(CommandCommon):
  """A class that encapsulates the configuration for a single command."""

  def __init__(self, module_dir, module_path, path, release_track,
               construction_id, cli_generator, config_hooks, parser_group,
               parent_group=None):
    """Create a new command.

    Args:
      module_dir: str, The root of the command tree.
      module_path: a list of command group names that brought us down to this
        command from the top module directory
      path: similar to module_path, but is the path to this command with
        respect to the CLI itself. This path should be used for things like
        error reporting when a specific element in the tree needs to be
        referenced.
      release_track: base.ReleaseTrack, The release track (ga, beta, alpha)
        that this command group is in. This will apply to all commands under
        it.
      construction_id: str, A unique identifier for the CLILoader that is
        being constructed.
      cli_generator: cli.CLILoader, The builder used to generate this CLI.
      config_hooks: a ConfigHooks object to use for loading context
      parser_group: argparse.Parser, The parser to be used for this command.
      parent_group: CommandGroup, The parent of this command.
    """
    # pylint:disable=protected-access, The base module is effectively an
    # extension of calliope, and we want to leave _Common private so people
    # don't extend it directly.
    common_type = base._Common.FromModule(
        self._GetModuleFromPath(module_dir, module_path, path, construction_id),
        release_track,
        is_command=True)
    super(Command, self).__init__(
        common_type,
        path=path,
        release_track=release_track,
        cli_generator=cli_generator,
        config_hooks=config_hooks,
        allow_positional_args=True,
        parser_group=parser_group,
        parent_group=parent_group)
    # Wire argparse straight through to this command's Run method.
    self._parser.set_defaults(cmd_func=self.Run, command_path=self._path)

  def Run(self, cli, args):
    """Run this command with the given arguments.

    Args:
      cli: The cli.CLI object for this command line tool.
      args: The arguments for this command as a namespace.

    Returns:
      The object returned by the module's Run() function.

    Raises:
      exceptions.Error: if thrown by the Run() function.
      exceptions.ExitCodeNoError: if the command is returning with a non-zero
        exit code.
    """
    metrics.Loaded()
    # Build the tool context, then let each ancestor group's filter refine it
    # in order; the last group instance is handed to the command.
    tool_context = self._config_hooks.load_context()
    last_group = None
    for context_filter in self._config_hooks.context_filters:
      last_group = context_filter(tool_context, core_cli.Http, args)
    command_instance = self._common_type(
        cli=cli,
        context=tool_context,
        group=last_group,
        http_func=core_cli.Http,
        format_string=args.format or 'yaml')
    log.debug('Running %s with %s.', self.dotted_name, args)
    resources = command_instance.Run(args)
    # Display() may consume or transform the resources (e.g. drain a
    # generator), so keep its return value.
    resources = display.Displayer(command_instance, args, resources).Display()
    metrics.Ran()
    if command_instance.exit_code != 0:
      raise exceptions.ExitCodeNoError(exit_code=command_instance.exit_code)
    return resources
| bsd-3-clause |
superdesk/superdesk-aap | server/aap/publish/formatters/aap_nitf_formatter.py | 2 | 5094 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
from superdesk.publish.formatters.nitf_formatter import NITFFormatter
from superdesk.errors import FormatterError
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, SIGN_OFF
from lxml import etree as etree
from lxml.etree import SubElement
import re
from .unicodetoascii import to_ascii
from superdesk.etree import parse_html, to_string
from superdesk.text_utils import get_text
class AAPNITFFormatter(NITFFormatter):
    """NITF formatter that adds AAP specific <meta> data to the base NITF."""

    def format(self, article, subscriber, codes=None):
        """Format the article as an ASCII encoded NITF document.

        :param article: the article dict to format
        :param subscriber: the subscriber the output is generated for; used to
            allocate the published sequence number
        :param codes: accepted for interface compatibility; unused here
        :return: one-element list with the sequence number, the formatted NITF
            string and its encoding
        :raises FormatterError: if anything goes wrong while formatting
        """
        try:
            pub_seq_num = superdesk.get_resource_service('subscribers').generate_sequence_number(subscriber)
            nitf = self.get_nitf(article, subscriber, pub_seq_num)
            return [{'published_seq_num': pub_seq_num,
                     'formatted_item': etree.tostring(nitf, encoding='ascii').decode('ascii'),
                     'item_encoding': 'ascii'}]
        except Exception as ex:
            raise FormatterError.nitfFormatterError(ex, subscriber)

    def can_format(self, format_type, article):
        """Return True if this formatter handles the given type/article."""
        # Only text/preformatted items can be rendered as aap_nitf.
        return format_type == 'aap_nitf' and \
            article[ITEM_TYPE] in (CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED)

    def _format_meta(self, article, head, destination, pub_seq_num):
        """
        Appends <meta> elements to <head>
        """
        super()._format_meta(article, head, destination, pub_seq_num)
        # Only the first anpa category is emitted.
        if 'anpa_category' in article and article['anpa_category'] is not None and len(
                article.get('anpa_category')) > 0:
            SubElement(head, 'meta',
                       {'name': 'anpa-category', 'content': article.get('anpa_category')[0].get('qcode', '')})
        SubElement(head, 'meta', {'name': 'anpa-sequence', 'content': str(pub_seq_num)})
        SubElement(head, 'meta', {'name': 'anpa-keyword', 'content': self.append_legal(article)})
        if article.get('anpa_take_key'):
            SubElement(head, 'meta', {'name': 'anpa-takekey', 'content': article.get('anpa_take_key', '')})
        # Resolve creator ids to usernames; missing users are simply omitted.
        original_creator = superdesk.get_resource_service('users').find_one(req=None,
                                                                            _id=article.get('original_creator', ''))
        if original_creator:
            SubElement(head, 'meta', {'name': 'aap-original-creator', 'content': original_creator.get('username')})
        version_creator = superdesk.get_resource_service('users').find_one(req=None,
                                                                           _id=article.get('version_creator', ''))
        if version_creator:
            SubElement(head, 'meta', {'name': 'aap-version-creator', 'content': version_creator.get('username')})
        if article.get('task', {}).get('desk') is not None:
            desk = superdesk.get_resource_service('desks').find_one(_id=article.get('task', {}).get('desk'), req=None)
            SubElement(head, 'meta', {'name': 'aap-desk', 'content': desk.get('name', '')})
        if article.get('task', {}).get('stage') is not None:
            stage = superdesk.get_resource_service('stages').find_one(_id=article.get('task', {}).get('stage'),
                                                                      req=None)
            if stage is not None:
                SubElement(head, 'meta', {'name': 'aap-stage', 'content': stage.get('name', '')})
        SubElement(head, 'meta', {'name': 'aap-source', 'content': article.get('source', '')})
        SubElement(head, 'meta', {'name': 'aap-original-source', 'content': article.get('original_source', '')})
        # Only the first place's qcode is emitted.
        if 'place' in article and article['place'] is not None and len(article.get('place', [])) > 0:
            SubElement(head, 'meta', {'name': 'aap-place', 'content': article.get('place')[0]['qcode']})
        if SIGN_OFF in article:
            SubElement(head, 'meta', {'name': 'aap-signoff', 'content': article.get(SIGN_OFF, '') or ''})

    def _format_meta_priority(self, article, head):
        """Emits the AAP priority <meta> element when the article has one."""
        if 'priority' in article:
            SubElement(head, 'meta', {'name': 'aap-priority', 'content': str(article['priority'])})

    def map_html_to_xml(self, element, html):
        """
        Map the html text tags to xml

        :param element: The xml element to populate
        :param html: the html to parse the text from
        :return:
        """
        # Normalise break tags so the fragment parses as well-formed markup.
        html = html.replace('<br>', '<br/>').replace('</br>', '')
        # Strip low control characters that are illegal in XML (LF and CR are
        # kept here; LF is turned into a space just below).
        html = re.sub('[\x00-\x09\x0b\x0c\x0e-\x1f]', '', html)
        html = html.replace('\n', ' ')
        # Collapse runs of whitespace to a single space.
        html = re.sub(r'\s\s+', ' ', html)
        parsed = parse_html(html, content='html')
        # Each top-level child of the parsed fragment becomes a <p> whose text
        # content is down-converted to ASCII.
        for tag in parsed.xpath('/html/div/child::*'):
            p = etree.Element('p')
            p.text = to_ascii(get_text(to_string(tag, method='html'), content='html'))
            element.append(p)
| agpl-3.0 |
BayanGroup/sentry | src/sentry/migrations/0015_auto__add_field_message_project__add_field_messagecountbyminute_projec.py | 36 | 14224 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Adds a nullable ``project`` FK to each model and rebuilds the
        affected unique constraints to include ``project_id``."""
        # Removing unique constraint on 'GroupedMessage', fields ['checksum', 'logger', 'view']
        db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
        # Removing unique constraint on 'MessageFilterValue', fields ['group', 'value', 'key']
        db.delete_unique('sentry_messagefiltervalue', ['group_id', 'value', 'key'])
        # Removing unique constraint on 'FilterValue', fields ['key', 'value']
        db.delete_unique('sentry_filtervalue', ['key', 'value'])
        # Removing unique constraint on 'MessageCountByMinute', fields ['date', 'group']
        db.delete_unique('sentry_messagecountbyminute', ['date', 'group_id'])
        # Adding field 'Message.project'
        db.add_column('sentry_message', 'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
        # Adding field 'MessageCountByMinute.project'
        db.add_column('sentry_messagecountbyminute', 'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
        # Adding unique constraint on 'MessageCountByMinute', fields ['project', 'date', 'group']
        db.create_unique('sentry_messagecountbyminute', ['project_id', 'date', 'group_id'])
        # Adding field 'FilterValue.project'
        db.add_column('sentry_filtervalue', 'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
        # Adding unique constraint on 'FilterValue', fields ['project', 'value', 'key']
        db.create_unique('sentry_filtervalue', ['project_id', 'value', 'key'])
        # Adding field 'MessageFilterValue.project'
        db.add_column('sentry_messagefiltervalue', 'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
        # Adding unique constraint on 'MessageFilterValue', fields ['project', 'group', 'value', 'key']
        db.create_unique('sentry_messagefiltervalue', ['project_id', 'group_id', 'value', 'key'])
        # Adding field 'GroupedMessage.project'
        db.add_column('sentry_groupedmessage', 'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
        # Adding unique constraint on 'GroupedMessage', fields ['project', 'checksum', 'logger', 'view']
        db.create_unique('sentry_groupedmessage', ['project_id', 'checksum', 'logger', 'view'])
    def backwards(self, orm):
        """Revert this migration: remove the ``project`` foreign keys again.

        Mirrors ``forwards`` in reverse: the unique constraints that include
        ``project_id`` are dropped before the ``project_id`` columns
        themselves, and the original project-less unique constraints are
        re-created as each column goes away.
        """
        # Removing unique constraint on 'GroupedMessage', fields ['project', 'checksum', 'logger', 'view']
        db.delete_unique('sentry_groupedmessage', ['project_id', 'checksum', 'logger', 'view'])

        # Removing unique constraint on 'MessageFilterValue', fields ['project', 'group', 'value', 'key']
        db.delete_unique('sentry_messagefiltervalue', ['project_id', 'group_id', 'value', 'key'])

        # Removing unique constraint on 'FilterValue', fields ['project', 'value', 'key']
        db.delete_unique('sentry_filtervalue', ['project_id', 'value', 'key'])

        # Removing unique constraint on 'MessageCountByMinute', fields ['project', 'date', 'group']
        db.delete_unique('sentry_messagecountbyminute', ['project_id', 'date', 'group_id'])

        # Deleting field 'Message.project'
        db.delete_column('sentry_message', 'project_id')

        # Deleting field 'MessageCountByMinute.project'
        db.delete_column('sentry_messagecountbyminute', 'project_id')

        # Adding unique constraint on 'MessageCountByMinute', fields ['date', 'group']
        db.create_unique('sentry_messagecountbyminute', ['date', 'group_id'])

        # Deleting field 'FilterValue.project'
        db.delete_column('sentry_filtervalue', 'project_id')

        # Adding unique constraint on 'FilterValue', fields ['key', 'value']
        db.create_unique('sentry_filtervalue', ['key', 'value'])

        # Deleting field 'MessageFilterValue.project'
        db.delete_column('sentry_messagefiltervalue', 'project_id')

        # Adding unique constraint on 'MessageFilterValue', fields ['group', 'value', 'key']
        db.create_unique('sentry_messagefiltervalue', ['group_id', 'value', 'key'])

        # Deleting field 'GroupedMessage.project'
        db.delete_column('sentry_groupedmessage', 'project_id')

        # Adding unique constraint on 'GroupedMessage', fields ['checksum', 'logger', 'view']
        db.create_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('project', 'logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
dan-blanchard/conda-build | conda_build/exceptions.py | 8 | 1182 | import textwrap
# Horizontal rule used to set error reports apart from surrounding output.
SEPARATOR = "-" * 70


def indent(s):
    """Dedent *s* and re-wrap it to the default textwrap width.

    Defined as a proper function rather than a lambda assignment (PEP 8
    E731); behavior is identical.
    """
    return textwrap.fill(textwrap.dedent(s))


class CondaBuildException(Exception):
    """Base class for all conda-build specific errors."""
    pass


class YamlParsingError(CondaBuildException):
    """Raised when a recipe's YAML metadata cannot be parsed."""
    pass


class UnableToParse(YamlParsingError):
    """meta.yaml parsing failed; wraps the parser's original exception."""

    def __init__(self, original, *args, **kwargs):
        super(UnableToParse, self).__init__(*args, **kwargs)
        # The underlying exception raised by the YAML parser.
        self.original = original

    def error_msg(self):
        """Return the full, human-readable error report."""
        return "\n".join([
            SEPARATOR,
            self.error_body(),
            self.indented_exception(),
        ])

    def error_body(self):
        """Return the headline of the report; subclasses extend this."""
        return "\n".join([
            "Unable to parse meta.yaml file\n",
        ])

    def indented_exception(self):
        """Return the wrapped exception's text with each line arrow-prefixed.

        The arrows make the original parser message stand out from the rest
        of the report.  (Previously used a local lambda named ``indent`` that
        shadowed the module-level helper of the same name.)
        """
        orig = str(self.original)
        arrowed = orig.replace("\n", "\n--> ")
        return "Error Message:\n--> {}\n\n".format(arrowed)


class UnableToParseMissingJinja2(UnableToParse):
    """meta.yaml parsing failed because jinja2 is not installed."""

    def error_body(self):
        return "\n".join([
            super(UnableToParseMissingJinja2, self).error_body(),
            indent("""\
                It appears you are missing jinja2. Please install that
                package, then attempt to build.
            """),
        ])
| bsd-3-clause |
with-git/tensorflow | tensorflow/python/kernel_tests/benchmark_test.py | 116 | 7664 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
# Used by SomeRandomBenchmark class below.
# Single-element lists act as mutable module-level flags so the benchmark
# methods can record whether the runner actually invoked them.
_ran_somebenchmark_1 = [False]
_ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]
class SomeRandomBenchmark(test.Benchmark):
  """This Benchmark should automatically be registered in the registry."""

  def _dontRunThisBenchmark(self):
    # Leading underscore: the benchmark runner must skip non-public methods.
    _ran_somebenchmark_but_shouldnt[0] = True

  def notBenchmarkMethod(self):
    # Name does not start with "benchmark": the runner must skip it too.
    _ran_somebenchmark_but_shouldnt[0] = True

  def benchmark1(self):
    _ran_somebenchmark_1[0] = True

  def benchmark2(self):
    _ran_somebenchmark_2[0] = True
class TestReportingBenchmark(test.Benchmark):
  """This benchmark (maybe) reports some stuff."""

  def benchmarkReport1(self):
    # Minimal report: an iteration count only.
    self.report_benchmark(iters=1)

  def benchmarkReport2(self):
    # Reports under a custom name and attaches extra key/value metadata.
    self.report_benchmark(
        iters=2,
        name="custom_benchmark_name",
        extras={"number_key": 3,
                "other_key": "string"})

  def benchmark_times_an_op(self):
    # Times a trivial add op in a real Session and stores the full trace
    # so BenchmarkTest below can inspect the generated report.
    with session.Session() as sess:
      a = constant_op.constant(0.0)
      a_plus_a = a + a
      self.run_op_benchmark(
          sess, a_plus_a, min_iters=1000, store_trace=True, name="op_benchmark")
class BenchmarkTest(test.TestCase):
  """Tests benchmark auto-registration, selection, and report files."""

  def testGlobalBenchmarkRegistry(self):
    # Both Benchmark subclasses defined above must have self-registered.
    registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
    self.assertEqual(len(registry), 2)
    self.assertTrue(SomeRandomBenchmark in registry)
    self.assertTrue(TestReportingBenchmark in registry)

  def testRunSomeRandomBenchmark(self):
    # Validate that SomeBenchmark has not run yet
    self.assertFalse(_ran_somebenchmark_1[0])
    self.assertFalse(_ran_somebenchmark_2[0])
    self.assertFalse(_ran_somebenchmark_but_shouldnt[0])

    # Run other benchmarks, but this wont run the one we care about
    benchmark._run_benchmarks("unrelated")

    # Validate that SomeBenchmark has not run yet
    self.assertFalse(_ran_somebenchmark_1[0])
    self.assertFalse(_ran_somebenchmark_2[0])
    self.assertFalse(_ran_somebenchmark_but_shouldnt[0])

    # Run all the benchmarks, avoid generating any reports
    if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
      del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
    benchmark._run_benchmarks("SomeRandom")

    # Validate that SomeRandomBenchmark ran correctly
    self.assertTrue(_ran_somebenchmark_1[0])
    self.assertTrue(_ran_somebenchmark_2[0])
    self.assertFalse(_ran_somebenchmark_but_shouldnt[0])

    # Reset the flags before testing regex-based selection below.
    _ran_somebenchmark_1[0] = False
    _ran_somebenchmark_2[0] = False
    _ran_somebenchmark_but_shouldnt[0] = False

    # Test running a specific method of SomeRandomBenchmark
    if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
      del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
    benchmark._run_benchmarks("SomeRandom.*1$")

    self.assertTrue(_ran_somebenchmark_1[0])
    self.assertFalse(_ran_somebenchmark_2[0])
    self.assertFalse(_ran_somebenchmark_but_shouldnt[0])

  def testReportingBenchmark(self):
    tempdir = test.get_temp_dir()
    try:
      gfile.MakeDirs(tempdir)
    except OSError as e:
      # It's OK if the directory already exists.
      if " exists:" not in str(e):
        raise e

    # Random prefix keeps concurrent/repeated runs from colliding.
    prefix = os.path.join(tempdir,
                          "reporting_bench_%016x_" % random.getrandbits(64))
    expected_output_file = "%s%s" % (prefix,
                                     "TestReportingBenchmark.benchmarkReport1")
    expected_output_file_2 = "%s%s" % (
        prefix, "TestReportingBenchmark.custom_benchmark_name")
    expected_output_file_3 = "%s%s" % (prefix,
                                       "TestReportingBenchmark.op_benchmark")
    try:
      self.assertFalse(gfile.Exists(expected_output_file))
      # Run benchmark but without env, shouldn't write anything
      if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
        del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
      reporting = TestReportingBenchmark()
      reporting.benchmarkReport1()  # This should run without writing anything
      self.assertFalse(gfile.Exists(expected_output_file))

      # Runbenchmark with env, should write
      os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
      reporting = TestReportingBenchmark()
      reporting.benchmarkReport1()  # This should write
      reporting.benchmarkReport2()  # This should write
      reporting.benchmark_times_an_op()  # This should write

      # Check the files were written
      self.assertTrue(gfile.Exists(expected_output_file))
      self.assertTrue(gfile.Exists(expected_output_file_2))
      self.assertTrue(gfile.Exists(expected_output_file_3))

      # Check the contents are correct
      expected_1 = test_log_pb2.BenchmarkEntry()
      expected_1.name = "TestReportingBenchmark.benchmarkReport1"
      expected_1.iters = 1

      expected_2 = test_log_pb2.BenchmarkEntry()
      expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
      expected_2.iters = 2
      expected_2.extras["number_key"].double_value = 3
      expected_2.extras["other_key"].string_value = "string"

      expected_3 = test_log_pb2.BenchmarkEntry()
      expected_3.name = "TestReportingBenchmark.op_benchmark"
      expected_3.iters = 1000

      def read_benchmark_entry(f):
        # Each report file holds exactly one serialized BenchmarkEntries.
        s = gfile.GFile(f, "rb").read()
        entries = test_log_pb2.BenchmarkEntries.FromString(s)
        self.assertEquals(1, len(entries.entry))
        return entries.entry[0]

      read_benchmark_1 = read_benchmark_entry(expected_output_file)
      self.assertProtoEquals(expected_1, read_benchmark_1)

      read_benchmark_2 = read_benchmark_entry(expected_output_file_2)
      self.assertProtoEquals(expected_2, read_benchmark_2)

      # The op benchmark's wall time and trace are nondeterministic, so
      # compare selected fields instead of the whole proto.
      read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
      self.assertEquals(expected_3.name, read_benchmark_3.name)
      self.assertEquals(expected_3.iters, read_benchmark_3.iters)
      self.assertGreater(read_benchmark_3.wall_time, 0)
      full_trace = read_benchmark_3.extras["full_trace_chrome_format"]
      json_trace = json.loads(full_trace.string_value)
      self.assertTrue(isinstance(json_trace, dict))
      self.assertTrue("traceEvents" in json_trace.keys())
      allocator_keys = [k for k in read_benchmark_3.extras.keys()
                        if k.startswith("allocator_maximum_num_bytes_")]
      self.assertGreater(len(allocator_keys), 0)
      for k in allocator_keys:
        self.assertGreater(read_benchmark_3.extras[k].double_value, 0)
    finally:
      gfile.DeleteRecursively(tempdir)
# Entry point: run the test cases when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
swatilodha/coala | tests/parsing/ConfParserTest.py | 26 | 5556 | import os
import tempfile
import unittest
from collections import OrderedDict
from coalib.parsing.ConfParser import ConfParser
from coalib.settings.Section import Section
class ConfParserTest(unittest.TestCase):
    """Exercises ConfParser against a representative .coafile."""

    # Sample config covering comments, empty values, escaped separators,
    # multi-line values, section-qualified keys and empty list elements.
    example_file = """to be ignored
a_default, another = val
TEST = tobeignored # do you know that thats a comment
test = push
t =
escaped_\\=equal = escaped_\\#hash
escaped_\\\\backslash = escaped_\\ space
escaped_\\,comma = escaped_\\.dot
[MakeFiles]
j , another = a
multiline
value
# just a omment
# just a omment
nokey. = value
default.test = content
makefiles.lastone = val
[EMPTY_ELEM_STRIP]
A = a, b, c
B = a, ,, d
C = ,,,
"""

    def setUp(self):
        # Write the sample config to a temp .coafile and parse it once;
        # every test consumes self.sections.
        self.tempdir = tempfile.gettempdir()
        self.file = os.path.join(self.tempdir, ".coafile")
        self.nonexistentfile = os.path.join(self.tempdir, "e81k7bd98t")
        with open(self.file, "w") as file:
            file.write(self.example_file)

        self.uut = ConfParser()
        # Make sure the "nonexistent" path really does not exist.
        try:
            os.remove(self.nonexistentfile)
        except FileNotFoundError:
            pass

        self.sections = self.uut.parse(self.file)

    def tearDown(self):
        os.remove(self.file)

    def test_parse_nonexisting_file(self):
        self.assertRaises(FileNotFoundError,
                          self.uut.parse,
                          self.nonexistentfile)
        self.assertNotEqual(self.uut.parse(self.file, True), self.sections)

    def test_parse_nonexisting_section(self):
        self.assertRaises(IndexError,
                          self.uut.get_section,
                          "inexistent section")

    def test_parse_default_section(self):
        default_should = OrderedDict([
            ('a_default', 'val'),
            ('another', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', ''),
            ('escaped_=equal', 'escaped_#hash'),
            ('escaped_\\backslash', 'escaped_ space'),
            ('escaped_,comma', 'escaped_.dot')])

        key, val = self.sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'default')

        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, default_should)

    def test_parse_makefiles_section(self):
        # Default-section settings are inherited into named sections.
        makefiles_should = OrderedDict([
            ('j', 'a\nmultiline\nvalue'),
            ('another', 'a\nmultiline\nvalue'),
            ('comment1', '# just a omment'),
            ('comment2', '# just a omment'),
            ('lastone', 'val'),
            ('comment3', ''),
            ('a_default', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', ''),
            ('escaped_=equal', 'escaped_#hash'),
            ('escaped_\\backslash', 'escaped_ space'),
            ('escaped_,comma', 'escaped_.dot')])

        # Pop off the default section.
        self.sections.popitem(last=False)

        key, val = self.sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'makefiles')

        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, makefiles_should)

        self.assertEqual(val["comment1"].key, "comment1")

    def test_parse_empty_elem_strip_section(self):
        empty_elem_strip_should = OrderedDict([
            ('a', 'a, b, c'),
            ('b', 'a, ,, d'),
            ('c', ',,,'),
            ('comment4', ''),
            ('a_default', 'val'),
            ('another', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', ''),
            ('escaped_=equal', 'escaped_#hash'),
            ('escaped_\\backslash', 'escaped_ space'),
            ('escaped_,comma', 'escaped_.dot')])

        # Pop off the default and makefiles section.
        self.sections.popitem(last=False)
        self.sections.popitem(last=False)

        key, val = self.sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'empty_elem_strip')

        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, empty_elem_strip_should)

    def test_remove_empty_iter_elements(self):
        # Test with empty-elem stripping.
        uut = ConfParser(remove_empty_iter_elements=True)
        uut.parse(self.file)
        self.assertEqual(list(uut.get_section("EMPTY_ELEM_STRIP")["A"]),
                         ["a", "b", "c"])
        self.assertEqual(list(uut.get_section("EMPTY_ELEM_STRIP")["B"]),
                         ["a", "d"])
        self.assertEqual(list(uut.get_section("EMPTY_ELEM_STRIP")["C"]),
                         [])

        # Test without stripping.
        uut = ConfParser(remove_empty_iter_elements=False)
        uut.parse(self.file)
        self.assertEqual(list(uut.get_section("EMPTY_ELEM_STRIP")["A"]),
                         ["a", "b", "c"])
        self.assertEqual(list(uut.get_section("EMPTY_ELEM_STRIP")["B"]),
                         ["a", "", "", "d"])
        self.assertEqual(list(uut.get_section("EMPTY_ELEM_STRIP")["C"]),
                         ["", "", "", ""])

    def test_config_directory(self):
        # Passing a directory must also be accepted by parse().
        self.uut.parse(self.tempdir)
| agpl-3.0 |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_collective_barrier_api.py | 2 | 1173 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
from test_collective_api_base import TestDistBase
paddle.enable_static()
class TestCollectiveBarrierAPI(TestDistBase):
    """Runs the collective barrier script on each supported backend."""

    def _setup_config(self):
        # No extra distributed configuration is needed for barrier tests.
        pass

    def test_barrier_nccl(self):
        self.check_with_place("collective_barrier_api.py", "barrier", "nccl")

    def test_barrier_gloo(self):
        # NOTE(review): the trailing "5" positional argument's meaning is
        # defined by TestDistBase.check_with_place — confirm against that base
        # class before changing it.
        self.check_with_place("collective_barrier_api.py", "barrier", "gloo",
                              "5")
# Entry point: run the test cases when executed directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
JPFrancoia/scikit-learn | sklearn/decomposition/online_lda.py | 11 | 27161 | """
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://matthewdhoffman.com/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
import warnings
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
# Machine epsilon for 64-bit floats; added to normalizers below so divisions
# stay strictly positive.  `np.float64` replaces the deprecated `np.float`
# alias (removed in NumPy 1.24); both name the same type, so the value is
# unchanged.
EPS = np.finfo(np.float64).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
                             max_iters,
                             mean_change_tol, cal_sstats, random_state):
    """E-step: update document-topic distribution.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.

    exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expection of log topic word distribution.
        In the literature, this is `exp(E[log(beta)])`.

    doc_topic_prior : float
        Prior of document topic distribution `theta`.

    max_iters : int
        Max number of iterations for updating document topic distribution in
        the E-step.

    mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-setp.

    cal_sstats : boolean
        Parameter that indicate to calculate sufficient statistics or not.
        Set `cal_sstats` to `True` when we need to run M-step.

    random_state : RandomState instance or None
        Parameter that indicate how to initialize document topic distribution.
        Set `random_state` to None will initialize document topic distribution
        to a constant number.

    Returns
    -------
    (doc_topic_distr, suff_stats) :
        `doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. we can calculate `E[log(theta)]`
        from it.
        `suff_stats` is expected sufficient statistics for the M-step.
            When `cal_sstats == False`, this will be None.

    """
    is_sparse_x = sp.issparse(X)
    n_samples, n_features = X.shape
    n_topics = exp_topic_word_distr.shape[0]

    if random_state:
        # Random gamma init gives each document a distinct starting point;
        # otherwise all documents start from the same constant distribution.
        doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
    else:
        doc_topic_distr = np.ones((n_samples, n_topics))

    # In the literature, this is `exp(E[log(theta)])`
    exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))

    # diff on `component_` (only calculate it when `cal_diff` is True)
    suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None

    if is_sparse_x:
        # Access CSR internals directly to avoid per-row sparse overhead.
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr

    for idx_d in xrange(n_samples):
        # `ids` are the word indices present in document d, `cnts` their counts.
        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]

        doc_topic_d = doc_topic_distr[idx_d, :]
        # The next one is a copy, since the inner loop overwrites it.
        exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
        exp_topic_word_d = exp_topic_word_distr[:, ids]

        # Iterate between `doc_topic_d` and `norm_phi` until convergence
        for _ in xrange(0, max_iters):
            last_d = doc_topic_d

            # The optimal phi_{dwk} is proportional to
            # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS

            doc_topic_d = (exp_doc_topic_d *
                           np.dot(cnts / norm_phi, exp_topic_word_d.T))
            # Note: adds doc_topic_prior to doc_topic_d, in-place.
            _dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
                                      exp_doc_topic_d)

            if mean_change(last_d, doc_topic_d) < mean_change_tol:
                break
        doc_topic_distr[idx_d, :] = doc_topic_d

        # Contribution of document d to the expected sufficient
        # statistics for the M step.
        if cal_sstats:
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)

    return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `_component`. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
The default learning method is going to be changed to 'batch' in the 0.20 release.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int optional (default=0)
How often to evaluate perplexity. Only used in `fit` method.
set it to 0 or negative number to not evalute perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://matthewdhoffman.com//code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method=None,
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online", None):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
    def _init_latent_vars(self, n_features):
        """Initialize latent variables.

        Sets the effective priors (``doc_topic_prior_`` and
        ``topic_word_prior_``, defaulting to the symmetric value
        1 / n_topics) and draws the initial topic-word matrix
        ``components_`` from a Gamma distribution.
        """
        self.random_state_ = check_random_state(self.random_state)
        self.n_batch_iter_ = 1
        self.n_iter_ = 0

        # Symmetric default prior: 1 / number of topics.
        if self.doc_topic_prior is None:
            self.doc_topic_prior_ = 1. / self.n_topics
        else:
            self.doc_topic_prior_ = self.doc_topic_prior

        if self.topic_word_prior is None:
            self.topic_word_prior_ = 1. / self.n_topics
        else:
            self.topic_word_prior_ = self.topic_word_prior

        # Gamma(100, 1/100) draws cluster around 1 with small variance.
        init_gamma = 100.
        init_var = 1. / init_gamma
        # In the literature, this is called `lambda`
        self.components_ = self.random_state_.gamma(
            init_gamma, init_var, (self.n_topics, n_features))

        # In the literature, this is `exp(E[log(beta)])`
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))
    def _e_step(self, X, cal_sstats, random_init, parallel=None):
        """E-step in EM update.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        cal_sstats : boolean
            Parameter that indicate whether to calculate sufficient statistics
            or not. Set ``cal_sstats`` to True when we need to run M-step.
        random_init : boolean
            Parameter that indicate whether to initialize document topic
            distribution randomly in the E-step. Set it to True in training
            steps.
        parallel : joblib.Parallel (optional)
            Pre-initialized instance of joblib.Parallel.

        Returns
        -------
        (doc_topic_distr, suff_stats) :
            `doc_topic_distr` is unnormalized topic distribution for each
            document. In the literature, this is called `gamma`.
            `suff_stats` is expected sufficient statistics for the M-step.
            When `cal_sstats == False`, it will be None.
        """
        # Run e-step in parallel
        random_state = self.random_state_ if random_init else None

        # TODO: make Parallel._effective_n_jobs public instead?
        n_jobs = _get_n_jobs(self.n_jobs)
        if parallel is None:
            parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
        # Each worker updates the topic distribution for one contiguous
        # slice of documents; slices partition the full sample range.
        results = parallel(
            delayed(_update_doc_distribution)(X[idx_slice, :],
                                              self.exp_dirichlet_component_,
                                              self.doc_topic_prior_,
                                              self.max_doc_update_iter,
                                              self.mean_change_tol, cal_sstats,
                                              random_state)
            for idx_slice in gen_even_slices(X.shape[0], n_jobs))

        # merge result
        doc_topics, sstats_list = zip(*results)
        doc_topic_distr = np.vstack(doc_topics)

        if cal_sstats:
            # This step finishes computing the sufficient statistics for the
            # M-step.
            suff_stats = np.zeros(self.components_.shape)
            for sstats in sstats_list:
                suff_stats += sstats
            suff_stats *= self.exp_dirichlet_component_
        else:
            suff_stats = None

        return (doc_topic_distr, suff_stats)
    def _em_step(self, X, total_samples, batch_update, parallel=None):
        """EM update for 1 iteration.

        Updates `_component` by batch VB or online VB.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        total_samples : integer
            Total number of documents. It is only used when
            batch_update is `False`.
        batch_update : boolean
            Parameter that controls updating method.
            `True` for batch learning, `False` for online learning.
        parallel : joblib.Parallel
            Pre-initialized instance of joblib.Parallel

        Returns
        -------
        None. Updated state is stored on ``self`` (``components_``,
        ``exp_dirichlet_component_``, ``n_batch_iter_``).
        """
        # E-step: only the sufficient statistics are needed here; the
        # per-document distribution is discarded.
        _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
                                     parallel=parallel)

        # M-step
        if batch_update:
            self.components_ = self.topic_word_prior_ + suff_stats
        else:
            # online update
            # In the literature, the weight is `rho`
            weight = np.power(self.learning_offset + self.n_batch_iter_,
                              -self.learning_decay)
            # Scale the mini-batch statistics up to the full corpus size.
            doc_ratio = float(total_samples) / X.shape[0]
            self.components_ *= (1 - weight)
            self.components_ += (weight * (self.topic_word_prior_
                                           + doc_ratio * suff_stats))

        # update `component_` related variables
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))
        self.n_batch_iter_ += 1
        return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
    def partial_fit(self, X, y=None):
        """Online VB with Mini-Batch update.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        y : Ignored.

        Returns
        -------
        self
        """
        self._check_params()
        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.partial_fit")
        n_samples, n_features = X.shape
        batch_size = self.batch_size

        # initialize parameters or check
        if not hasattr(self, 'components_'):
            self._init_latent_vars(n_features)

        if n_features != self.components_.shape[1]:
            raise ValueError(
                "The provided data has %d dimensions while "
                "the model was trained with feature size %d." %
                (n_features, self.components_.shape[1]))

        n_jobs = _get_n_jobs(self.n_jobs)
        with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
            # Each mini-batch is an online (decayed-weight) EM update;
            # self.total_samples is the assumed corpus size for scaling.
            for idx_slice in gen_batches(n_samples, batch_size):
                self._em_step(X[idx_slice, :],
                              total_samples=self.total_samples,
                              batch_update=False,
                              parallel=parallel)

        return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
if learning_method == None:
warnings.warn("The default value for 'learning_method' will be "
"changed from 'online' to 'batch' in the release 0.20. "
"This warning was introduced in 0.18.",
DeprecationWarning)
learning_method = 'online'
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
    def transform(self, X):
        """Transform data X according to the fitted model.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.

        Returns
        -------
        doc_topic_distr : shape=(n_samples, n_topics)
            Document topic distribution for X (rows normalized to sum to 1).

        Raises
        ------
        NotFittedError
            If the model has not been fitted yet.
        ValueError
            If X's feature count differs from the fitted model's.
        """
        if not hasattr(self, 'components_'):
            raise NotFittedError("no 'components_' attribute in model."
                                 " Please fit model first.")

        # make sure feature size is the same in fitted model and in X
        X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
        n_samples, n_features = X.shape
        if n_features != self.components_.shape[1]:
            raise ValueError(
                "The provided data has %d dimensions while "
                "the model was trained with feature size %d." %
                (n_features, self.components_.shape[1]))

        # No random init and no sufficient statistics: inference only.
        doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
                                          random_init=False)
        # normalize doc_topic_distr so each row is a proper distribution
        doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
        return doc_topic_distr
    def _approx_bound(self, X, doc_topic_distr, sub_sampling):
        """Estimate the variational bound.

        Estimate the variational bound over "all documents" using only the
        documents passed in as X. Since log-likelihood of each word cannot
        be computed directly, we use this bound to estimate it.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        doc_topic_distr : array, shape=(n_samples, n_topics)
            Document topic distribution. In the literature, this is called
            gamma.
        sub_sampling : boolean, optional, (default=False)
            Compensate for subsampling of documents.
            It is used in calculating the bound in online learning.

        Returns
        -------
        score : float
        """

        def _loglikelihood(prior, distr, dirichlet_distr, size):
            # calculate log-likelihood of a Dirichlet-distributed block
            # (shared by the theta and beta terms below)
            score = np.sum((prior - distr) * dirichlet_distr)
            score += np.sum(gammaln(distr) - gammaln(prior))
            score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
            return score

        is_sparse_x = sp.issparse(X)
        n_samples, n_topics = doc_topic_distr.shape
        n_features = self.components_.shape[1]
        score = 0

        # E[log theta] and E[log beta] under the variational distributions.
        dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
        dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
        doc_topic_prior = self.doc_topic_prior_
        topic_word_prior = self.topic_word_prior_

        if is_sparse_x:
            # Pull out the raw CSR arrays once for fast per-row slicing.
            X_data = X.data
            X_indices = X.indices
            X_indptr = X.indptr

        # E[log p(docs | theta, beta)]
        for idx_d in xrange(0, n_samples):
            if is_sparse_x:
                # Word ids and counts of document idx_d from the CSR arrays.
                ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
                cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            else:
                ids = np.nonzero(X[idx_d, :])[0]
                cnts = X[idx_d, ids]
            temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
                    + dirichlet_component_[:, ids])
            norm_phi = logsumexp(temp)
            score += np.dot(cnts, norm_phi)

        # compute E[log p(theta | alpha) - log q(theta | gamma)]
        score += _loglikelihood(doc_topic_prior, doc_topic_distr,
                                dirichlet_doc_topic, self.n_topics)

        # Compensate for the subsampling of the population of documents
        if sub_sampling:
            doc_ratio = float(self.total_samples) / n_samples
            score *= doc_ratio

        # E[log p(beta | eta) - log q (beta | lambda)]
        score += _loglikelihood(topic_word_prior, self.components_,
                                dirichlet_component_, n_features)

        return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
    def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
        """Calculate approximate perplexity for data X.

        Perplexity is defined as exp(-1. * log-likelihood per word)

        Parameters
        ----------
        X : array-like or sparse matrix, [n_samples, n_features]
            Document word matrix.
        doc_topic_distr : None or array, shape=(n_samples, n_topics)
            Document topic distribution.
            If it is None, it will be generated by applying transform on X.
        sub_sampling : boolean
            Whether to scale the word count up to the full corpus size
            (``self.total_samples``) when X is a subsample.

        Returns
        -------
        score : float
            Perplexity score.

        Raises
        ------
        NotFittedError
            If the model has not been fitted yet.
        ValueError
            If a supplied doc_topic_distr does not match X or the model.
        """
        if not hasattr(self, 'components_'):
            raise NotFittedError("no 'components_' attribute in model."
                                 " Please fit model first.")

        X = self._check_non_neg_array(X,
                                      "LatentDirichletAllocation.perplexity")

        if doc_topic_distr is None:
            doc_topic_distr = self.transform(X)
        else:
            # Sanity-check the caller-provided distribution against X and
            # the fitted model before trusting it.
            n_samples, n_topics = doc_topic_distr.shape
            if n_samples != X.shape[0]:
                raise ValueError("Number of samples in X and doc_topic_distr"
                                 " do not match.")

            if n_topics != self.n_topics:
                raise ValueError("Number of topics does not match.")

        current_samples = X.shape[0]
        bound = self._approx_bound(X, doc_topic_distr, sub_sampling)

        if sub_sampling:
            # Scale the word count as if the whole corpus had been seen.
            word_cnt = X.sum() * (float(self.total_samples) / current_samples)
        else:
            word_cnt = X.sum()
        perword_bound = bound / word_cnt

        return np.exp(-1.0 * perword_bound)
| bsd-3-clause |
sacsant/avocado-misc-tests | io/disk/ssd/ezfiotest.py | 4 | 3108 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Narasimhan V <sim@linux.vnet.ibm.com>
"""
This test script is intended to give a block-level based overview of
SSD performance.
"""
import os
from avocado import Test
from avocado.utils import build
from avocado.utils import process
from avocado.utils import genio
from avocado.utils.software_manager import SoftwareManager
import avocado.utils.git as git
class EzfioTest(Test):

    """
    This test script is intended to give a block-level based overview of
    SSD performance.  Uses FIO to perform the actual IO tests.
    Places the output files in avocado test's outputdir.

    :param device: Name of the ssd block device
    """

    def setUp(self):
        """
        Build 'fio and ezfio'.
        """
        self.disk = self.params.get('disk', default='/dev/nvme0n1')
        cmd = 'ls %s' % self.disk
        # Compare the exit status by value: the original used `is not 0`,
        # an identity test that is not guaranteed to work for integers.
        if process.system(cmd, ignore_status=True) != 0:
            self.cancel("%s does not exist" % self.disk)
        fio_path = os.path.join(self.teststmpdir, 'fio')
        fio_link = 'https://github.com/axboe/fio.git'
        git.get_repo(fio_link, destination_dir=fio_path)
        build.make(fio_path, make='./configure')
        build.make(fio_path)
        build.make(fio_path, extra_args='install')
        self.ezfio_path = os.path.join(self.teststmpdir, 'ezfio')
        ezfio_link = 'https://github.com/earlephilhower/ezfio.git'
        git.get_repo(ezfio_link, destination_dir=self.ezfio_path)
        self.utilization = self.params.get('utilization', default='100')
        # aio-max-nr is 65536 by default, and test fails if QD is 256 or above
        genio.write_file("/proc/sys/fs/aio-max-nr", "1048576")
        smm = SoftwareManager()
        # Not a package that must be installed, so not skipping.
        if not smm.check_installed("sdparm") and not smm.install("sdparm"):
            self.log.debug("Can not install sdparm")
        # Reuse the same SoftwareManager instance for the required packages
        # (the original constructed a redundant second instance).
        pkg_list = ['libaio', 'libaio-devel']
        for pkg in pkg_list:
            if pkg and not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Package %s is missing and could not be installed"
                            % pkg)
        self.cwd = os.getcwd()

    def test(self):
        """
        Performs ezfio test on the block device.
        """
        os.chdir(self.ezfio_path)
        cmd = './ezfio.py -d %s -o "%s" -u %s --yes' \
            % (self.disk, self.outputdir, self.utilization)
        process.run(cmd, shell=True)

    def tearDown(self):
        """
        Clean up: restore the working directory changed in test().
        """
        os.chdir(self.cwd)
| gpl-2.0 |
jeroanan/GameCollection | Tests/Interactors/Game/TestCountGamesInteractor.py | 1 | 1595 | # Copyright (c) David Wilson 2015
# Icarus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Icarus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Icarus. If not, see <http://www.gnu.org/licenses/>.
from Interactors.GameInteractors import CountGamesInteractor
from Interactors.Interactor import Interactor
from Tests.Interactors.InteractorTestBase import InteractorTestBase
class TestCountGamesInteractor(InteractorTestBase):
    """Unit tests for the CountGamesInteractor class"""

    def setUp(self):
        """Build the interactor under test with mock persistence wired in."""
        super().setUp()
        interactor = CountGamesInteractor()
        interactor.persistence = self.persistence
        self.__target = interactor

    def test_is_interactor(self):
        """CountGamesInteractor must derive from the Interactor base class."""
        self.assertIsInstance(self.__target, Interactor)

    def test_execute_calls_persistence_method(self):
        """execute() must delegate counting to persistence.count_games."""
        uid = "user_id"
        self.__target.execute(uid)
        self.persistence.count_games.assert_called_with(uid)
| gpl-3.0 |
ravibhure/ansible | lib/ansible/plugins/action/sros_config.py | 79 | 4193 | #
# Copyright 2016 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import os
import re
import time
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.module_utils._text import to_text
from ansible.plugins.action.sros import ActionModule as _ActionModule
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for sros_config.

    Renders the `src` template locally before the module runs, writes the
    device's returned configuration backup to ./backup/, and strips private
    (dunder-named) keys from the result.
    """

    def run(self, tmp=None, task_vars=None):
        """Template `src` (if given), run the module, then handle backups
        and strip `__...__` keys from the result dict."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # to_text(exc) works on both py2 and py3; exc.message is
                # py2-only and raised AttributeError on py3.
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters.  Iterate over a snapshot of the keys:
        # deleting from a dict while iterating its live view raises
        # RuntimeError on Python 3.
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        """Return the directory relative paths resolve against: the role
        path when running inside a role, else the playbook basedir."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write `contents` to backup/<host>_config.<timestamp>, replacing
        any previous backups for the same host. Returns the new path."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # Use a context manager so the file handle is closed deterministically
        # (the original leaked the handle from a bare open().write()).
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve the `src` argument to a file, render it with the task's
        templar, and replace `src` with the rendered text.

        Raises ValueError if the resolved path does not exist.
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # BUGFIX: the original called urlsplit('src') on the literal string,
        # so URL-style sources were never detected.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # NOTE(review): this return value is discarded by run(), which
            # only catches ValueError -- preserved as-is.
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # BUGFIX: the original tested hasattr(self._task, "_block:") --
            # the stray colon meant this branch never executed.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
ekiwi/tinyos-1.x | contrib/handhelds/swtest/testSerialSetTime/shimmer_timesync.py | 2 | 2462 | #! /usr/bin/env python
#
# $Id: shimmer_timesync.py,v 1.1 2010/03/31 19:35:39 ayer1 Exp $
#
# Copyright (c) 2007, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Jason Waterman
# July, 2007
# truncated to shimmer_timesync by
# Steve Ayer
# March, 2010
import serial
import struct
import time
import random
import sys
import shimmerUtil
# Find the data serial port
# this needs to find the real serial port
port = shimmerUtil.find_data_port(True)
if port == '':
    print 'Could not find SHIMMER data port port. Exiting.'
    sys.exit()

# 115200 baud for the SHIMMER data port.
speed = 115200
print 'Found SHIMMER data port on %s' % (port)

# Open the port with a 1-second read timeout and drop any stale input.
ser = serial.Serial(port, speed, timeout = 1)
ser.flushInput()

print "Synchronizing clocks..."
# Current wall-clock time, whole seconds since the epoch.
t1 = int(time.time())
# Send the 32-bit timestamp one byte at a time, most-significant byte
# first (big-endian).  The random sub-second sleep between bytes is
# presumably required by the device's receive protocol -- TODO confirm.
for i in range(0, 4):
    t2 = (t1 >> 8 * (3-i)) & 0x000000ff
    time.sleep(random.random())
    t2_str = struct.pack('B', t2)
    ser.write(t2_str)
print "Wrote %d to shimmer. Done!" % t1

ser.close()
| bsd-3-clause |
kamalx/edx-platform | lms/djangoapps/commerce/tests/__init__.py | 17 | 3178 | # -*- coding: utf-8 -*-
""" Commerce app tests package. """
import json
from django.test import TestCase
from django.test.utils import override_settings
import httpretty
import jwt
import mock
from commerce import ecommerce_api_client
from student.tests.factories import UserFactory
TEST_PUBLIC_URL_ROOT = 'http://www.example.com'
TEST_API_URL = 'http://www-internal.example.com/api'
TEST_API_SIGNING_KEY = 'edx'
TEST_BASKET_ID = 7
TEST_ORDER_NUMBER = '100004'
TEST_PAYMENT_DATA = {
'payment_processor_name': 'test-processor',
'payment_form_data': {},
'payment_page_url': 'http://example.com/pay',
}
@override_settings(ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY, ECOMMERCE_API_URL=TEST_API_URL)
class EcommerceApiClientTest(TestCase):
    """ Tests to ensure the client is initialized properly. """

    # Fixed identity values asserted against the JWT payload below.
    TEST_USER_EMAIL = 'test@example.com'
    TEST_CLIENT_ID = 'test-client-id'

    def setUp(self):
        """Create a user with a known email for JWT payload assertions."""
        super(EcommerceApiClientTest, self).setUp()

        self.user = UserFactory()
        self.user.email = self.TEST_USER_EMAIL
        self.user.save()  # pylint: disable=no-member

    @httpretty.activate
    def test_tracking_context(self):
        """
        Ensure the tracking context is set up in the api client correctly and
        automatically.
        """
        # fake an ecommerce api request.
        httpretty.register_uri(
            httpretty.POST,
            '{}/baskets/1/'.format(TEST_API_URL),
            status=200, body='{}',
            adding_headers={'Content-Type': 'application/json'}
        )

        # Stub the analytics tracker so it reports a known client_id.
        mock_tracker = mock.Mock()
        mock_tracker.resolve_context = mock.Mock(return_value={'client_id': self.TEST_CLIENT_ID})
        with mock.patch('commerce.tracker.get_tracker', return_value=mock_tracker):
            ecommerce_api_client(self.user).baskets(1).post()

        # make sure the request's JWT token payload included correct tracking context values.
        actual_header = httpretty.last_request().headers['Authorization']

        expected_payload = {
            'username': self.user.username,
            'full_name': self.user.profile.name,
            'email': self.user.email,
            'tracking_context': {
                'lms_user_id': self.user.id,  # pylint: disable=no-member
                'lms_client_id': self.TEST_CLIENT_ID,
            },
        }
        expected_header = 'JWT {}'.format(jwt.encode(expected_payload, TEST_API_SIGNING_KEY))
        self.assertEqual(actual_header, expected_header)

    @httpretty.activate
    def test_client_unicode(self):
        """
        The client should handle json responses properly when they contain
        unicode character data.

        Regression test for ECOM-1606.
        """
        expected_content = '{"result": "Préparatoire"}'
        httpretty.register_uri(
            httpretty.GET,
            '{}/baskets/1/order/'.format(TEST_API_URL),
            status=200, body=expected_content,
            adding_headers={'Content-Type': 'application/json'},
        )
        actual_object = ecommerce_api_client(self.user).baskets(1).order.get()
        self.assertEqual(actual_object, {u"result": u"Préparatoire"})
| agpl-3.0 |
treejames/viewfinder | backend/op/viewfinder_op.py | 13 | 17030 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Viewfinder base operation.
ViewfinderOperation is the base class for all other Viewfinder operations. It contains code
that is common across at least two derived operations.
"""
__authors__ = ['andy@emailscrubbed.com (Andy Kimball)']
from copy import deepcopy
from tornado import gen
from viewfinder.backend.base.exceptions import InvalidRequestError, PermissionError
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.db.post import Post
from viewfinder.backend.db.user import User
class ViewfinderOperation(object):
"""Base class for other Viewfinder operations, containing common code."""
    def __init__(self, client):
        """Capture the db client and the currently-executing operation.

        NOTE(review): Operation.GetCurrent() presumably reads ambient
        per-request operation context, so instances should be constructed
        while an operation is active -- confirm against Operation.
        """
        self._client = client
        self._op = Operation.GetCurrent()
    @classmethod
    @gen.coroutine
    def _CheckEpisodePostAccess(cls, action, client, user_id, ep_ph_ids_list):
        """Ensures that given user has access to the set of episodes and photos in "ep_ph_ids_list",
        which is a list of (episode_id, photo_ids) tuples.

        "action" is used in error messages and controls the 'remove' special
        case: removing an already unshared/removed photo is not an error.

        Returns list of (episode, posts) tuples that corresponds to "ep_ph_ids_list".
        """
        # Gather db keys for all source episodes and posts, and check for duplicate episodes and photos.
        episode_keys = []
        post_keys = []
        for episode_id, photo_ids in ep_ph_ids_list:
            episode_keys.append(DBKey(episode_id, None))
            for photo_id in photo_ids:
                post_keys.append(DBKey(episode_id, photo_id))

        # Query for all episodes and posts in parallel and in batches.
        episodes, posts = yield [gen.Task(Episode.BatchQuery,
                                          client,
                                          episode_keys,
                                          None,
                                          must_exist=False),
                                 gen.Task(Post.BatchQuery,
                                          client,
                                          post_keys,
                                          None,
                                          must_exist=False)]

        # Check that user has ability to access all source episodes and posts.
        # posts_iter advances in lockstep with the flattened photo_ids order
        # used to build post_keys above.
        ep_posts_list = []
        posts_iter = iter(posts)
        for (episode_id, photo_ids), episode in zip(ep_ph_ids_list, episodes):
            if episode is None:
                raise InvalidRequestError('Episode "%s" does not exist.' % episode_id)

            posts_list = []
            for photo_id in photo_ids:
                post = next(posts_iter)
                if post is None:
                    raise InvalidRequestError('Photo "%s" does not exist or is not in episode "%s".' %
                                              (photo_id, episode_id))

                # Do not raise error if removing a photo that has already been unshared or removed.
                if action != 'remove':
                    if post.IsUnshared():
                        raise PermissionError('Cannot %s photo "%s", because it was unshared.' % (action, photo_id))

                    if post.IsRemoved():
                        raise PermissionError('Cannot %s photo "%s", because it was removed.' % (action, photo_id))

                posts_list.append(post)

            ep_posts_list.append((episode, posts_list))

        # Query for followers of all unique source viewpoints in parallel and in a batch.
        follower_keys = {episode.viewpoint_id: DBKey(user_id, episode.viewpoint_id) for episode in episodes}
        followers = yield gen.Task(Follower.BatchQuery, client, follower_keys.values(), None, must_exist=False)

        # Get set of all viewpoints that are accessible to this user.
        allowed_viewpoint_ids = set(follower.viewpoint_id for follower in followers
                                    if follower is not None and follower.CanViewContent())

        # Check access permission to the source viewpoints.
        for episode in episodes:
            if episode.viewpoint_id not in allowed_viewpoint_ids:
                raise PermissionError('User %d does not have permission to %s episode "%s".' %
                                      (user_id, action, episode.episode_id))

        # Tornado-style coroutine return.
        raise gen.Return(ep_posts_list)
    @classmethod
    @gen.coroutine
    def _CheckCopySources(cls, action, client, user_id, source_ep_dicts):
        """Ensures that the sharer or saver has access to the source episodes and that the source
        photos are part of the source episodes. Caller is expected to check permission to add to
        the given viewpoint.

        Raises InvalidRequestError when the same photo is targeted at the
        same new episode twice within one request.

        Returns a list of the source episodes and posts in the form of (episode, posts) tuples.
        """
        # Gather list of (episode_id, photo_ids) tuples and check for duplicate posts.
        # Duplicates are detected per (new_episode_id, photo_id) pair.
        unique_keys = set()
        ep_ph_ids_list = []
        for ep_dict in source_ep_dicts:
            ph_ids = []
            for photo_id in ep_dict['photo_ids']:
                db_key = (ep_dict['new_episode_id'], photo_id)
                if db_key in unique_keys:
                    raise InvalidRequestError('Photo "%s" cannot be %sd into episode "%s" more than once in same request.' %
                                              (photo_id, action, ep_dict['new_episode_id']))
                unique_keys.add(db_key)
                ph_ids.append(photo_id)

            ep_ph_ids_list.append((ep_dict['existing_episode_id'], ph_ids))

        # Delegate existence/permission checks to the shared helper.
        ep_posts_list = yield ViewfinderOperation._CheckEpisodePostAccess(action, client, user_id, ep_ph_ids_list)

        raise gen.Return(ep_posts_list)
@classmethod
@gen.coroutine
def _AllocateTargetEpisodeIds(self, client, user_id, device_id, target_viewpoint_id, source_ep_ids):
"""For each episode listed in "source_ep_ids", determines if a child episode already
exists in the given viewpoint. If not, allocates a new episode id using the user's asset
id allocator. The same timestamp used to create the source episode id is used to create
the target episode id.
Returns the list of target episodes ids, including both existing ids and allocated ids.
"""
# First check whether each episode has already been shared/saved into the target viewpoint.
tasks = []
for source_ep_id in source_ep_ids:
query_expr = ('episode.parent_ep_id={id}', {'id': source_ep_id})
tasks.append(gen.Task(Episode.IndexQuery, client, query_expr, None))
target_ep_ids = []
allocate_ids_count = 0
target_episodes_list = yield tasks
for target_episodes in target_episodes_list:
found_match = False
for episode in target_episodes:
if episode.viewpoint_id == target_viewpoint_id:
target_ep_ids.append(episode.episode_id)
found_match = True
break
# If no matching child episode, then need to allocate an episode id.
if not found_match:
target_ep_ids.append(None)
allocate_ids_count += 1
if allocate_ids_count > 0:
# Allocate ids for any episodes which do not yet exist, and merge them into target_ep_ids.
id = yield gen.Task(User.AllocateAssetIds, client, user_id, allocate_ids_count)
for i, source_ep_id in enumerate(source_ep_ids):
if target_ep_ids[i] is None:
timestamp, _, _ = source_ep_id = Episode.DeconstructEpisodeId(source_ep_id)
target_ep_ids[i] = Episode.ConstructEpisodeId(timestamp, device_id, id)
id += 1
raise gen.Return(target_ep_ids)
@classmethod
def _CreateCopyTargetDicts(cls, timestamp, user_id, target_viewpoint_id, source_ep_posts_list, target_ep_ids):
"""Creates list of dicts which will be used to create episodes that are the target of a
share or save operation.
"""
new_ep_dict_list = []
for (source_episode, posts), target_ep_id in zip(source_ep_posts_list, target_ep_ids):
new_ep_dict = {'episode_id': target_ep_id,
'parent_ep_id': source_episode.episode_id,
'user_id': user_id,
'viewpoint_id': target_viewpoint_id,
'timestamp': source_episode.timestamp,
'publish_timestamp': timestamp,
'location': source_episode.location,
'placemark': source_episode.placemark,
'photo_ids': [post.photo_id for post in posts]}
new_ep_dict_list.append(new_ep_dict)
return new_ep_dict_list
  @classmethod
  @gen.coroutine
  def _CheckCopyTargets(cls, action, client, user_id, viewpoint_id, target_ep_dicts):
    """Compiles a list of target episode and post ids that do not exist or are removed. These
    episodes and posts will not be copied as part of the operation.

    Returns the set of target episode and post ids that will be (re)created by the caller.
    """
    # Gather db keys for all target episodes and posts.
    episode_keys = []
    post_keys = []
    for ep_dict in target_ep_dicts:
      episode_keys.append(DBKey(ep_dict['episode_id'], None))
      for photo_id in ep_dict['photo_ids']:
        post_keys.append(DBKey(ep_dict['episode_id'], photo_id))

    # Query for all episodes and posts in parallel and in batches.
    episodes, posts = yield [gen.Task(Episode.BatchQuery,
                                      client,
                                      episode_keys,
                                      None,
                                      must_exist=False),
                             gen.Task(Post.BatchQuery,
                                      client,
                                      post_keys,
                                      None,
                                      must_exist=False)]

    # If a viewable post already exists, don't add it to the set to copy.
    new_ids = set()
    # "posts" comes back in the same order post_keys was built, so consume it with
    # an iterator while walking each episode's photo list below.
    post_iter = iter(posts)
    for ep_dict, episode in zip(target_ep_dicts, episodes):
      if episode is None:
        # Add the episode id to the set to copy.
        new_ids.add(ep_dict['episode_id'])
      else:
        # Only owner user should get this far, since we check that new episode id contains the user's device id.
        assert episode.user_id == user_id, (episode, user_id)

        # Enforce sharing *tree* - no sharing acyclic graph allowed!
        if episode.parent_ep_id != ep_dict['parent_ep_id']:
          raise InvalidRequestError('Cannot %s to episode "%s". It was created from a different parent episode.' %
                                    (action, ep_dict['episode_id']))

        # Cannot share into episodes which are not in the target viewpoint.
        if episode.viewpoint_id != viewpoint_id:
          raise InvalidRequestError('Cannot %s to episode "%s". It is not in viewpoint "%s".' %
                                    (action, episode.episode_id, viewpoint_id))

      # Posts are checked even when the episode already exists, since individual
      # photos may still be missing or removed from it.
      for photo_id in ep_dict['photo_ids']:
        post = next(post_iter)

        # If the post does not exist or is removed, add it to the new list.
        if post is None or post.IsRemoved():
          new_ids.add(Post.ConstructPostId(ep_dict['episode_id'], photo_id))

    raise gen.Return(new_ids)
  @gen.coroutine
  def _CreateNewEpisodesAndPosts(self, new_ep_dicts, new_ids):
    """Creates new episodes and posts within those episodes based on a list returned by
    _CheckCopySources.

    If an episode or post id does not exist in "new_ids", it is not created. The "new_ids"
    set is created by _CheckCopyTargets.
    """
    tasks = []
    # deepcopy so that pop('photo_ids') below does not mutate the caller's dicts.
    for new_ep_dict in deepcopy(new_ep_dicts):
      ep_id = new_ep_dict['episode_id']
      # Only create posts that _CheckCopyTargets marked as missing/removed.
      ph_ids = [ph_id for ph_id in new_ep_dict.pop('photo_ids')
                if Post.ConstructPostId(ep_id, ph_id) in new_ids]

      if ep_id in new_ids:
        tasks.append(gen.Task(Episode.CreateNew, self._client, **new_ep_dict))

      for ph_id in ph_ids:
        post = Post.CreateFromKeywords(episode_id=ep_id, photo_id=ph_id)
        # Clear removal-related labels so a previously removed post becomes
        # viewable again (presumably labels.remove is a no-op when the label is
        # absent -- TODO confirm against the Post column type).
        post.labels.remove(Post.UNSHARED)
        post.labels.remove(Post.REMOVED)
        tasks.append(gen.Task(post.Update, self._client))

    # Run all episode creations and post updates in parallel.
    yield tasks
  @classmethod
  @gen.coroutine
  def _GetAllContactsWithDedup(cls, client, user_id):
    """Query for all contacts and split into a dictionary of deduped contacts which is keyed by contact_id
    and a list of contacts that can be deleted because they're unnecessary.

    Returns: tuple of (retained_contacts_dict, contacts_to_delete_list)
    """
    contacts_to_delete = []
    contacts_to_retain = dict()

    # Query all contacts for this user.
    # Set query limit to be max that we expect multiplied by 2 to allow for some duplicates (there shouldn't be many).
    existing_contacts = yield gen.Task(Contact.RangeQuery,
                                       client,
                                       hash_key=user_id,
                                       range_desc=None,
                                       limit=Contact.MAX_CONTACTS_LIMIT * 2,
                                       col_names=['contact_id', 'labels', 'contact_source'],
                                       scan_forward=True)
    for existing_contact in existing_contacts:
      # On a duplicate contact_id, the earlier row is displaced: it moves to the
      # delete list and the later row is retained (the later row wins -- presumably
      # the range sort order makes it the preferred one; TODO confirm).
      older_existing_contact = contacts_to_retain.pop(existing_contact.contact_id, None)
      if older_existing_contact is not None:
        contacts_to_delete.append(older_existing_contact)
      contacts_to_retain[existing_contact.contact_id] = existing_contact

    raise gen.Return((contacts_to_retain, contacts_to_delete))
@classmethod
def _GetRevivableFollowers(cls, followers):
"""Get subset of the given followers that have been removed but are still revivable."""
return [follower.user_id for follower in followers
if follower.IsRemoved() and not follower.IsUnrevivable()]
  @gen.coroutine
  def _ResolveContactIds(self, contact_dicts):
    """Examines each contact in "contact_dicts" (in the CONTACT_METADATA format). Returns a
    list of the same length containing the (True, user_id, webapp_dev_id) of the contact if
    it is already a Viewfinder user, or allocates new user and web device ids, and returns the
    tuple (False, user_id, webapp_dev_id).

    Raises an InvalidRequestError if any of the user ids do not correspond to real users.
    """
    # Get identity objects for all contacts for which no user_id is given.
    identity_keys = [DBKey(contact_dict['identity'], None)
                     for contact_dict in contact_dicts if 'user_id' not in contact_dict]

    identities = yield gen.Task(Identity.BatchQuery, self._client, identity_keys, None, must_exist=False)

    # Get user objects for all contacts with a user_id given, or if identity is already bound.
    # Note: user_keys is built in contact order, so user_iter below stays aligned.
    user_keys = []
    ident_iter = iter(identities)
    for contact_dict in contact_dicts:
      if 'user_id' in contact_dict:
        user_keys.append(DBKey(contact_dict['user_id'], None))
      else:
        identity = next(ident_iter)
        if identity is not None and identity.user_id is not None:
          user_keys.append(DBKey(identity.user_id, None))

    users = yield gen.Task(User.BatchQuery, self._client, user_keys, None, must_exist=False)

    # Construct result tuples; if a user does not exist, allocate a user_id and webapp_dev_id.
    results = []
    ident_iter = iter(identities)
    user_iter = iter(users)
    for contact_dict in contact_dicts:
      if 'user_id' not in contact_dict:
        identity = next(ident_iter)
        if identity is None or identity.user_id is None:
          # User doesn't yet exist, so allocate new user and web device ids.
          # (No user key was queued for this contact, so user_iter is not advanced.)
          user_id, webapp_dev_id = yield User.AllocateUserAndWebDeviceIds(self._client)
          results.append((False, user_id, webapp_dev_id))
          continue

      user = next(user_iter)
      if user is None:
        # NOTE(review): a bound identity whose user record is missing would trip this
        # assert rather than the error below -- presumably that cannot happen; confirm.
        assert 'user_id' in contact_dict, contact_dict
        raise InvalidRequestError('A user with id %d cannot be found.' % contact_dict['user_id'])

      # User already exists.
      results.append((True, user.user_id, user.webapp_dev_id))

    raise gen.Return(results)
  @gen.coroutine
  def _ResolveContacts(self, contact_dicts, contact_ids, reason=None):
    """Creates a prospective user account for any contacts that are not yet Viewfinder users.

    The "contact_ids" list should have been previously obtained by the caller via a call to
    _ResolveContactIds, and items in it must correspond to "contact_dicts".

    If specified, the "reason" string is passed to the CreateProspective op. This describes what caused the user
    to be created (see db/analytics.py for payload details).
    """
    for contact_dict, (user_exists, user_id, webapp_dev_id) in zip(contact_dicts, contact_ids):
      if not user_exists:
        # Check if previous invocation of this operation already created the user.
        # (Makes the operation idempotent across retries.)
        user = yield gen.Task(User.Query, self._client, user_id, None, must_exist=False)
        if user is None:
          # Create prospective user.
          request = {'user_id': user_id,
                     'webapp_dev_id': webapp_dev_id,
                     'identity_key': contact_dict['identity'],
                     'reason': reason}
          yield Operation.CreateNested(self._client, 'CreateProspectiveOperation.Execute', request)
| apache-2.0 |
iABC2XYZ/abc | PIC/Twiss.py | 5 | 3068 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 11:10:52 2017
@author: e
"""
import numpy as np
from BetaGammaC import NP_Energy2BetaC_GammaC
def GammaT(alphaT, betaT):
    """Twiss gamma computed from alpha and beta: gamma = (1 + alpha^2) / beta."""
    return (1. + alphaT ** 2) / betaT
def SigmaT2D(alphaT, betaT):
    """2x2 Twiss sigma matrix [[beta, -alpha], [-alpha, gamma]] for one plane."""
    gamma = (1. + alphaT ** 2) / betaT
    return np.array([[betaT, -alphaT], [-alphaT, gamma]])
def SigmaT4D(alphaX, betaX, alphaY, betaY):
    """4x4 block-diagonal Twiss sigma matrix for the x and y planes."""
    sigma = np.zeros((4, 4))
    for k, (a, b) in enumerate(((alphaX, betaX), (alphaY, betaY))):
        g = (1. + a ** 2) / b
        sigma[2 * k:2 * k + 2, 2 * k:2 * k + 2] = [[b, -a], [-a, g]]
    return sigma
def SigmaT6D(alphaX, betaX, alphaY, betaY, alphaZ, betaZ):
    """6x6 block-diagonal Twiss sigma matrix for the x, y and z planes."""
    sigma = np.zeros((6, 6))
    planes = ((alphaX, betaX), (alphaY, betaY), (alphaZ, betaZ))
    for k, (a, b) in enumerate(planes):
        g = (1. + a ** 2) / b
        sigma[2 * k:2 * k + 2, 2 * k:2 * k + 2] = [[b, -a], [-a, g]]
    return sigma
def SigmaE2D(alphaT, betaT, emit):
    """2x2 beam sigma matrix: the Twiss sigma scaled by the emittance."""
    gamma = (1. + alphaT ** 2) / betaT
    return emit * np.array([[betaT, -alphaT], [-alphaT, gamma]])
def SigmaE4D(alphaX, alphaY, betaX, betaY, emitX, emitY):
    """4x4 block-diagonal beam sigma: each plane's Twiss block scaled by its emittance."""
    sigma = np.zeros((4, 4))
    planes = ((alphaX, betaX, emitX), (alphaY, betaY, emitY))
    for k, (a, b, e) in enumerate(planes):
        g = (1. + a ** 2) / b
        sigma[2 * k:2 * k + 2, 2 * k:2 * k + 2] = np.array([[b, -a], [-a, g]]) * e
    return sigma
def SigmaE6D(alphaX, alphaY, alphaZ, betaX, betaY, betaZ, emitX, emitY, emitZ):
    """6x6 block-diagonal beam sigma: each plane's Twiss block scaled by its emittance."""
    sigma = np.zeros((6, 6))
    planes = ((alphaX, betaX, emitX),
              (alphaY, betaY, emitY),
              (alphaZ, betaZ, emitZ))
    for k, (a, b, e) in enumerate(planes):
        g = (1. + a ** 2) / b
        sigma[2 * k:2 * k + 2, 2 * k:2 * k + 2] = np.array([[b, -a], [-a, g]]) * e
    return sigma
def Mu2D(muX, muXP):
    """Centroid vector for one plane: (x, x')."""
    return np.array([muX, muXP])


def Mu4D(muX, muXP, muY, muYP):
    """Centroid vector for two transverse planes: (x, x', y, y')."""
    return np.array([muX, muXP, muY, muYP])


def Mu6D(muX, muXP, muY, muYP, muZ, muZP):
    """Centroid vector for all three planes: (x, x', y, y', z, z')."""
    return np.array([muX, muXP, muY, muYP, muZ, muZP])
def Emit_Norm2Geo(emitNorm, energyMeV):
    """Convert normalized emittances to geometric ones.

    Transverse entries (indices 0 and 1) are divided by beta*gamma; the
    longitudinal entry (index 2, when present) by beta*gamma**3.
    Supports inputs of length 1, 2 or 3.
    """
    emitGeo = np.empty_like(emitNorm)
    betaC, gammaC = NP_Energy2BetaC_GammaC(energyMeV)
    bg = betaC * gammaC
    n = len(emitNorm)
    if n == 3:
        emitGeo[0] = emitNorm[0] / bg
        emitGeo[1] = emitNorm[1] / bg
        emitGeo[2] = emitNorm[2] / (betaC * gammaC ** 3)
    elif n == 2:
        emitGeo[0] = emitNorm[0] / bg
        emitGeo[1] = emitNorm[1] / bg
    elif n == 1:
        emitGeo[0] = emitNorm[0] / bg
    return emitGeo
def Emit_Geo2Norm(emitGeo, energyMeV):
    """Convert geometric emittances to normalized ones (inverse of Emit_Norm2Geo).

    Transverse entries (indices 0 and 1) are multiplied by beta*gamma; the
    longitudinal entry (index 2, when present) by beta*gamma**3.
    Supports inputs of length 1, 2 or 3.
    """
    emitNorm = np.empty_like(emitGeo)
    betaC, gammaC = NP_Energy2BetaC_GammaC(energyMeV)
    betaCgammaC = betaC * gammaC
    # Fix: branch on the *input* length. The original tested len(emitNorm) in the
    # elif branches, which only worked because np.empty_like gives the output the
    # same length as the input.
    n = len(emitGeo)
    if n == 3:
        betaCgammaC3 = betaC * gammaC ** 3
        emitNorm[0] = emitGeo[0] * betaCgammaC
        emitNorm[1] = emitGeo[1] * betaCgammaC
        emitNorm[2] = emitGeo[2] * betaCgammaC3
    elif n == 2:
        emitNorm[0] = emitGeo[0] * betaCgammaC
        emitNorm[1] = emitGeo[1] * betaCgammaC
    elif n == 1:
        emitNorm[0] = emitGeo[0] * betaCgammaC
    return emitNorm
| gpl-3.0 |
danialbehzadi/Nokia-RM-1013-2.0.0.11 | webkit/Tools/Scripts/webkitpy/layout_tests/port/http_server_base.py | 15 | 3209 | #!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base class with common routines between the Apache and Lighttpd servers."""
import logging
import os
import time
import urllib
from webkitpy.common.system import filesystem
_log = logging.getLogger("webkitpy.layout_tests.port.http_server_base")


class HttpServerBase(object):
    """Base class with common routines between the Apache and Lighttpd servers."""

    def __init__(self, port_obj):
        # Port object supplying platform/layout-test configuration to subclasses.
        self._port_obj = port_obj

    def wait_for_action(self, action):
        """Repeat the action for 20 seconds or until it succeeds. Returns
        whether it succeeded."""
        start_time = time.time()
        while time.time() - start_time < 20:
            if action():
                return True
            _log.debug("Waiting for action: %s" % action)
            time.sleep(1)

        return False

    def is_server_running_on_all_ports(self):
        """Returns whether the server is running on all the desired ports.

        Subclasses must provide self.mappings: a list of dicts, each with a
        'port' key and an optional 'sslcert' key for HTTPS ports.
        """
        for mapping in self.mappings:
            http_suffix = 's' if 'sslcert' in mapping else ''

            url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port'])

            try:
                # Bypass any configured proxies so we actually hit localhost.
                # (The unused 'response' binding was dropped.)
                urllib.urlopen(url, proxies={})
                _log.debug("Server running at %s" % url)
            except IOError as e:  # 'as' form: valid on Python 2.6+ and 3.x.
                _log.debug("Server NOT running at %s: %s" % (url, e))
                return False

        return True

    def remove_log_files(self, folder, starts_with):
        """Delete every file in 'folder' whose name starts with 'starts_with'."""
        # Renamed loop variable from 'file', which shadowed the builtin.
        for file_name in os.listdir(folder):
            if file_name.startswith(starts_with):
                full_path = os.path.join(folder, file_name)
                filesystem.FileSystem().remove(full_path)
| gpl-3.0 |
TheTimmy/spack | var/spack/repos/builtin/packages/py-deeptools/package.py | 3 | 2155 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyDeeptools(PythonPackage):
    """deepTools addresses the challenge of handling the large amounts of data
    that are now routinely generated from DNA sequencing centers."""

    homepage = "https://pypi.io/packages/source/d/deepTools"
    url = "https://pypi.io/packages/source/d/deepTools/deepTools-2.5.2.tar.gz"

    # MD5 checksum of the 2.5.2 release tarball.
    version('2.5.2', 'ba8a44c128c6bb1ed4ebdb20bf9ae9c2')

    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # Runtime scientific stack, with minimum versions taken from upstream setup.py.
    depends_on('py-numpy@1.9.0:', type=('build', 'run'))
    depends_on('py-scipy@0.17.0:', type=('build', 'run'))
    depends_on('py-py2bit@0.2.0:', type=('build', 'run'))
    depends_on('py-pybigwig@0.2.1:', type=('build', 'run'))
    depends_on('py-pysam@0.8.2:', type=('build', 'run'))
    depends_on('py-matplotlib@1.4.0:', type=('build', 'run'))
    depends_on('py-numpydoc@0.5:', type=('build', 'run'))
| lgpl-2.1 |
camptocamp/ngo-addons-backport | addons/hr_holidays/__openerp__.py | 52 | 3207 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Leave Management',
'version': '1.5',
'author': 'OpenERP SA',
'category': 'Human Resources',
'sequence': 27,
'summary': 'Holidays, Allocation and Leave Requests',
'website': 'http://www.openerp.com',
'description': """
Manage leaves and allocation requests
=====================================
This application controls the holiday schedule of your company. It allows employees to request holidays. Then, managers can review requests for holidays and approve or reject them. This way you can control the overall holiday planning for the company or department.
You can configure several kinds of leaves (sickness, holidays, paid days, ...) and allocate leaves to an employee or department quickly using allocation requests. An employee can also make a request for more days off by making a new Allocation. It will increase the total of available days for that leave type (if the request is accepted).
You can keep track of leaves in different ways by following reports:
* Leaves Summary
* Leaves by Department
* Leaves Analysis
A synchronization with an internal agenda (Meetings of the CRM module) is also possible in order to automatically create a meeting when a holiday request is accepted by setting up a type of meeting in Leave Type.
""",
'images': ['images/hr_allocation_requests.jpeg', 'images/hr_leave_requests.jpeg', 'images/leaves_analysis.jpeg'],
'depends': ['hr', 'base_calendar', 'process', 'resource'],
'data': [
'security/ir.model.access.csv',
'security/ir_rule.xml',
'hr_holidays_workflow.xml',
'hr_holidays_view.xml',
'hr_holidays_data.xml',
'hr_holidays_report.xml',
'report/hr_holidays_report_view.xml',
'report/available_holidays_view.xml',
'wizard/hr_holidays_summary_department_view.xml',
'wizard/hr_holidays_summary_employees_view.xml',
'board_hr_holidays_view.xml',
],
'demo': ['hr_holidays_demo.xml',],
'test': ['test/test_hr_holiday.yml',
'test/hr_holidays_report.yml',
],
'installable': True,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
henrytao-me/openerp.positionq | openerp/addons/project_gtd/wizard/__init__.py | 66 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_gtd_empty
import project_gtd_fill
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dsc-team/dsc-team-kernel-project | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 802 | 2710 | # Core.py - Python extension for perf trace, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested sub-dicts on access."""
    return defaultdict(autodict)

# Registries of per-event, per-field flag and symbolic value names, populated
# by the define_* callbacks below and consulted by flag_str()/symbol_str().
flag_fields = autodict()
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when printing a flag field's values."""
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    """Map a numeric flag bit (or 0) to its symbolic name for an event field."""
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    """Map a numeric value to its symbolic name for an event field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str

def flag_str(event_name, field_name, value):
    """Render the bitmask "value" as delimiter-joined flag names.

    A value of 0 yields the name registered for key 0 (if any); unknown bits
    are silently ignored.
    """
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # Fix: dict.keys() has no sort() on Python 3; sorted() works on 2 and 3.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string

def symbol_str(event_name, field_name, value):
    """Return the symbolic name registered for "value", or "" if unknown."""
    string = ""

    if symbolic_fields[event_name][field_name]:
        # Fix: sorted() instead of list.sort() for Python 3 compatibility.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string
# Common ftrace flag bits and their display names.
trace_flags = {0x00: "NONE",
               0x01: "IRQS_OFF",
               0x02: "IRQS_NOSUPPORT",
               0x04: "NEED_RESCHED",
               0x08: "HARDIRQ",
               0x10: "SOFTIRQ"}

def trace_flag_str(value):
    """Render the trace-flags bitmask as " | "-separated names ("NONE" for 0)."""
    names = []
    remaining = value
    for bit, name in trace_flags.items():
        if not remaining and not bit:
            names.append(name)
            break
        if bit and (remaining & bit) == bit:
            names.append(name)
            remaining &= ~bit
    return " | ".join(names)
| gpl-2.0 |
linuxmaniac/jenkins-job-builder | jenkins_jobs/registry.py | 4 | 8240 | #!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage Jenkins plugin module registry.
import logging
import operator
import pkg_resources
import re
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
logger = logging.getLogger(__name__)
class ModuleRegistry(object):
    """Loads Jenkins Job Builder modules via setuptools entry points and
    dispatches component generation (builders, publishers, ...) to entry-point
    functions or user-defined macros.
    """

    # Class-level cache mapping a component list type to its {name: entry_point}
    # dict; shared by all registry instances to avoid rescanning installed packages.
    entry_points_cache = {}

    def __init__(self, config, plugins_list=None):
        # Modules come from the 'jenkins_jobs.modules' entry point group and are
        # kept sorted by their 'sequence' attribute.
        self.modules = []
        self.modules_by_component_type = {}
        self.handlers = {}
        self.global_config = config

        if plugins_list is None:
            self.plugins_dict = {}
        else:
            self.plugins_dict = self._get_plugins_info_dict(plugins_list)

        for entrypoint in pkg_resources.iter_entry_points(
                group='jenkins_jobs.modules'):
            Mod = entrypoint.load()
            mod = Mod(self)
            self.modules.append(mod)
            self.modules.sort(key=operator.attrgetter('sequence'))
            if mod.component_type is not None:
                self.modules_by_component_type[mod.component_type] = mod

    @staticmethod
    def _get_plugins_info_dict(plugins_list):
        # Flattens the raw plugin list into one dict keyed by both longName and
        # shortName, so either alias resolves to the same plugin info.
        def mutate_plugin_info(plugin_info):
            """
            We perform mutations on a single member of plugin_info here, then
            return a dictionary with the longName and shortName of the plugin
            mapped to its plugin info dictionary.
            """
            version = plugin_info.get('version', '0')
            # Normalize pre-release markers so version comparisons behave.
            plugin_info['version'] = re.sub(r'(.*)-(?:SNAPSHOT|BETA)',
                                            r'\g<1>.preview', version)

            aliases = []
            for key in ['longName', 'shortName']:
                value = plugin_info.get(key, None)
                if value is not None:
                    aliases.append(value)

            plugin_info_dict = {}
            for name in aliases:
                plugin_info_dict[name] = plugin_info

            return plugin_info_dict

        list_of_dicts = [mutate_plugin_info(v) for v in plugins_list]

        plugins_info_dict = {}
        for d in list_of_dicts:
            plugins_info_dict.update(d)

        return plugins_info_dict

    def get_plugin_info(self, plugin_name):
        """ This method is intended to provide information about plugins within
        a given module's implementation of Base.gen_xml. The return value is a
        dictionary with data obtained directly from a running Jenkins instance.
        This allows module authors to differentiate generated XML output based
        on information such as specific plugin versions.

        :arg string plugin_name: Either the shortName or longName of a plugin
          as see in a query that looks like:
          ``http://<jenkins-hostname>/pluginManager/api/json?pretty&depth=2``

        During a 'test' run, it is possible to override JJB's query to a live
        Jenkins instance by passing it a path to a file containing a YAML list
        of dictionaries that mimics the plugin properties you want your test
        output to reflect::

          jenkins-jobs test -p /path/to/plugins-info.yaml

        Below is example YAML that might be included in
        /path/to/plugins-info.yaml.

        .. literalinclude:: /../../tests/cmd/fixtures/plugins-info.yaml
        """
        # Unknown plugins resolve to an empty dict rather than raising.
        return self.plugins_dict.get(plugin_name, {})

    def registerHandler(self, category, name, method):
        # Lazily create the per-category dict on first registration.
        cat_dict = self.handlers.get(category, {})
        if not cat_dict:
            self.handlers[category] = cat_dict
        cat_dict[name] = method

    def getHandler(self, category, name):
        # Raises KeyError if the category or name was never registered.
        return self.handlers[category][name]

    def dispatch(self, component_type,
                 parser, xml_parent,
                 component, template_data={}):
        """This is a method that you can call from your implementation of
        Base.gen_xml or component. It allows modules to define a type
        of component, and benefit from extensibility via Python
        entry points and Jenkins Job Builder :ref:`Macros <macro>`.

        :arg string component_type: the name of the component
          (e.g., `builder`)
        :arg YAMLParser parser: the global YAML Parser
        :arg Element xml_parent: the parent XML element
        :arg dict template_data: values that should be interpolated into
          the component definition

        See :py:class:`jenkins_jobs.modules.base.Base` for how to register
        components of a module.

        See the Publishers module for a simple example of how to use
        this method.
        """
        # NOTE(review): template_data={} is a mutable default; it appears to be
        # read-only here, but confirm no caller mutates it.

        if component_type not in self.modules_by_component_type:
            raise JenkinsJobsException("Unknown component type: "
                                      "'{0}'.".format(component_type))

        component_list_type = self.modules_by_component_type[component_type] \
            .component_list_type

        if isinstance(component, dict):
            # The component is a singleton dictionary of name: dict(args)
            name, component_data = next(iter(component.items()))
            if template_data:
                # Template data contains values that should be interpolated
                # into the component definition
                allow_empty_variables = self.global_config \
                    and self.global_config.has_section('job_builder') \
                    and self.global_config.has_option(
                        'job_builder', 'allow_empty_variables') \
                    and self.global_config.getboolean(
                        'job_builder', 'allow_empty_variables')

                component_data = deep_format(
                    component_data, template_data, allow_empty_variables)
        else:
            # The component is a simple string name, eg "run-tests"
            name = component
            component_data = {}

        # Look for a component function defined in an entry point
        eps = ModuleRegistry.entry_points_cache.get(component_list_type)
        if eps is None:
            module_eps = list(pkg_resources.iter_entry_points(
                group='jenkins_jobs.{0}'.format(component_list_type)))
            eps = {}
            for module_ep in module_eps:
                if module_ep.name in eps:
                    raise JenkinsJobsException(
                        "Duplicate entry point found for component type: "
                        "'{0}', '{0}',"
                        "name: '{1}'".format(component_type, name))
                eps[module_ep.name] = module_ep
            ModuleRegistry.entry_points_cache[component_list_type] = eps
            logger.debug("Cached entry point group %s = %s",
                         component_list_type, eps)

        if name in eps:
            func = eps[name].load()
            func(parser, xml_parent, component_data)
        else:
            # Otherwise, see if it's defined as a macro
            component = parser.data.get(component_type, {}).get(name)
            if component:
                for b in component[component_list_type]:
                    # Pass component_data in as template data to this function
                    # so that if the macro is invoked with arguments,
                    # the arguments are interpolated into the real defn.
                    self.dispatch(component_type,
                                  parser, xml_parent, b, component_data)
            else:
                raise JenkinsJobsException("Unknown entry point or macro '{0}'"
                                           " for component type: '{1}'.".
                                           format(name, component_type))
jonathan-beard/edx-platform | openedx/core/djangoapps/user_api/course_tag/tests/test_api.py | 118 | 1348 | """
Test the user course tag API.
"""
from django.test import TestCase
from student.tests.factories import UserFactory
from openedx.core.djangoapps.user_api.course_tag import api as course_tag_api
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class TestCourseTagAPI(TestCase):
    """
    Test the user service
    """
    def setUp(self):
        super(TestCourseTagAPI, self).setUp()
        self.user = UserFactory.create()
        # Arbitrary course key; the tag API only uses it as an opaque scope.
        self.course_id = SlashSeparatedCourseKey('test_org', 'test_course_number', 'test_run')
        self.test_key = 'test_key'

    def test_get_set_course_tag(self):
        # get a tag that doesn't exist
        tag = course_tag_api.get_course_tag(self.user, self.course_id, self.test_key)
        self.assertIsNone(tag)

        # test setting a new key
        test_value = 'value'
        course_tag_api.set_course_tag(self.user, self.course_id, self.test_key, test_value)
        tag = course_tag_api.get_course_tag(self.user, self.course_id, self.test_key)
        self.assertEqual(tag, test_value)

        # test overwriting an existing key: the new value replaces the old one
        test_value = 'value2'
        course_tag_api.set_course_tag(self.user, self.course_id, self.test_key, test_value)
        tag = course_tag_api.get_course_tag(self.user, self.course_id, self.test_key)
        self.assertEqual(tag, test_value)
| agpl-3.0 |
jorik041/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/networktransaction.py | 190 | 2926 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import time
import urllib2
_log = logging.getLogger(__name__)
class NetworkTimeout(Exception):
    """Raised when NetworkTransaction has exhausted its retry budget."""

    def __str__(self):
        # Stable short name, used verbatim in log output.
        return 'NetworkTimeout'
class NetworkTransaction(object):
    """
    Run a callable, retrying with exponentially growing sleeps whenever it
    raises urllib2.HTTPError, and raising NetworkTimeout once the total
    time slept would exceed timeout_seconds.
    """
    def __init__(self, initial_backoff_seconds=10, grown_factor=1.5, timeout_seconds=(10 * 60), convert_404_to_None=False):
        # initial_backoff_seconds: delay before the first retry; it is
        # multiplied by grown_factor after every failed attempt.
        # convert_404_to_None: treat HTTP 404 as "no result" (return None)
        # instead of an error worth retrying.
        self._initial_backoff_seconds = initial_backoff_seconds
        self._grown_factor = grown_factor
        self._timeout_seconds = timeout_seconds
        self._convert_404_to_None = convert_404_to_None
    def run(self, request):
        """Invoke ``request`` (a zero-argument callable) until it succeeds.

        Returns the callable's result, or None for a 404 when
        convert_404_to_None is set.  Raises NetworkTimeout when one more
        backoff sleep would push the accumulated sleep past
        timeout_seconds.
        """
        self._total_sleep = 0
        self._backoff_seconds = self._initial_backoff_seconds
        while True:
            try:
                return request()
            except urllib2.HTTPError, e:
                if self._convert_404_to_None and e.code == 404:
                    return None
                # Check *before* sleeping so we never sleep past the budget.
                self._check_for_timeout()
                _log.warn("Received HTTP status %s loading \"%s\". Retrying in %s seconds..." % (e.code, e.filename, self._backoff_seconds))
                self._sleep()
    def _check_for_timeout(self):
        if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
            raise NetworkTimeout()
    def _sleep(self):
        # Sleep, account for it, then grow the next backoff geometrically.
        time.sleep(self._backoff_seconds)
        self._total_sleep += self._backoff_seconds
        self._backoff_seconds *= self._grown_factor
| bsd-3-clause |
jhuapl-marti/marti | env-crits/lib/python2.7/site-packages/pkg_resources/tests/test_pkg_resources.py | 242 | 3447 | import sys
import tempfile
import os
import zipfile
import datetime
import time
import subprocess
import pkg_resources
try:
    unicode
except NameError:
    # Python 3 has no ``unicode`` builtin; alias it to ``str`` so the rest
    # of this module (e.g. EggRemover's base class) works on both versions.
    unicode = str
def timestamp(dt):
    """
    Return a timestamp for a local, naive datetime instance.
    """
    if hasattr(dt, 'timestamp'):
        return dt.timestamp()
    # Python 3.2 and earlier: datetime has no timestamp() method.
    return time.mktime(dt.timetuple())
class EggRemover(unicode):
    """A path string that, when called, unregisters and deletes its egg."""

    def __call__(self):
        # Drop the egg from the import path first, then delete the file.
        try:
            sys.path.remove(self)
        except ValueError:
            pass
        if os.path.exists(self):
            os.remove(self)
class TestZipProvider(object):
    finalizers = []
    # Fixed timestamp stamped onto both zip members so freshness checks in
    # the test below are deterministic.
    ref_time = datetime.datetime(2013, 5, 12, 13, 25, 0)
    "A reference time for a file modification"
    @classmethod
    def setup_class(cls):
        "create a zip egg and add it to sys.path"
        egg = tempfile.NamedTemporaryFile(suffix='.egg', delete=False)
        zip_egg = zipfile.ZipFile(egg, 'w')
        zip_info = zipfile.ZipInfo()
        zip_info.filename = 'mod.py'
        zip_info.date_time = cls.ref_time.timetuple()
        zip_egg.writestr(zip_info, 'x = 3\n')
        zip_info = zipfile.ZipInfo()
        zip_info.filename = 'data.dat'
        zip_info.date_time = cls.ref_time.timetuple()
        zip_egg.writestr(zip_info, 'hello, world!')
        zip_egg.close()
        egg.close()
        sys.path.append(egg.name)
        # EggRemover cleans both sys.path and the temp file in teardown.
        cls.finalizers.append(EggRemover(egg.name))
    @classmethod
    def teardown_class(cls):
        for finalizer in cls.finalizers:
            finalizer()
    def test_resource_filename_rewrites_on_change(self):
        """
        If a previous call to get_resource_filename has saved the file,
        but the file has since been mutated with different contents of the
        same size and modification time, a subsequent call should detect
        the change and re-extract the pristine zip contents (the final
        assert expects the original 'hello, world!' back).
        """
        import mod
        manager = pkg_resources.ResourceManager()
        zp = pkg_resources.ZipProvider(mod)
        filename = zp.get_resource_filename(manager, 'data.dat')
        actual = datetime.datetime.fromtimestamp(os.stat(filename).st_mtime)
        assert actual == self.ref_time
        # Mutate the extracted file: same length, then force the same mtime,
        # so only the content differs from the zip member.
        f = open(filename, 'w')
        f.write('hello, world?')
        f.close()
        ts = timestamp(self.ref_time)
        os.utime(filename, (ts, ts))
        filename = zp.get_resource_filename(manager, 'data.dat')
        f = open(filename)
        assert f.read() == 'hello, world!'
        manager.cleanup_resources()
class TestResourceManager(object):
    def test_get_cache_path(self):
        """get_cache_path must hand back a string-like filesystem path."""
        manager = pkg_resources.ResourceManager()
        cache_path = manager.get_cache_path('foo')
        failure = "Unexpected type from get_cache_path: " + str(type(cache_path))
        assert isinstance(cache_path, (unicode, str)), failure
class TestIndependence:
    """
    Tests to ensure that pkg_resources runs independently from setuptools.
    """

    def test_setuptools_not_imported(self):
        """
        In a separate Python environment, import pkg_resources and assert
        that action doesn't cause setuptools to be imported.
        """
        program = '; '.join((
            'import pkg_resources',
            'import sys',
            'assert "setuptools" not in sys.modules, '
            '"setuptools was imported"',
        ))
        subprocess.check_call([sys.executable, '-c', program])
| mit |
class Cartridge:
    """In-memory iNES cartridge image: fixed-size PRG banks plus header state."""

    def __init__(self):
        self.banks = {}
        self.bank_id = 0
        self.pc = 0
        self.inespgr = 1
        self.ineschr = 1
        self.inesmap = 1
        self.inesmir = 1
        self.rs = 0
        self.path = ''

    def nes_id(self):
        # Magic bytes: "NES" followed by the MS-DOS EOF marker.
        return [0x4e, 0x45, 0x53, 0x1a]

    def nes_get_header(self):
        """Return the 16-byte iNES header as a list of ints."""
        return self.nes_id() + [
            self.inespgr,
            self.ineschr,
            self.inesmir,
            self.inesmap,
        ] + [0] * 8

    def set_iNES_prg(self, inespgr):
        self.inespgr = inespgr

    def set_iNES_chr(self, ineschr):
        self.ineschr = ineschr

    def set_iNES_map(self, inesmap):
        self.inesmap = inesmap

    def set_iNES_mir(self, inesmir):
        self.inesmir = inesmir

    def set_bank_id(self, id):
        # Lazily create the bank: empty code, unknown origin, 8 KB capacity.
        if id not in self.banks:
            self.banks[id] = {'code': [], 'start': None, 'size': 1024 * 8}
        self.bank_id = id

    def set_org(self, org):
        """Move the program counter to ``org``, padding with 0xff if needed."""
        self.set_bank_id(self.bank_id)
        bank = self.banks[self.bank_id]
        if not bank['start']:
            # First origin for this bank becomes its load address.
            bank['start'] = org
            self.pc = org
        else:
            while self.pc < org:
                self.append_code([0xff])
            self.pc = org

    def append_code(self, code):
        """Append a list of byte values to the current bank, advancing pc."""
        self.set_bank_id(self.bank_id)
        for byte in code:
            assert byte <= 0xff
        self.banks[self.bank_id]['code'].extend(code)
        self.pc += len(code)

    def get_code(self):
        self.set_bank_id(self.bank_id)
        return self.banks[self.bank_id]['code']

    def get_ines_code(self):
        """Return the full ROM image: header + each bank padded to its size."""
        self.set_bank_id(self.bank_id)
        rom = list(self.nes_get_header())
        for bank_id in self.banks:
            bank = self.banks[bank_id]
            # Pad the bank in place with 0xff up to its fixed capacity.
            bank['code'].extend([0xff] * (bank['size'] - len(bank['code'])))
            rom.extend(bank['code'])
        return rom
| bsd-3-clause |
toontownfunserver/Panda3D-1.9.0 | python/Lib/email/mime/multipart.py | 480 | 1573 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base class for MIME multipart/* type messages."""
__all__ = ['MIMEMultipart']
from email.mime.base import MIMEBase
class MIMEMultipart(MIMEBase):
    """Base class for MIME multipart/* type messages."""

    def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
                 **_params):
        """Create a multipart/* message.

        _subtype  -- multipart subtype, 'mixed' by default; proper
                     Content-Type and MIME-Version headers are set.
        boundary  -- explicit multipart boundary string; computed on demand
                     when omitted.
        _subparts -- iterable of initial subparts to attach; further parts
                     can always be added later via attach().
        _params   -- additional Content-Type header parameters.
        """
        MIMEBase.__init__(self, 'multipart', _subtype, **_params)
        # The Message superclass's is_multipart() assumes that a multipart
        # payload is a list, so start from an empty one.
        self._payload = []
        if _subparts:
            for part in _subparts:
                self.attach(part)
        if boundary:
            self.set_boundary(boundary)
cristian99garcia/pilas-activity | pilas/video/video.py | 7 | 3167 | # -*- encoding: utf-8 -*-
# NOTE(review): everything below is a single triple-quoted string literal —
# the entire module is disabled/dead code (importing it only builds and
# discards a string). Presumably parked pending an OpenCV/PySFML port;
# confirm intent before re-enabling. Content kept byte-identical.
'''
import pilas
try:
    import opencv
    from opencv import highgui
except ImportError:
    opencv = None
import os
try:
    from PySFML import sf
except ImportError:
    pass
class MissingOpencv(Exception):
    def __init__(self):
        self.value = "Open CV no esta instalado, obtengalo en http://opencv.willowgarage.com"
    def __str__(self):
        return repr(self.value)
def error(biblioteca, web):
    print "Error, no ecuentra la biblioteca '%s' (de %s)" %(biblioteca, web)
def no_opencv():
    from pilas.utils import esta_en_sesion_interactiva
    if esta_en_sesion_interactiva():
        error('opencv', 'http://opencv.willowgarage.com')
    else:
        raise MissingOpencv()
class DeCamara(pilas.actores.Actor):
    """
    Nos permite poner en pantalla el video proveniente de la camara web.
    """
    def __init__(self, ancho=640, alto=480):
        if opencv is None:
            no_opencv()
            return
        import webcam
        self.camara = webcam.CamaraWeb
        self.ultimo_numero_de_cuadro = 0
        pilas.actores.Actor.__init__(self, 'fondos/pasto.png')
        pilas.mundo.agregar_tarea_siempre(0.15,self.actualizar_video)
    def actualizar_video(self):
        cuadro, numero_de_cuadro = self.camara.obtener_imagen(self.ultimo_numero_de_cuadro)
        self.ultimo_numero_de_cuadro = numero_de_cuadro
        self.imagen.LoadFromPixels(640, 480, cuadro)
        return True
class VideoDeArchivo(object):
    def __init__(self, ruta):
        if opencv is None:
            no_opencv()
            return
        if not os.path.isfile(ruta):
            raise IOError('El archiyo no existe')
        self._camara = highgui.cvCreateFileCapture(ruta)
        self.fps = highgui.cvGetCaptureProperty(self._camara, highgui.CV_CAP_PROP_FPS)
        self.altura = highgui.cvGetCaptureProperty(self._camara, highgui.CV_CAP_PROP_FRAME_HEIGHT)
        self.ancho =highgui.cvGetCaptureProperty(self._camara, highgui.CV_CAP_PROP_FRAME_WIDTH)
        super(VideoDeArchivo, self).__init__()
    def obtener_imagen(self):
        imagen_ipl = highgui.cvQueryFrame(self._camara)
        imagen_ipl = opencv.cvGetMat(imagen_ipl)
        return opencv.adaptors.Ipl2PIL(imagen_ipl).convert('RGBA').tostring()
class DePelicula(pilas.actores.Actor):
    """
    Nos permite poner en pantalla un video desde un archivo.
    Toma como parametro la ruta del video.
    """
    def __init__(self, path, ancho=640, alto=480):
        self._camara = VideoDeArchivo(path)
        pilas.actores.Actor.__init__(self)
        self._altura_cuadro = self._camara.altura
        self._ancho_cuadro = self._camara.ancho
        subrect = self._actor.GetSubRect()
        subrect.Right = self._ancho_cuadro
        subrect.Bottom = self._altura_cuadro
        self._actor.SetSubRect(subrect)
        self.centro = ('centro', 'centro')
        pilas.mundo.agregar_tarea_siempre(1/self._camara.fps,self.actualizar_video)
    def actualizar_video(self):
        self.imagen.LoadFromPixels(self._ancho_cuadro, self._altura_cuadro, self._camara.obtener_imagen())
        return True
'''
| gpl-3.0 |
labcodes/django | django/db/models/options.py | 6 | 34598 | import copy
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import connections
from django.db.models import Manager
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
# Sentinel for _get_fields(include_parents=...): follow parents only up to
# the current model's concrete model in a proxy chain (see get_fields()).
PROXY_PARENTS = object()
# Shared empty default for Options._relation_tree
# (see _populate_directed_relation_graph()).
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
    "The return type of '%s' should never be mutated. If you want to manipulate this list "
    "for your own use, make a copy first."
)
# Option names accepted in a model's ``class Meta``; anything else makes
# contribute_to_class() raise TypeError.
DEFAULT_NAMES = (
    'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
    'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
    'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
    'auto_created', 'index_together', 'apps', 'default_permissions',
    'select_on_save', 'default_related_name', 'required_db_features',
    'required_db_vendor', 'base_manager_name', 'default_manager_name',
    'indexes',
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
def make_immutable_fields_list(name, data):
    # Wrap ``data`` in an ImmutableList so accidental mutation of a cached
    # field list raises a warning message naming the accessor (``name``).
    return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
    """
    Per-model metadata container; installed on each model class as
    ``Model._meta`` by contribute_to_class().
    """
    # Names of cached properties derived from *forward* fields; cleared by
    # _expire_cache(forward=True).
    FORWARD_PROPERTIES = {
        'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
        '_forward_fields_map', 'managers', 'managers_map', 'base_manager',
        'default_manager',
    }
    # Names of cached properties derived from *reverse* relations; cleared by
    # _expire_cache(reverse=True).
    REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
    default_apps = apps
    def __init__(self, meta, app_label=None):
        # Per-arguments cache for _get_fields(); reset by _expire_cache().
        self._get_fields_cache = {}
        self.local_fields = []
        self.local_many_to_many = []
        self.private_fields = []
        self.local_managers = []
        self.base_manager_name = None
        self.default_manager_name = None
        self.model_name = None
        self.verbose_name = None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self._ordering_clash = False
        self.indexes = []
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.default_permissions = ('add', 'change', 'delete')
        self.permissions = []
        self.object_name = None
        self.app_label = app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.required_db_features = []
        self.required_db_vendor = None
        # ``meta`` is the raw ``class Meta`` (or None); consumed and deleted
        # in contribute_to_class().
        self.meta = meta
        self.pk = None
        self.auto_field = None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = OrderedDict()
        self.auto_created = False
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
        # A custom app registry to use, if you're making a separate model set.
        self.apps = self.default_apps
        self.default_related_name = None
    @property
    def label(self):
        """Return "app_label.ObjectName" for this model."""
        return '%s.%s' % (self.app_label, self.object_name)
    @property
    def label_lower(self):
        """Return "app_label.modelname" (lower-cased object name)."""
        return '%s.%s' % (self.app_label, self.model_name)
    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        return self.apps.app_configs.get(self.app_label)
    @property
    def installed(self):
        """True if this model's app has a registered app config."""
        return self.app_config is not None
    def contribute_to_class(self, cls, name):
        """Attach this Options to ``cls`` as _meta and apply ``class Meta``."""
        from django.db import connection
        from django.db.backends.utils import truncate_name
        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)
        # Store the original user-defined values for each option,
        # for use when serializing the model definition
        self.original_attrs = {}
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        else:
            self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
        del self.meta
        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
    def _prepare(self, model):
        """Finalize setup: resolve order_with_respect_to and ensure a pk."""
        if self.order_with_respect_to:
            # The app registry will not be ready at this point, so we cannot
            # use get_field().
            query = self.order_with_respect_to
            try:
                self.order_with_respect_to = next(
                    f for f in self._get_fields(reverse=False)
                    if f.name == query or f.attname == query
                )
            except StopIteration:
                raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
            self.ordering = ('_order',)
            if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
                model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(iter(self.parents.values()))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
                if not field.remote_field.parent_link:
                    raise ImproperlyConfigured(
                        'Add parent_link=True to %s.' % field,
                    )
            else:
                auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True)
                model.add_to_class('id', auto)
    def add_manager(self, manager):
        """Register a manager declared on the model and invalidate caches."""
        self.local_managers.append(manager)
        self._expire_cache()
    def add_field(self, field, private=False):
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if private:
            self.private_fields.append(field)
        elif field.is_relation and field.many_to_many:
            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
        else:
            self.local_fields.insert(bisect(self.local_fields, field), field)
            self.setup_pk(field)
        # If the field being added is a relation to another known field,
        # expire the cache on this field and the forward cache on the field
        # being referenced, because there will be new relationships in the
        # cache. Otherwise, expire the cache of references *to* this field.
        # The mechanism for getting at the related model is slightly odd -
        # ideally, we'd just ask for field.related_model. However, related_model
        # is a cached property, and all the models haven't been loaded yet, so
        # we need to make sure we don't cache a string reference.
        if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
            try:
                field.remote_field.model._meta._expire_cache(forward=False)
            except AttributeError:
                pass
            self._expire_cache()
        else:
            self._expire_cache(reverse=False)
    def setup_pk(self, field):
        """Adopt ``field`` as the primary key if none has been set yet."""
        if not self.pk and field.primary_key:
            self.pk = field
            field.serialize = False
    def setup_proxy(self, target):
        """
        Do the internal setup so that the current model is a proxy for
        "target".
        """
        self.pk = target._meta.pk
        self.proxy_for_model = target
        self.db_table = target._meta.db_table
    def __repr__(self):
        return '<Options for %s>' % self.object_name
    def __str__(self):
        return "%s.%s" % (self.app_label, self.model_name)
    def can_migrate(self, connection):
        """
        Return True if the model can/should be migrated on the `connection`.
        `connection` can be either a real connection or a connection alias.
        """
        if self.proxy or self.swapped or not self.managed:
            return False
        if isinstance(connection, str):
            # Resolve a connection alias to the real connection object.
            connection = connections[connection]
        if self.required_db_vendor:
            return self.required_db_vendor == connection.vendor
        if self.required_db_features:
            return all(getattr(connection.features, feat, False)
                       for feat in self.required_db_features)
        return True
    @property
    def verbose_name_raw(self):
        """Return the untranslated verbose name."""
        with override(None):
            return str(self.verbose_name)
    @property
    def swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                # Compare case-insensitively against label_lower.
                if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
                    return swapped_for
        return None
    @cached_property
    def managers(self):
        """Return all managers for the model in MRO order, de-duplicated by name."""
        managers = []
        seen_managers = set()
        bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
        for depth, base in enumerate(bases):
            for manager in base._meta.local_managers:
                if manager.name in seen_managers:
                    continue
                # Copy so the rebound .model doesn't leak onto the base class.
                manager = copy.copy(manager)
                manager.model = self.model
                seen_managers.add(manager.name)
                managers.append((depth, manager.creation_counter, manager))
        return make_immutable_fields_list(
            "managers",
            (m[2] for m in sorted(managers)),
        )
    @cached_property
    def managers_map(self):
        """Return a {manager_name: manager} mapping for this model."""
        return {manager.name: manager for manager in self.managers}
    @cached_property
    def base_manager(self):
        """Return the base manager, creating a plain Manager if none is named."""
        base_manager_name = self.base_manager_name
        if not base_manager_name:
            # Get the first parent's base_manager_name if there's one.
            for parent in self.model.mro()[1:]:
                if hasattr(parent, '_meta'):
                    if parent._base_manager.name != '_base_manager':
                        base_manager_name = parent._base_manager.name
                    break
        if base_manager_name:
            try:
                return self.managers_map[base_manager_name]
            except KeyError:
                raise ValueError(
                    "%s has no manager named %r" % (
                        self.object_name,
                        base_manager_name,
                    )
                )
        manager = Manager()
        manager.name = '_base_manager'
        manager.model = self.model
        manager.auto_created = True
        return manager
    @cached_property
    def default_manager(self):
        """Return the default manager: named, inherited, or first declared."""
        default_manager_name = self.default_manager_name
        if not default_manager_name and not self.local_managers:
            # Get the first parent's default_manager_name if there's one.
            for parent in self.model.mro()[1:]:
                if hasattr(parent, '_meta'):
                    default_manager_name = parent._meta.default_manager_name
                    break
        if default_manager_name:
            try:
                return self.managers_map[default_manager_name]
            except KeyError:
                raise ValueError(
                    "%s has no manager named %r" % (
                        self.object_name,
                        default_manager_name,
                    )
                )
        if self.managers:
            return self.managers[0]
    @cached_property
    def fields(self):
        """
        Return a list of all forward fields on the model and its parents,
        excluding ManyToManyFields.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # For legacy reasons, the fields property should only contain forward
        # fields that are not private or with a m2m cardinality. Therefore we
        # pass these three filters as filters to the generator.
        # The third lambda is a longwinded way of checking f.related_model - we don't
        # use that property directly because related_model is a cached property,
        # and all the models may not have been loaded yet; we don't want to cache
        # the string reference to the related_model.
        def is_not_an_m2m_field(f):
            return not (f.is_relation and f.many_to_many)
        def is_not_a_generic_relation(f):
            return not (f.is_relation and f.one_to_many)
        def is_not_a_generic_foreign_key(f):
            return not (
                f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
            )
        return make_immutable_fields_list(
            "fields",
            (f for f in self._get_fields(reverse=False)
             if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
        )
    @cached_property
    def concrete_fields(self):
        """
        Return a list of all concrete fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        return make_immutable_fields_list(
            "concrete_fields", (f for f in self.fields if f.concrete)
        )
    @cached_property
    def local_concrete_fields(self):
        """
        Return a list of all concrete fields on the model.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        return make_immutable_fields_list(
            "local_concrete_fields", (f for f in self.local_fields if f.concrete)
        )
    @cached_property
    def many_to_many(self):
        """
        Return a list of all many to many fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this list.
        """
        return make_immutable_fields_list(
            "many_to_many",
            (f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
        )
    @cached_property
    def related_objects(self):
        """
        Return all related objects pointing to the current model. The related
        objects can come from a one-to-one, one-to-many, or many-to-many field
        relation type.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        return make_immutable_fields_list(
            "related_objects",
            # Keep hidden relations only when they come from a ManyToManyField.
            (obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
        )
    @cached_property
    def _forward_fields_map(self):
        # Map of both field name and attname -> forward field instance.
        res = {}
        fields = self._get_fields(reverse=False)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res
    @cached_property
    def fields_map(self):
        # Like _forward_fields_map, but for reverse fields (hidden included).
        res = {}
        fields = self._get_fields(forward=False, include_hidden=True)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res
    def get_field(self, field_name):
        """
        Return a field instance given the name of a forward or reverse field.
        """
        try:
            # In order to avoid premature loading of the relation tree
            # (expensive) we prefer checking if the field is a forward field.
            return self._forward_fields_map[field_name]
        except KeyError:
            # If the app registry is not ready, reverse fields are
            # unavailable, therefore we throw a FieldDoesNotExist exception.
            if not self.apps.models_ready:
                raise FieldDoesNotExist(
                    "%s has no field named '%s'. The app cache isn't ready yet, "
                    "so if this is an auto-created related field, it won't "
                    "be available yet." % (self.object_name, field_name)
                )
            try:
                # Retrieve field instance by name from cached or just-computed
                # field map.
                return self.fields_map[field_name]
            except KeyError:
                raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
    def get_base_chain(self, model):
        """
        Return a list of parent classes leading to `model` (ordered from
        closest to most distant ancestor). This has to handle the case where
        `model` is a grandparent or even more distant relation.
        """
        if not self.parents:
            return []
        if model in self.parents:
            return [model]
        for parent in self.parents:
            res = parent._meta.get_base_chain(model)
            if res:
                res.insert(0, parent)
                return res
        return []
    def get_parent_list(self):
        """
        Return all the ancestors of this model as a list ordered by MRO.
        Useful for determining if something is an ancestor, regardless of lineage.
        """
        result = OrderedSet(self.parents)
        for parent in self.parents:
            for ancestor in parent._meta.get_parent_list():
                result.add(ancestor)
        return list(result)
    def get_ancestor_link(self, ancestor):
        """
        Return the field on the current model which points to the given
        "ancestor". This is possibly an indirect link (a pointer to a parent
        model, which points, eventually, to the ancestor). Used when
        constructing table joins for model inheritance.
        Return None if the model isn't an ancestor of this one.
        """
        if ancestor in self.parents:
            return self.parents[ancestor]
        for parent in self.parents:
            # Tries to get a link field from the immediate parent
            parent_link = parent._meta.get_ancestor_link(ancestor)
            if parent_link:
                # self.parents[parent] can be None (e.g. for a proxied model);
                # in that case fall back to the link found further up the
                # chain.
                return self.parents[parent] or parent_link
    def get_path_to_parent(self, parent):
        """
        Return a list of PathInfos containing the path from the current
        model to the parent model, or an empty list if parent is not a
        parent of the current model.
        """
        if self.model is parent:
            return []
        # Skip the chain of proxy to the concrete proxied model.
        proxied_model = self.concrete_model
        path = []
        opts = self
        for int_model in self.get_base_chain(parent):
            if int_model is proxied_model:
                # The concrete proxied model itself contributes no join step.
                opts = int_model._meta
            else:
                final_field = opts.parents[int_model]
                targets = (final_field.remote_field.get_related_field(),)
                opts = int_model._meta
                path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
        return path
    def get_path_from_parent(self, parent):
        """
        Return a list of PathInfos containing the path from the parent
        model to the current model, or an empty list if parent is not a
        parent of the current model.
        """
        if self.model is parent:
            return []
        model = self.concrete_model
        # Get a reversed base chain including both the current and parent
        # models.
        chain = model._meta.get_base_chain(parent)
        chain.reverse()
        chain.append(model)
        # Construct a list of the PathInfos between models in chain.
        path = []
        for i, ancestor in enumerate(chain[:-1]):
            child = chain[i + 1]
            link = child._meta.get_ancestor_link(ancestor)
            path.extend(link.get_reverse_path_info())
        return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
related_objects_graph[f.remote_field.model._meta.concrete_model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
    @cached_property
    def _relation_tree(self):
        """
        List of fields on other models that point to this model. Computed
        lazily (for all models at once) by
        _populate_directed_relation_graph() on first access.
        """
        return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Return a list of fields associated to the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
    def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                    seen_models=None):
        """
        Internal helper function to return fields of the model.
        * If forward=True, then fields defined on this model are returned.
        * If reverse=True, then relations pointing to this model are returned.
        * If include_hidden=True, then fields with is_hidden=True are returned.
        * The include_parents argument toggles if fields from parent models
          should be included. It has three values: True, False, and
          PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
          fields defined for the current model or any of its parents in the
          parent chain to the model's concrete model.
        """
        if include_parents not in (True, False, PROXY_PARENTS):
            raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
        # This helper function is used to allow recursion in ``get_fields()``
        # implementation and to provide a fast way for Django's internals to
        # access specific subsets of fields.
        # We must keep track of which models we have already seen. Otherwise we
        # could include the same field multiple times from different models.
        topmost_call = False
        if seen_models is None:
            seen_models = set()
            topmost_call = True
        seen_models.add(self.model)
        # Creates a cache key composed of all arguments
        # (topmost_call is part of the key because private fields are only
        # appended at the top level, see below).
        cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
        try:
            # In order to avoid list manipulation. Always return a shallow copy
            # of the results.
            return self._get_fields_cache[cache_key]
        except KeyError:
            pass
        fields = []
        # Recursively call _get_fields() on each parent, with the same
        # options provided in this call.
        if include_parents is not False:
            for parent in self.parents:
                # In diamond inheritance it is possible that we see the same
                # model from two different routes. In that case, avoid adding
                # fields from the same parent again.
                if parent in seen_models:
                    continue
                if (parent._meta.concrete_model != self.concrete_model and
                        include_parents == PROXY_PARENTS):
                    continue
                for obj in parent._meta._get_fields(
                        forward=forward, reverse=reverse, include_parents=include_parents,
                        include_hidden=include_hidden, seen_models=seen_models):
                    # Skip parent links that do not point to this model's own
                    # concrete model.
                    if getattr(obj, 'parent_link', False) and obj.model != self.concrete_model:
                        continue
                    fields.append(obj)
        if reverse and not self.proxy:
            # Tree is computed once and cached until the app cache is expired.
            # It is composed of a list of fields pointing to the current model
            # from other models.
            all_fields = self._relation_tree
            for field in all_fields:
                # If hidden fields should be included or the relation is not
                # intentionally hidden, add to the fields dict.
                if include_hidden or not field.remote_field.hidden:
                    fields.append(field.remote_field)
        if forward:
            fields.extend(
                field for field in chain(self.local_fields, self.local_many_to_many)
            )
            # Private fields are recopied to each child model, and they get a
            # different model as field.model in each child. Hence we have to
            # add the private fields separately from the topmost call. If we
            # did this recursively similar to local_fields, we would get field
            # instances with field.model != self.model.
            if topmost_call:
                fields.extend(
                    f for f in self.private_fields
                )
        # In order to avoid list manipulation. Always
        # return a shallow copy of the results
        fields = make_immutable_fields_list("get_fields()", fields)
        # Store result into cache for later access
        self._get_fields_cache[cache_key] = fields
        return fields
    @property
    def has_auto_field(self):
        # Deprecated accessor kept for backwards compatibility; callers
        # should check ``Model._meta.auto_field is not None`` directly.
        warnings.warn(
            'Model._meta.has_auto_field is deprecated in favor of checking if '
            'Model._meta.auto_field is not None.',
            RemovedInDjango21Warning, stacklevel=2
        )
        return self.auto_field is not None
    @has_auto_field.setter
    def has_auto_field(self, value):
        # Intentional no-op: assignments are silently ignored (presumably so
        # legacy code that sets this attribute keeps working during the
        # deprecation period — confirm before removing).
        pass
@cached_property
def _property_names(self):
"""
Return a set of the names of the properties defined on the model.
Internal helper for model initialization.
"""
return frozenset({
attr for attr in
dir(self.model) if isinstance(getattr(self.model, attr), property)
})
| bsd-3-clause |
GrimDerp/httpie | tests/test_sessions.py | 42 | 6319 | # coding=utf-8
import os
import shutil
import sys
import pytest
from httpie.plugins.builtin import HTTPBasicAuth
from utils import TestEnvironment, mk_config_dir, http, HTTP_OK, \
no_content_type
from fixtures import UNICODE
class SessionTestBase(object):
    """Shared scaffolding for session tests: one unique config dir per test."""

    def start_session(self, httpbin):
        """Create and reuse a unique config dir for each test."""
        self.config_dir = mk_config_dir()

    def teardown_method(self, method):
        shutil.rmtree(self.config_dir)

    def env(self):
        """Return a TestEnvironment bound to this test's config dir.

        Every environment created within a single test method shares the
        same config_dir; that is what allows session files to be reused
        across successive ``http()`` invocations.
        """
        return TestEnvironment(config_dir=self.config_dir)
class TestSessionFlow(SessionTestBase):
    """
    These tests start with an existing session created by the explicit
    `start_session()` call at the top of each test method.
    """
    def start_session(self, httpbin):
        """
        Start a full-blown session with a custom request header,
        authorization, and response cookies.
        """
        super(TestSessionFlow, self).start_session(httpbin)
        # Seed the session: custom header, basic auth, and a response cookie
        # set via httpbin's /cookies/set endpoint (hence --follow).
        r1 = http('--follow', '--session=test', '--auth=username:password',
                  'GET', httpbin.url + '/cookies/set?hello=world',
                  'Hello:World',
                  env=self.env())
        assert HTTP_OK in r1
    def test_session_created_and_reused(self, httpbin):
        self.start_session(httpbin)
        # Verify that the session created in setup_method() has been used.
        r2 = http('--session=test',
                  'GET', httpbin.url + '/get', env=self.env())
        assert HTTP_OK in r2
        assert r2.json['headers']['Hello'] == 'World'
        assert r2.json['headers']['Cookie'] == 'hello=world'
        assert 'Basic ' in r2.json['headers']['Authorization']
    def test_session_update(self, httpbin):
        self.start_session(httpbin)
        # Get a response to a request from the original session.
        r2 = http('--session=test', 'GET', httpbin.url + '/get', env=self.env())
        assert HTTP_OK in r2
        # Make a request modifying the session data.
        r3 = http('--follow', '--session=test', '--auth=username:password2',
                  'GET', httpbin.url + '/cookies/set?hello=world2', 'Hello:World2',
                  env=self.env())
        assert HTTP_OK in r3
        # Get a response to a request from the updated session: the new
        # header, cookie, and credentials must all have been persisted.
        r4 = http('--session=test', 'GET', httpbin.url + '/get', env=self.env())
        assert HTTP_OK in r4
        assert r4.json['headers']['Hello'] == 'World2'
        assert r4.json['headers']['Cookie'] == 'hello=world2'
        assert (r2.json['headers']['Authorization'] !=
                r4.json['headers']['Authorization'])
    def test_session_read_only(self, httpbin):
        self.start_session(httpbin)
        # Get a response from the original session.
        r2 = http('--session=test', 'GET', httpbin.url + '/get', env=self.env())
        assert HTTP_OK in r2
        # Make a request modifying the session data but
        # with --session-read-only.
        r3 = http('--follow', '--session-read-only=test',
                  '--auth=username:password2', 'GET',
                  httpbin.url + '/cookies/set?hello=world2', 'Hello:World2',
                  env=self.env())
        assert HTTP_OK in r3
        # Get a response from the updated session.
        r4 = http('--session=test', 'GET', httpbin.url + '/get', env=self.env())
        assert HTTP_OK in r4
        # Origin can differ on Travis.
        del r2.json['origin'], r4.json['origin']
        # Different for each request.
        # Should be the same as before r3: --session-read-only must not
        # persist any of r3's changes to the session file.
        assert r2.json == r4.json
class TestSession(SessionTestBase):
    """Stand-alone session tests."""
    def test_session_ignored_header_prefixes(self, httpbin):
        # Content-Type and If-* headers are request-specific and must not be
        # stored in the session file.
        self.start_session(httpbin)
        r1 = http('--session=test', 'GET', httpbin.url + '/get',
                  'Content-Type: text/plain',
                  'If-Unmodified-Since: Sat, 29 Oct 1994 19:43:31 GMT',
                  env=self.env())
        assert HTTP_OK in r1
        r2 = http('--session=test', 'GET', httpbin.url + '/get', env=self.env())
        assert HTTP_OK in r2
        assert no_content_type(r2.json['headers'])
        assert 'If-Unmodified-Since' not in r2.json['headers']
    def test_session_by_path(self, httpbin):
        # --session also accepts a filesystem path instead of a session name.
        self.start_session(httpbin)
        session_path = os.path.join(self.config_dir, 'session-by-path.json')
        r1 = http('--session=' + session_path, 'GET', httpbin.url + '/get',
                  'Foo:Bar', env=self.env())
        assert HTTP_OK in r1
        r2 = http('--session=' + session_path, 'GET', httpbin.url + '/get',
                  env=self.env())
        assert HTTP_OK in r2
        assert r2.json['headers']['Foo'] == 'Bar'
    @pytest.mark.skipif(
        sys.version_info >= (3,),
        reason="This test fails intermittently on Python 3 - "
               "see https://github.com/jkbrzt/httpie/issues/282")
    def test_session_unicode(self, httpbin):
        # Non-ASCII credentials and header values must round-trip through the
        # session file.
        self.start_session(httpbin)
        r1 = http('--session=test', u'--auth=test:' + UNICODE,
                  'GET', httpbin.url + '/get', u'Test:%s' % UNICODE,
                  env=self.env())
        assert HTTP_OK in r1
        r2 = http('--session=test', '--verbose', 'GET',
                  httpbin.url + '/get', env=self.env())
        assert HTTP_OK in r2
        # FIXME: Authorization *sometimes* is not present on Python3
        assert (r2.json['headers']['Authorization']
                == HTTPBasicAuth.make_header(u'test', UNICODE))
        # httpbin doesn't interpret utf8 headers
        assert UNICODE in r2
    def test_session_default_header_value_overwritten(self, httpbin):
        self.start_session(httpbin)
        # https://github.com/jkbrzt/httpie/issues/180
        r1 = http('--session=test',
                  httpbin.url + '/headers', 'User-Agent:custom',
                  env=self.env())
        assert HTTP_OK in r1
        assert r1.json['headers']['User-Agent'] == 'custom'
        # The custom User-Agent must persist into subsequent session requests.
        r2 = http('--session=test', httpbin.url + '/headers', env=self.env())
        assert HTTP_OK in r2
        assert r2.json['headers']['User-Agent'] == 'custom'
| bsd-3-clause |
vebin/Wox | PythonHome/Lib/site-packages/pip/vcs/subversion.py | 473 | 10640 | import os
import re
from pip.backwardcompat import urlparse
from pip.index import Link
from pip.util import rmtree, display_path, call_subprocess
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
class Subversion(VersionControl):
    """pip VCS backend for Subversion (``svn``) working copies."""
    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
    bundle_file = 'svn-checkout.txt'
    guide = ('# This was an svn checkout; to make it a checkout again run:\n'
             'svn checkout --force -r %(rev)s %(url)s .\n')
    def get_info(self, location):
        """Returns (url, revision), where both are strings"""
        assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
        # LANG=C forces untranslated ``svn info`` output so the regexes match.
        output = call_subprocess(
            [self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
        match = _svn_url_re.search(output)
        if not match:
            logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
            logger.info('Output that cannot be parsed: \n%s' % output)
            return None, None
        url = match.group(1).strip()
        match = _svn_revision_re.search(output)
        if not match:
            logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
            logger.info('Output that cannot be parsed: \n%s' % output)
            return url, None
        return url, match.group(1)
    def parse_vcs_bundle_file(self, content):
        """Parse a frozen svn bundle file; return (url, rev) from the first
        non-comment line, or (None, None) if it cannot be parsed."""
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            match = re.search(r'^-r\s*([^ ])?', line)
            if not match:
                return None, None
            rev = match.group(1)
            rest = line[match.end():].strip().split(None, 1)[0]
            return rest, rev
        return None, None
    def export(self, location):
        """Export the svn repository at the url to the destination location"""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        logger.notify('Exporting svn repository %s to %s' % (url, location))
        logger.indent += 2
        try:
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing directory
                # --force fixes this, but was only added in svn 1.5
                rmtree(location)
            call_subprocess(
                [self.cmd, 'export'] + rev_options + [url, location],
                filter_stdout=self._filter, show_stdout=False)
        finally:
            logger.indent -= 2
    def switch(self, dest, url, rev_options):
        """Point the existing checkout at *dest* to a different URL."""
        call_subprocess(
            [self.cmd, 'switch'] + rev_options + [url, dest])
    def update(self, dest, rev_options):
        """Run ``svn update`` on the checkout at *dest*."""
        call_subprocess(
            [self.cmd, 'update'] + rev_options + [dest])
    def obtain(self, dest):
        """Check the repository out into *dest* (possibly at a pinned rev)."""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        if rev:
            rev_display = ' (to revision %s)' % rev
        else:
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Checking out %s%s to %s'
                          % (url, rev_display, display_path(dest)))
            call_subprocess(
                [self.cmd, 'checkout', '-q'] + rev_options + [url, dest])
    def get_location(self, dist, dependency_links):
        """Return the dependency link URL whose egg fragment matches *dist*,
        stripped of its fragment, or None."""
        for url in dependency_links:
            egg_fragment = Link(url).egg_fragment
            if not egg_fragment:
                continue
            if '-' in egg_fragment:
                ## FIXME: will this work when a package has - in the name?
                key = '-'.join(egg_fragment.split('-')[:-1]).lower()
            else:
                key = egg_fragment
            if key == dist.key:
                return url.split('#', 1)[0]
        return None
    def get_revision(self, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0
        for base, dirs, files in os.walk(location):
            if self.dirname not in dirs:
                dirs[:] = []
                continue  # no sense walking uncontrolled subdirs
            dirs.remove(self.dirname)
            entries_fn = os.path.join(base, self.dirname, 'entries')
            if not os.path.exists(entries_fn):
                ## FIXME: should we warn?
                continue
            dirurl, localrev = self._get_svn_url_rev(base)
            if base == location:
                base_url = dirurl + '/'  # save the root url
            elif not dirurl or not dirurl.startswith(base_url):
                dirs[:] = []
                continue  # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision
    def get_url_rev(self):
        """Return (url, rev), preserving the svn+ prefix on ssh URLs."""
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev = super(Subversion, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev
    def get_url(self, location):
        """Return the repository URL for the checkout containing *location*."""
        # In cases where the source is in a subdirectory, not alongside setup.py
        # we have to look up in the location until we find a real setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without finding setup.py
                logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
                            % orig_location)
                return None
        return self._get_svn_url_rev(location)[0]
    def _get_svn_url_rev(self, location):
        """Return (url, max_revision) by parsing ``.svn/entries`` (old svn
        working-copy formats) or ``svn info --xml`` (svn >= 1.7)."""
        from pip.exceptions import InstallationError
        f = open(os.path.join(location, self.dirname, 'entries'))
        data = f.read()
        f.close()
        if data.startswith('8') or data.startswith('9') or data.startswith('10'):
            # Working-copy formats 8-10: plain-text entries file, records
            # separated by form feeds.
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0]  # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            # Even older working copies stored entries as XML.
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError('Badly formatted data: %r' % data)
            url = match.group(1)  # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False)
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
            except InstallationError:
                url, revs = None, []
        if revs:
            rev = max(revs)
        else:
            rev = 0
        return url, rev
    def get_tag_revs(self, svn_tag_url):
        """Return [(tag_name, revision), ...] parsed from ``svn ls -v``."""
        stdout = call_subprocess(
            [self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
        results = []
        for line in stdout.splitlines():
            parts = line.split()
            rev = int(parts[0])
            tag = parts[-1].strip('/')
            results.append((tag, rev))
        return results
    def find_tag_match(self, rev, tag_revs):
        """Return the tag with the smallest revision greater than *rev*,
        or None if no tag is newer than *rev*."""
        best_match_rev = None
        best_tag = None
        for tag, tag_rev in tag_revs:
            if (tag_rev > rev and
                    (best_match_rev is None or best_match_rev > tag_rev)):
                # FIXME: Is best_match > tag_rev really possible?
                # or is it a sign something is wacky?
                best_match_rev = tag_rev
                best_tag = tag
        return best_tag
    def get_src_requirement(self, dist, location, find_tags=False):
        """Build an ``svn+URL@rev#egg=name`` requirement string for the
        checkout at *location*, or None if the URL cannot be determined."""
        repo = self.get_url(location)
        if repo is None:
            return None
        parts = repo.split('/')
        ## FIXME: why not project name?
        egg_project_name = dist.egg_name().split('-', 1)[0]
        rev = self.get_revision(location)
        if parts[-2] in ('tags', 'tag'):
            # It's a tag, perfect!
            full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
        elif parts[-2] in ('branches', 'branch'):
            # It's a branch :(
            full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
        elif parts[-1] == 'trunk':
            # Trunk :-/
            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
            if find_tags:
                tag_url = '/'.join(parts[:-1]) + '/tags'
                tag_revs = self.get_tag_revs(tag_url)
                match = self.find_tag_match(rev, tag_revs)
                if match:
                    # NOTE(review): the format string has two %s placeholders
                    # but ``match`` is a single tag string, so this line would
                    # raise TypeError if reached — TODO confirm and fix.
                    logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
                    repo = '%s/%s' % (tag_url, match)
                    full_egg_name = '%s-%s' % (egg_project_name, match)
        else:
            # Don't know what it is
            logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
            full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
        return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
def get_rev_options(url, rev):
    """Build the ``svn`` command-line options selecting *rev* on *url*.

    Credentials embedded in the URL's netloc (``user:pass@host``) are
    translated into explicit ``--username``/``--password`` options.
    """
    options = ['-r', rev] if rev else []
    split_result = urlparse.urlsplit(url)
    if hasattr(split_result, 'username'):
        # >= Python-2.5: the split result exposes credential attributes.
        username = split_result.username
        password = split_result.password
    else:
        username = password = None
        netloc = split_result[1]
        if '@' in netloc:
            auth = netloc.split('@')[0]
            if ':' in auth:
                username, password = auth.split(':', 1)
            else:
                username = auth
    if username:
        options.extend(['--username', username])
    if password:
        options.extend(['--password', password])
    return options
# Register this backend so pip's VCS machinery can dispatch svn+* URLs to it.
vcs.register(Subversion)
| mit |
0x0all/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 23 | 5540 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
    """Check binomial deviance loss.
    Check against alternative definitions in ESLII.
    """
    bd = BinomialDeviance(2)
    # pred has the same BD for y in {0, 1}
    assert_equal(bd(np.array([0.0]), np.array([0.0])),
                 bd(np.array([1.0]), np.array([0.0])))
    assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
                           np.array([100.0, 100.0, 100.0])),
                        0.0)
    assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
                           np.array([100.0, -100.0, -100.0])), 0)

    # Alternative definition of deviance (from ESLII). Named local functions
    # instead of assigned lambdas (PEP 8, E731); behavior is unchanged.
    def alt_dev(y, pred):
        return np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1) * pred))

    test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]),
                  np.array([-100.0, -100.0, -100.0])),
                 (np.array([1.0, 1.0, 1.0]),
                  np.array([-100.0, -100.0, -100.0]))]
    for datum in test_data:
        assert_almost_equal(bd(*datum), alt_dev(*datum))

    # Check the gradient against the alternative formula.
    def alt_ng(y, pred):
        return (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))

    for datum in test_data:
        assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
    """Check log odds estimator. """
    estimator = LogOddsEstimator()
    # Fitting with an invalid target must raise.
    assert_raises(ValueError, estimator.fit, None, np.array([1]))
    estimator.fit(None, np.array([1.0, 0.0]))
    assert_equal(estimator.prior, 0.0)
    expected = np.array([[0.0], [0.0]])
    assert_array_equal(estimator.predict(np.array([[1.0], [1.0]])),
                       expected)
def test_sample_weight_smoke():
    """Uniform sample weights must not change the least-squares loss."""
    rng = check_random_state(13)
    y = rng.rand(100)
    pred = rng.rand(100)
    loss = LeastSquaresError(1)
    unweighted = loss(y, pred)
    uniform_weights = np.ones(pred.shape[0], dtype=np.float32)
    weighted = loss(y, pred, uniform_weights)
    assert_almost_equal(unweighted, weighted)
def test_sample_weight_init_estimators():
    """Smoke test for init estimators with sample weights. """
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
        else:
            k = 2
            y = clf_y
        if Loss.is_multi_class:
            # skip multiclass
            continue
        loss = Loss(k)
        init_est = loss.init_estimator()
        init_est.fit(X, y)
        out = init_est.predict(X)
        assert_equal(out.shape, (y.shape[0], 1))
        sw_init_est = loss.init_estimator()
        sw_init_est.fit(X, y, sample_weight=sample_weight)
        # Bug fix: predict with the *weighted* estimator (the original code
        # called init_est.predict() again, making the comparison vacuous).
        sw_out = sw_init_est.predict(X)
        assert_equal(sw_out.shape, (y.shape[0], 1))
        # With uniform weights, predictions must match the unweighted fit.
        assert_array_equal(out, sw_out)
def test_weighted_percentile():
    """The weighted median must ignore the zero-weighted outlier."""
    # ``np.float`` was a deprecated alias for the builtin ``float`` and was
    # removed in NumPy 1.24; using ``float`` keeps the dtype (float64).
    y = np.empty(102, dtype=float)
    y[:50] = 0
    y[-51:] = 2
    y[-1] = 100000  # outlier, but its weight is zeroed below
    y[50] = 1
    sw = np.ones(102, dtype=float)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 1
def test_weighted_percentile_equal():
    """All-equal values: the weighted median is that value."""
    # ``np.float`` alias removed in NumPy 1.24; builtin ``float`` is identical.
    y = np.empty(102, dtype=float)
    y.fill(0.0)
    sw = np.ones(102, dtype=float)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 0
def test_weighted_percentile_zero_weight():
    """All-zero weights: the percentile still resolves to the data value."""
    # ``np.float`` alias removed in NumPy 1.24; builtin ``float`` is identical.
    y = np.empty(102, dtype=float)
    y.fill(1.0)
    sw = np.ones(102, dtype=float)
    sw.fill(0.0)
    score = _weighted_percentile(y, sw, 50)
    assert score == 1.0
def test_sample_weight_deviance():
    """Test if deviance supports sample weights. """
    rng = check_random_state(13)
    # X is never passed to the losses, but the rng.rand call advances the
    # random stream, so removing it would change reg_y/clf_y below.
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    mclf_y = rng.randint(0, 3, size=100)
    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
            p = reg_y
        else:
            k = 2
            y = clf_y
            p = clf_y
        if Loss.is_multi_class:
            k = 3
            y = mclf_y
            # one-hot encoding
            p = np.zeros((y.shape[0], k), dtype=np.float64)
            for i in range(k):
                p[:, i] = y == i
        loss = Loss(k)
        # With uniform weights the weighted deviance must equal the
        # unweighted one exactly.
        deviance_w_w = loss(y, p, sample_weight)
        deviance_wo_w = loss(y, p)
        assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
brandonPurvis/osf.io | api/users/views.py | 3 | 19716 | from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import NotAuthenticated
from django.contrib.auth.models import AnonymousUser
from modularodm import Q
from framework.auth.core import Auth
from framework.auth.oauth_scopes import CoreScopes
from website.models import User, Node
from api.base import permissions as base_permissions
from api.base.utils import get_object_or_error
from api.base.exceptions import Conflict
from api.base.views import JSONAPIBaseView
from api.base.filters import ODMFilterMixin
from api.base.parsers import JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON
from api.nodes.serializers import NodeSerializer
from api.institutions.serializers import InstitutionSerializer
from api.registrations.serializers import RegistrationSerializer
from .serializers import UserSerializer, UserDetailSerializer, UserInstitutionsRelationshipSerializer
from .permissions import ReadOnlyOrCurrentUser, ReadOnlyOrCurrentUserRelationship
class UserMixin(object):
    """Mixin with convenience methods for retrieving the current node based on the
    current URL. By default, fetches the user based on the user_id kwarg.
    """
    serializer_class = UserSerializer
    user_lookup_url_kwarg = 'user_id'
    def get_user(self, check_permissions=True):
        # Resolve the user addressed by the URL; the special key 'me' is an
        # alias for the currently authenticated user.
        key = self.kwargs[self.user_lookup_url_kwarg]
        current_user = self.request.user
        if key == 'me':
            if isinstance(current_user, AnonymousUser):
                # 'me' cannot be resolved without authentication.
                raise NotAuthenticated
            else:
                return self.request.user
        obj = get_object_or_error(User, key, 'user')
        if check_permissions:
            # May raise a permission denied
            self.check_object_permissions(self.request, obj)
        return obj
class UserList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin):
    """List of users registered on the OSF. *Read-only*.
    Paginated list of users ordered by the date they registered. Each resource contains the full representation of the
    user, meaning additional requests to an individual user's detail view are not necessary.
    Note that if an anonymous view_only key is being used, user information will not be serialized, and the id will be
    an empty string. Relationships to a user object will not show in this case, either.
    The subroute [`/me/`](me/) is a special endpoint that always points to the currently logged-in user.
    ##User Attributes
    <!--- Copied Attributes From UserDetail -->
    OSF User entities have the "users" `type`.
        name               type               description
        ----------------------------------------------------------------------------------------
        full_name          string             full name of the user; used for display
        given_name         string             given name of the user; for bibliographic citations
        middle_names       string             middle name of user; for bibliographic citations
        family_name        string             family name of user; for bibliographic citations
        suffix             string             suffix of user's name for bibliographic citations
        date_registered    iso8601 timestamp  timestamp when the user's account was created
    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
    ##Actions
    *None*.
    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
    Users may be filtered by their `id`, `full_name`, `given_name`, `middle_names`, or `family_name`.
    + `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` of the user entities so that it points to
    the user's profile image scaled to the given size in pixels.  If left blank, the size depends on the image provider.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.USERS_READ]
    required_write_scopes = [CoreScopes.USERS_WRITE]
    serializer_class = UserSerializer
    # NOTE(review): ('-date_registered') is a plain string, not a 1-tuple —
    # looks like a missing trailing comma; confirm whether a tuple was
    # intended before changing it.
    ordering = ('-date_registered')
    view_category = 'users'
    view_name = 'user-list'
    # overrides ODMFilterMixin
    def get_default_odm_query(self):
        # Only active accounts: registered, not merged into another account,
        # and not disabled.
        return (
            Q('is_registered', 'eq', True) &
            Q('is_merged', 'ne', True) &
            Q('date_disabled', 'eq', None)
        )
    # overrides ListAPIView
    def get_queryset(self):
        # TODO: sort
        query = self.get_query_from_request()
        return User.find(query)
class UserDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, UserMixin):
    """Details about a specific user. *Writeable*.
    The User Detail endpoint retrieves information about the user whose id is the final part of the path.  If `me`
    is given as the id, the record of the currently logged-in user will be returned.  The returned information includes
    the user's bibliographic information and the date the user registered.
    Note that if an anonymous view_only key is being used, user information will not be serialized, and the id will be
    an empty string. Relationships to a user object will not show in this case, either.
    ##Attributes
    OSF User entities have the "users" `type`.
        name               type               description
        ----------------------------------------------------------------------------------------
        full_name          string             full name of the user; used for display
        given_name         string             given name of the user; for bibliographic citations
        middle_names       string             middle name of user; for bibliographic citations
        family_name        string             family name of user; for bibliographic citations
        suffix             string             suffix of user's name for bibliographic citations
        date_registered    iso8601 timestamp  timestamp when the user's account was created
    ##Relationships
    ###Nodes
    A list of all nodes the user has contributed to.  If the user id in the path is the same as the logged-in user, all
    nodes will be visible.  Otherwise, you will only be able to see the other user's publicly-visible nodes.
    ##Links
        self:               the canonical api endpoint of this user
        html:               this user's page on the OSF website
        profile_image_url:  a url to the user's profile image
    ##Actions
    ###Update
        Method:        PUT / PATCH
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": {
                           "type": "users",   # required
                           "id":   {user_id}, # required
                           "attributes": {
                             "full_name":    {full_name},    # mandatory
                             "given_name":   {given_name},   # optional
                             "middle_names": {middle_names}, # optional
                             "family_name":  {family_name},  # optional
                             "suffix":       {suffix}        # optional
                           }
                         }
                       }
        Success:       200 OK + node representation
    To update your user profile, issue a PUT request to either the canonical URL of your user resource (as given in
    `/links/self`) or to `/users/me/`.  Only the `full_name` attribute is required.  Unlike at signup, the given, middle,
    and family names will not be inferred from the `full_name`.  Currently, only `full_name`, `given_name`,
    `middle_names`, `family_name`, and `suffix` are updateable.
    A PATCH request issued to this endpoint will behave the same as a PUT request, but does not require `full_name` to
    be set.
    **NB:** If you PUT/PATCH to the `/users/me/` endpoint, you must still provide your full user id in the `id` field of
    the request.  We do not support using the `me` alias in request bodies at this time.
    ##Query Params
    + `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` so that it points the image scaled to the given
    size in pixels.  If left blank, the size depends on the image provider.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        # Anyone may read; only the user themselves may update.
        ReadOnlyOrCurrentUser,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.USERS_READ]
    required_write_scopes = [CoreScopes.USERS_WRITE]
    view_category = 'users'
    view_name = 'user-detail'
    serializer_class = UserDetailSerializer
    # overrides RetrieveAPIView
    def get_object(self):
        return self.get_user()
    # overrides RetrieveUpdateAPIView
    def get_serializer_context(self):
        # Serializer needs the request in order to make an update to privacy
        context = JSONAPIBaseView.get_serializer_context(self)
        context['request'] = self.request
        return context
class UserNodes(JSONAPIBaseView, generics.ListAPIView, UserMixin, ODMFilterMixin):
    """List of nodes that the user contributes to. *Read-only*.
    Paginated list of nodes that the user contributes to.  Each resource contains the full representation of the node,
    meaning additional requests to an individual node's detail view are not necessary.  If the user id in the path is the
    same as the logged-in user, all nodes will be visible.  Otherwise, you will only be able to see the other user's
    publicly-visible nodes.  The special user id `me` can be used to represent the currently logged-in user.
    ##Node Attributes
    <!--- Copied Attributes from NodeDetail -->
    OSF Node entities have the "nodes" `type`.
        name           type               description
        ---------------------------------------------------------------------------------
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        fork           boolean            is this project a fork?
        registration   boolean            has this project been registered?
        fork           boolean            is this node a fork of another node?
        dashboard      boolean            is this node visible on the user dashboard?
        public         boolean            has this node been made publicly-visible?
    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
    ##Actions
    *None*.
    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
    <!--- Copied Query Params from NodeList -->
    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`.  `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching.  `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`.  Note
    that quoting `true` or `false` in the query will cause the match to fail regardless.  `tags` is an array of simple strings.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.USERS_READ, CoreScopes.NODE_BASE_READ]
    required_write_scopes = [CoreScopes.USERS_WRITE, CoreScopes.NODE_BASE_WRITE]
    serializer_class = NodeSerializer
    view_category = 'users'
    view_name = 'user-nodes'
    # overrides ODMFilterMixin
    def get_default_odm_query(self):
        # Nodes the target user contributes to, excluding folders and
        # deleted nodes.
        user = self.get_user()
        return (
            Q('contributors', 'eq', user) &
            Q('is_folder', 'ne', True) &
            Q('is_deleted', 'ne', True)
        )
    # overrides ListAPIView
    def get_queryset(self):
        current_user = self.request.user
        if current_user.is_anonymous():
            auth = Auth(None)
        else:
            auth = Auth(current_user)
        query = self.get_query_from_request()
        raw_nodes = Node.find(self.get_default_odm_query() & query)
        # Visibility check happens in Python: keep public nodes plus those
        # the requesting user is allowed to view.
        nodes = [each for each in raw_nodes if each.is_public or each.can_view(auth)]
        return nodes
class UserInstitutions(JSONAPIBaseView, generics.ListAPIView, UserMixin):
    # Read-only list of the institutions the target user is affiliated with.
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.USERS_READ, CoreScopes.INSTITUTION_READ]
    # Writes are not allowed through this endpoint.
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = InstitutionSerializer
    view_category = 'users'
    view_name = 'user-institutions'
    def get_default_odm_query(self):
        # NOTE(review): this class does not mix in ODMFilterMixin, so this
        # method appears unused here -- confirm before removing.
        return None
    def get_queryset(self):
        # Institutions come straight off the user model; no extra filtering.
        user = self.get_user()
        return user.affiliated_institutions
class UserRegistrations(UserNodes):
    """List of registrations that the user contributes to. *Read-only*.
    Paginated list of registrations that the user contributes to. Each resource contains the full representation of the
    registration, meaning additional requests to an individual registration's detail view are not necessary. If the user
    id in the path is the same as the logged-in user, all nodes will be visible. Otherwise, you will only be able to
    see the other user's publicly-visible nodes. The special user id `me` can be used to represent the currently
    logged-in user. Retracted registrations will display a limited number of fields, namely, title, description,
    date_created, registration, retracted, date_registered, retraction_justification, and registration supplement.
    ##Registration Attributes
    <!--- Copied Attributes from RegistrationList -->
    Registrations have the "registrations" `type`.
    name type description
    -------------------------------------------------------------------------------------------------------
    title string title of the registered project or component
    description string description of the registered node
    category string node category, must be one of the allowed values
    date_created iso8601 timestamp timestamp that the node was created
    date_modified iso8601 timestamp timestamp when the node was last updated
    tags array of strings list of tags that describe the registered node
    fork boolean is this project a fork?
    registration boolean has this project been registered?
    dashboard boolean is this registered node visible on the user dashboard?
    public boolean has this registration been made publicly-visible?
    retracted boolean has this registration been retracted?
    date_registered iso8601 timestamp timestamp that the registration was created
    retraction_justification string reasons for retracting the registration
    pending_retraction boolean is this registration pending retraction?
    pending_registration_approval boolean is this registration pending approval?
    pending_embargo boolean is this registration pending an embargo?
    registered_meta dictionary registration supplementary information
    registration_supplement string registration template
    ##Relationships
    ###Registered from
    The registration is branched from this node.
    ###Registered by
    The registration was initiated by this user.
    ###Other Relationships
    See documentation on registered_from detail view. A registration has many of the same properties as a node.
    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
    ##Actions
    *None*.
    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
    <!--- Copied Query Params from NodeList -->
    Registrations may be filtered by their `title`, `category`, `description`, `public`, or `tags`. `title`, `description`,
    and `category` are string fields and will be filtered using simple substring matching. `public` is a boolean and
    can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true` or `false` in
    the query will cause the match to fail regardless. `tags` is an array of simple strings.
    #This Request/Response
    """
    required_read_scopes = [CoreScopes.USERS_READ, CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.USERS_WRITE, CoreScopes.NODE_REGISTRATIONS_WRITE]
    serializer_class = RegistrationSerializer
    view_category = 'users'
    view_name = 'user-registrations'
    # overrides ODMFilterMixin
    def get_default_odm_query(self):
        # Same base query as UserNodes, further restricted to registrations.
        user = self.get_user()
        return (
            Q('contributors', 'eq', user) &
            Q('is_folder', 'ne', True) &
            Q('is_deleted', 'ne', True) &
            Q('is_registration', 'eq', True)
        )
class UserInstitutionsRelationship(JSONAPIBaseView, generics.RetrieveDestroyAPIView, UserMixin):
    # JSON-API relationship endpoint for a user's affiliated institutions.
    # Supports GET (view) for anyone and DELETE (remove affiliation) only for
    # the current user (enforced by ReadOnlyOrCurrentUserRelationship).
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ReadOnlyOrCurrentUserRelationship
    )
    required_read_scopes = [CoreScopes.USERS_READ]
    required_write_scopes = [CoreScopes.USERS_WRITE]
    serializer_class = UserInstitutionsRelationshipSerializer
    parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
    view_category = 'users'
    view_name = 'user-institutions-relationship'
    def get_object(self):
        # Permission checking is done explicitly via check_object_permissions()
        # below, so skip the per-user check inside get_user().
        user = self.get_user(check_permissions=False)
        obj = {
            'data': user.affiliated_institutions,
            'self': user
        }
        self.check_object_permissions(self.request, obj)
        return obj
    def perform_destroy(self, instance):
        data = self.request.data['data']
        user = self.request.user
        current_institutions = {inst._id for inst in user.affiliated_institutions}
        # DELETEs normally dont get type checked
        # not the best way to do it, should be enforced everywhere, maybe write a test for it
        # Two passes on purpose: validate every payload item's JSON-API type
        # first, so a single bad entry aborts the request before any
        # affiliation has been removed.
        for val in data:
            if val['type'] != self.serializer_class.Meta.type_:
                raise Conflict()
        for val in data:
            if val['id'] in current_institutions:
                user.remove_institution(val['id'])
        user.save()
| apache-2.0 |
MrSenko/Kiwi | tcms/testcases/migrations/0001_initial.py | 2 | 22715 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db import migrations, models
import tcms.core.models.base
# Initial TestCaseStatus names seeded by this migration's RunPython step.
test_case_statuss = ["PROPOSED", "CONFIRMED", "DISABLED", "NEED_UPDATE"]
# Legacy db_column names for the primary keys of TestCaseStatus and Category.
CASE_STATUS_ID_COLUMN = "case_status_id"
CATEGORY_ID_COLUMN = "category_id"
if settings.DATABASES["default"]["ENGINE"].find("sqlite") > -1:
    # On SQLite the explicit db_column is dropped (an empty string makes
    # Django use its default column name) -- presumably a SQLite-specific
    # workaround; confirm before changing.
    CASE_STATUS_ID_COLUMN = ""
    CATEGORY_ID_COLUMN = ""
def forwards_add_initial_data(apps, schema_editor):
    """Seed the default bug trackers (Bugzilla, JIRA) and case statuses.

    Runs as the forward step of the RunPython operation below; uses the
    historical models supplied by ``apps`` as required inside migrations.
    """
    bug_system_model = apps.get_model("testcases", "BugSystem")
    default_trackers = [
        bug_system_model(
            name="Bugzilla",
            description="1-7 digit, e.g. 1001234",
            url_reg_exp="https://bugzilla.example.com/show_bug.cgi?id=%s",
            validate_reg_exp=r"^\d{1,7}$",
        ),
        bug_system_model(
            name="JIRA",
            description="e.g. KIWI-222",
            url_reg_exp="https://jira.example.com/browse/%s",
            validate_reg_exp=r"^[A-Z0-9]+-\d+$",
        ),
    ]
    bug_system_model.objects.bulk_create(default_trackers)

    test_case_status_model = apps.get_model("testcases", "TestCaseStatus")
    default_statuses = [
        test_case_status_model(name=status_name, description="")
        for status_name in test_case_statuss
    ]
    test_case_status_model.objects.bulk_create(default_statuses)
def reverse_add_initial_data(apps, schema_editor):
    """Delete the seeded bug trackers and case statuses (reverse migration)."""
    bug_system_model = apps.get_model("testcases", "BugSystem")
    bug_system_model.objects.filter(name__in=["Bugzilla", "JIRA"]).delete()

    test_case_status_model = apps.get_model("testcases", "TestCaseStatus")
    test_case_status_model.objects.filter(name__in=test_case_statuss).delete()
class Migration(migrations.Migration):
    """Initial (squashed) schema for the ``testcases`` app.

    Creates the core models (TestCaseStatus, Category, TestCase), the
    through-tables linking cases to components/plans/tags, the historical
    TestCase model, the bug-tracking models (Bug, BugSystem), per-case e-mail
    settings and versioned case text, then seeds default rows via RunPython.
    """
    dependencies = [
        ("management", "0003_squashed"),
        ("testplans", "0005_squashed"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # --- core lookup tables -------------------------------------------
        migrations.CreateModel(
            name="TestCaseStatus",
            fields=[
                (
                    "id",
                    models.AutoField(
                        max_length=6,
                        serialize=False,
                        primary_key=True,
                        db_column=CASE_STATUS_ID_COLUMN,
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                ("description", models.TextField(null=True, blank=True)),
            ],
            options={
                "verbose_name": "Test case status",
                "verbose_name_plural": "Test case statuses",
            },
            bases=(models.Model, tcms.core.models.base.UrlMixin),
        ),
        migrations.CreateModel(
            name="Category",
            fields=[
                (
                    "id",
                    models.AutoField(
                        serialize=False, primary_key=True, db_column=CATEGORY_ID_COLUMN
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                ("description", models.TextField(blank=True)),
                (
                    "product",
                    models.ForeignKey(
                        related_name="category",
                        to="management.Product",
                        on_delete=models.CASCADE,
                    ),
                ),
            ],
            options={
                "verbose_name_plural": "test case categories",
            },
            bases=(models.Model, tcms.core.models.base.UrlMixin),
        ),
        # --- the TestCase model itself ------------------------------------
        migrations.CreateModel(
            name="TestCase",
            fields=[
                ("case_id", models.AutoField(serialize=False, primary_key=True)),
                (
                    "create_date",
                    models.DateTimeField(auto_now_add=True, db_column="creation_date"),
                ),
                (
                    "is_automated",
                    models.IntegerField(default=0, db_column="isautomated"),
                ),
                ("is_automated_proposed", models.BooleanField(default=False)),
                ("script", models.TextField(blank=True, null=True)),
                ("arguments", models.TextField(blank=True, null=True)),
                (
                    "extra_link",
                    models.CharField(
                        default=None, max_length=1024, null=True, blank=True
                    ),
                ),
                ("summary", models.CharField(max_length=255)),
                (
                    "requirement",
                    models.CharField(max_length=255, blank=True, null=True),
                ),
                ("alias", models.CharField(max_length=255, blank=True)),
                ("notes", models.TextField(blank=True, null=True)),
            ],
            bases=(models.Model, tcms.core.models.base.UrlMixin),
        ),
        migrations.AddField(
            model_name="testcase",
            name="author",
            field=models.ForeignKey(
                on_delete=models.deletion.CASCADE,
                related_name="cases_as_author",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="testcase",
            name="case_status",
            field=models.ForeignKey(
                on_delete=models.deletion.CASCADE, to="testcases.TestCaseStatus"
            ),
        ),
        migrations.AddField(
            model_name="testcase",
            name="category",
            field=models.ForeignKey(
                on_delete=models.deletion.CASCADE,
                related_name="category_case",
                to="testcases.Category",
            ),
        ),
        # --- many-to-many through tables ----------------------------------
        migrations.CreateModel(
            name="TestCaseComponent",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "case",
                    models.ForeignKey(
                        to="testcases.TestCase", on_delete=models.CASCADE
                    ),
                ),
                (
                    "component",
                    models.ForeignKey(
                        to="management.Component", on_delete=models.CASCADE
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="testcase",
            name="component",
            field=models.ManyToManyField(
                related_name="cases",
                through="testcases.TestCaseComponent",
                to="management.Component",
            ),
        ),
        migrations.AddField(
            model_name="testcase",
            name="default_tester",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=models.deletion.CASCADE,
                related_name="cases_as_default_tester",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.CreateModel(
            name="TestCasePlan",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("sortkey", models.IntegerField(null=True, blank=True)),
                (
                    "case",
                    models.ForeignKey(
                        to="testcases.TestCase", on_delete=models.CASCADE
                    ),
                ),
                (
                    "plan",
                    models.ForeignKey(
                        to="testplans.TestPlan", on_delete=models.CASCADE
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="testcase",
            name="plan",
            field=models.ManyToManyField(
                related_name="case",
                through="testcases.TestCasePlan",
                to="testplans.TestPlan",
            ),
        ),
        migrations.AddField(
            model_name="testcase",
            name="priority",
            field=models.ForeignKey(
                on_delete=models.deletion.CASCADE,
                related_name="priority_case",
                to="management.Priority",
            ),
        ),
        migrations.AddField(
            model_name="testcase",
            name="reviewer",
            field=models.ForeignKey(
                null=True,
                on_delete=models.deletion.CASCADE,
                related_name="cases_as_reviewer",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.CreateModel(
            name="TestCaseTag",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "case",
                    models.ForeignKey(
                        to="testcases.TestCase", on_delete=models.CASCADE
                    ),
                ),
                (
                    "tag",
                    models.ForeignKey(to="management.Tag", on_delete=models.CASCADE),
                ),
            ],
        ),
        migrations.AddField(
            model_name="testcase",
            name="tag",
            field=models.ManyToManyField(
                related_name="case",
                through="testcases.TestCaseTag",
                to="management.Tag",
            ),
        ),
        # --- django-simple-history style audit model ----------------------
        migrations.CreateModel(
            name="HistoricalTestCase",
            fields=[
                ("case_id", models.IntegerField(blank=True, db_index=True)),
                (
                    "create_date",
                    models.DateTimeField(
                        blank=True, db_column="creation_date", editable=False
                    ),
                ),
                (
                    "is_automated",
                    models.IntegerField(db_column="isautomated", default=0),
                ),
                ("is_automated_proposed", models.BooleanField(default=False)),
                ("script", models.TextField(blank=True, null=True)),
                ("arguments", models.TextField(blank=True, null=True)),
                (
                    "extra_link",
                    models.CharField(
                        blank=True, default=None, max_length=1024, null=True
                    ),
                ),
                ("summary", models.CharField(max_length=255)),
                (
                    "requirement",
                    models.CharField(blank=True, max_length=255, null=True),
                ),
                ("alias", models.CharField(blank=True, max_length=255)),
                ("notes", models.TextField(blank=True, null=True)),
                ("history_id", models.AutoField(primary_key=True, serialize=False)),
                ("history_change_reason", models.TextField(null=True)),
                ("history_date", models.DateTimeField()),
                (
                    "history_type",
                    models.CharField(
                        choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
                        max_length=1,
                    ),
                ),
                (
                    "author",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=models.deletion.DO_NOTHING,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "case_status",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=models.deletion.DO_NOTHING,
                        related_name="+",
                        to="testcases.TestCaseStatus",
                    ),
                ),
                (
                    "category",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=models.deletion.DO_NOTHING,
                        related_name="+",
                        to="testcases.Category",
                    ),
                ),
                (
                    "default_tester",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=models.deletion.DO_NOTHING,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "history_user",
                    models.ForeignKey(
                        null=True,
                        on_delete=models.deletion.SET_NULL,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "priority",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=models.deletion.DO_NOTHING,
                        related_name="+",
                        to="management.Priority",
                    ),
                ),
                (
                    "reviewer",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=models.deletion.DO_NOTHING,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "verbose_name": "historical test case",
                "ordering": ("-history_date", "-history_id"),
                "get_latest_by": "history_date",
            },
        ),
        # --- bug tracking -------------------------------------------------
        migrations.CreateModel(
            name="Bug",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("bug_id", models.CharField(max_length=25)),
                ("summary", models.CharField(max_length=255, null=True, blank=True)),
                ("description", models.TextField(null=True, blank=True)),
                (
                    "case",
                    models.ForeignKey(
                        related_name="case_bug",
                        to="testcases.TestCase",
                        on_delete=models.CASCADE,
                    ),
                ),
            ],
            bases=(models.Model, tcms.core.models.base.UrlMixin),
        ),
        migrations.CreateModel(
            name="BugSystem",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("name", models.CharField(max_length=255, unique=True)),
                ("description", models.TextField(blank=True)),
                (
                    "url_reg_exp",
                    models.CharField(
                        max_length=8192,
                        help_text="A valid Python format string such as "
                        "http://bugs.example.com/%s",
                        verbose_name="URL format string",
                    ),
                ),
                (
                    "validate_reg_exp",
                    models.CharField(
                        max_length=128,
                        help_text="A valid JavaScript regular "
                        "expression such as ^\\d$",
                        verbose_name="RegExp for ID validation",
                    ),
                ),
                (
                    "api_url",
                    models.CharField(
                        blank=True,
                        max_length=1024,
                        null=True,
                        verbose_name="API URL",
                        help_text="This is the URL to which API requests "
                        "will be sent. Leave empty to disable!",
                    ),
                ),
                (
                    "api_password",
                    models.CharField(
                        blank=True,
                        max_length=256,
                        null=True,
                        verbose_name="API password or token",
                    ),
                ),
                (
                    "api_username",
                    models.CharField(
                        blank=True,
                        max_length=256,
                        null=True,
                        verbose_name="API username",
                    ),
                ),
                (
                    "tracker_type",
                    models.CharField(
                        default="IssueTrackerType",
                        max_length=128,
                        help_text="This determines how Kiwi TCMS "
                        "integrates with the IT system",
                        verbose_name="Type",
                    ),
                ),
                (
                    "base_url",
                    models.CharField(
                        max_length=1024,
                        null=True,
                        blank=True,
                        verbose_name="Base URL",
                        help_text="""Base URL, for example\
 <strong>https://bugzilla.example.com</strong>!
 Leave empty to disable!
 """,
                    ),
                ),
            ],
            options={
                "verbose_name": "Bug tracker",
                "verbose_name_plural": "Bug trackers",
            },
            bases=(models.Model, tcms.core.models.base.UrlMixin),
        ),
        migrations.AddField(
            model_name="bug",
            name="bug_system",
            field=models.ForeignKey(
                default=1, to="testcases.BugSystem", on_delete=models.CASCADE
            ),
        ),
        # --- per-case notification settings and versioned text ------------
        migrations.CreateModel(
            name="TestCaseEmailSettings",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("notify_on_case_update", models.BooleanField(default=False)),
                ("notify_on_case_delete", models.BooleanField(default=False)),
                ("auto_to_case_author", models.BooleanField(default=False)),
                ("auto_to_case_tester", models.BooleanField(default=False)),
                ("auto_to_run_manager", models.BooleanField(default=False)),
                ("auto_to_run_tester", models.BooleanField(default=False)),
                ("auto_to_case_run_assignee", models.BooleanField(default=False)),
                (
                    "case",
                    models.OneToOneField(
                        related_name="email_settings",
                        to="testcases.TestCase",
                        on_delete=models.CASCADE,
                    ),
                ),
                ("cc_list", models.TextField(default="")),
            ],
        ),
        migrations.CreateModel(
            name="TestCaseText",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("case_text_version", models.IntegerField()),
                (
                    "create_date",
                    models.DateTimeField(auto_now_add=True, db_column="creation_ts"),
                ),
                ("action", models.TextField(blank=True)),
                ("effect", models.TextField(blank=True)),
                ("setup", models.TextField(blank=True)),
                ("breakdown", models.TextField(blank=True)),
                (
                    "author",
                    models.ForeignKey(
                        to=settings.AUTH_USER_MODEL,
                        db_column="who",
                        on_delete=models.CASCADE,
                    ),
                ),
                (
                    "case",
                    models.ForeignKey(
                        related_name="text",
                        to="testcases.TestCase",
                        on_delete=models.CASCADE,
                    ),
                ),
            ],
            options={
                "ordering": ["case", "-case_text_version"],
            },
            bases=(models.Model, tcms.core.models.base.UrlMixin),
        ),
        # --- uniqueness constraints and data seeding ----------------------
        migrations.AlterUniqueTogether(
            name="testcasetext",
            unique_together={("case", "case_text_version")},
        ),
        migrations.AlterUniqueTogether(
            name="testcaseplan",
            unique_together={("plan", "case")},
        ),
        migrations.AlterUniqueTogether(
            name="category",
            unique_together={("product", "name")},
        ),
        migrations.RunPython(forwards_add_initial_data, reverse_add_initial_data),
    ]
| gpl-2.0 |
cogeorg/BlackRhino | networkx/algorithms/approximation/dominating_set.py | 7 | 4402 | # -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
"""Functions for finding node and edge dominating sets.
A *`dominating set`_[1] for an undirected graph *G* with vertex set *V*
and edge set *E* is a subset *D* of *V* such that every vertex not in
*D* is adjacent to at least one member of *D*. An *`edge dominating
set`_[2] is a subset *F* of *E* such that every edge not in *F* is
incident to an endpoint of at least one edge in *F*.
.. [1] dominating set: https://en.wikipedia.org/wiki/Dominating_set
.. [2] edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set
"""
from __future__ import division
from ..matching import maximal_matching
from ...utils import not_implemented_for
__all__ = ["min_weighted_dominating_set",
"min_edge_dominating_set"]
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
# TODO Why doesn't this algorithm work for directed graphs?
@not_implemented_for('directed')
def min_weighted_dominating_set(G, weight=None):
    r"""Greedy approximation of a minimum weight dominating set.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph.

    weight : string
        Node attribute key holding each node's weight. Nodes without the
        attribute (or when *weight* is None) count as weight one.

    Returns
    -------
    set
        A dominating set whose total weight is within a `\log w(V)` factor
        of the optimum, where `w(V)` is the total node weight of `G`.

    Notes
    -----
    Implements the classic greedy set-cover style algorithm in `O(m)` time,
    where `m` is the number of edges.

    References
    ----------
    .. [1] Vazirani, Vijay V.
           *Approximation Algorithms*.
           Springer Science & Business Media, 2001.
    """
    # The null graph is dominated by the empty set.
    if len(G) == 0:
        return set()

    # Nodes chosen so far.
    chosen = set()

    def cost_effectiveness(entry):
        """Weight of a node divided by how many still-uncovered nodes it adds.

        ``entry`` is a ``(node, closed_neighborhood)`` pair.
        """
        node, closed_nbrs = entry
        return G.node[node].get(weight, 1) / len(closed_nbrs - chosen)

    # Nodes not yet adjacent to (or in) the chosen set.
    remaining = set(G)
    # Closed neighborhood (node plus its neighbors) of every node.
    closed_neighborhoods = {u: {u} | set(G[u]) for u in G}

    # Greedily pick the most cost-effective node until everything is covered.
    while remaining:
        best_node, covered = min(closed_neighborhoods.items(),
                                 key=cost_effectiveness)
        chosen.add(best_node)
        del closed_neighborhoods[best_node]
        remaining -= covered
    return chosen
def min_edge_dominating_set(G):
    r"""Return an edge dominating set of at most twice the minimum size.

    Parameters
    ----------
    G : NetworkX graph
        Undirected, non-empty graph.

    Returns
    -------
    set
        A set of edges dominating every edge of `G`; its size is no more
        than 2 * OPT.

    Raises
    ------
    ValueError
        If `G` is empty.

    Notes
    -----
    Any maximal matching is a 2-approximate edge dominating set, so the
    algorithm simply computes one. Runtime is `O(|E|)`.
    """
    if G:
        return maximal_matching(G)
    raise ValueError("Expected non-empty NetworkX graph!")
| gpl-3.0 |
JavierGarciaD/AlgoRepo | quants/mac.py | 1 | 3528 | import datetime
import numpy as np
from backtest import Backtest
from data import HistoricCSVDataHandler
from event import SignalEvent
from execution import SimulatedExecutionHandler
from portfolio import Portfolio
from strategy import Strategy
class MovingAverageCrossStrategy(Strategy):
    """Simple moving-average crossover strategy.

    Goes long when the short SMA rises above the long SMA and exits when it
    falls back below. Default lookbacks are 100/400 bars.
    """

    def __init__(self, bars, events, short_window=100, long_window=400):
        """Set up the strategy.

        Parameters:
        bars - The DataHandler object that provides bar information
        events - The Event Queue object.
        short_window - The short moving average lookback.
        long_window - The long moving average lookback.
        """
        self.bars = bars
        self.symbol_list = self.bars.symbol_list
        self.events = events
        self.short_window = short_window
        self.long_window = long_window
        # Per-symbol position state: 'OUT' or 'LONG'.
        self.bought = self._calculate_initial_bought()

    def _calculate_initial_bought(self):
        """Return a dict mapping every symbol to 'OUT' (no open position)."""
        return {sym: 'OUT' for sym in self.symbol_list}

    def calculate_signals(self, event):
        """Emit LONG/EXIT SignalEvents on SMA crossovers.

        Parameters
        event - A MarketEvent object.
        """
        if event.type == 'MARKET':
            for sym in self.symbol_list:
                closes = self.bars.get_latest_bars_values(
                    sym, "close", N=self.long_window)
                # Skip symbols with no price history yet.
                if not (closes is not None and closes != []):
                    continue
                fast_sma = np.mean(closes[-self.short_window:])
                slow_sma = np.mean(closes[-self.long_window:])
                bar_time = self.bars.get_latest_bar_datetime(sym)
                strategy_id = 1
                strength = 1.0
                if fast_sma > slow_sma and self.bought[sym] == "OUT":
                    # Fast average crossed above the slow one: enter long.
                    self.events.put(SignalEvent(
                        strategy_id, sym, bar_time, 'LONG', strength))
                    self.bought[sym] = 'LONG'
                elif fast_sma < slow_sma and self.bought[sym] == "LONG":
                    # Crossed back below while long: exit the position.
                    self.events.put(SignalEvent(
                        strategy_id, sym, bar_time, 'EXIT', strength))
                    self.bought[sym] = 'OUT'
if __name__ == "__main__":
    # Backtest configuration: data location, traded universe, starting
    # capital and simulation start date.
    csv_dir = 'C:/Users/javgar119/Documents/Python/Data/'
    symbol_list = ['AAPL']
    initial_capital = 100000.0
    start_date = datetime.datetime(1990,1,1,0,0,0)
    # 0.0 heartbeat -> drain the event queue as fast as possible (backtest mode).
    heartbeat = 0.0
    backtest = Backtest(csv_dir,
                        symbol_list,
                        initial_capital,
                        heartbeat,
                        start_date,
                        HistoricCSVDataHandler,
                        SimulatedExecutionHandler,
                        Portfolio,
                        MovingAverageCrossStrategy)
    backtest.simulate_trading()
| apache-2.0 |
citrix-openstack-build/nova | nova/tests/scheduler/test_rpcapi.py | 10 | 3484 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.scheduler.rpcapi
"""
from oslo.config import cfg
from nova import context
from nova.openstack.common import rpc
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
CONF = cfg.CONF
class SchedulerRpcAPITestCase(test.NoDBTestCase):
    """Verify that SchedulerAPI methods send the expected RPC messages."""
    def _test_scheduler_api(self, method, rpc_method, **kwargs):
        """Invoke ``rpcapi.<method>`` and check the message handed to rpc.

        ``method`` is the SchedulerAPI method name, ``rpc_method`` the rpc
        transport primitive it is expected to use ('call', 'cast' or
        'fanout_cast'). Remaining kwargs become the message arguments; an
        optional ``version`` kwarg is the expected RPC API version and is
        not forwarded.
        """
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = scheduler_rpcapi.SchedulerAPI()
        # Only 'call' returns a value; casts are one-way.
        expected_retval = 'foo' if method == 'call' else None
        expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
        expected_msg = rpcapi.make_msg(method, **kwargs)
        expected_msg['version'] = expected_version
        self.fake_args = None
        self.fake_kwargs = None
        def _fake_rpc_method(*args, **kwargs):
            # Capture exactly what the rpcapi passed to the rpc layer.
            self.fake_args = args
            self.fake_kwargs = kwargs
            if expected_retval:
                return expected_retval
        self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
        retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, expected_retval)
        expected_args = [ctxt, CONF.scheduler_topic, expected_msg]
        # NOTE(review): zip() stops at the shorter sequence, so surplus
        # positional args would go unchecked -- confirm this is intended.
        for arg, expected_arg in zip(self.fake_args, expected_args):
            self.assertEqual(arg, expected_arg)
    def test_run_instance(self):
        self._test_scheduler_api('run_instance', rpc_method='cast',
                request_spec='fake_request_spec',
                admin_password='pw', injected_files='fake_injected_files',
                requested_networks='fake_requested_networks',
                is_first_time=True, filter_properties='fake_filter_properties',
                legacy_bdm_in_spec=False, version='2.9')
    def test_prep_resize(self):
        self._test_scheduler_api('prep_resize', rpc_method='cast',
                instance='fake_instance',
                instance_type='fake_type', image='fake_image',
                request_spec='fake_request_spec',
                filter_properties='fake_props', reservations=list('fake_res'))
    def test_update_service_capabilities(self):
        self._test_scheduler_api('update_service_capabilities',
                rpc_method='fanout_cast', service_name='fake_name',
                host='fake_host', capabilities='fake_capabilities',
                version='2.4')
    def test_select_hosts(self):
        self._test_scheduler_api('select_hosts', rpc_method='call',
                request_spec='fake_request_spec',
                filter_properties='fake_prop',
                version='2.6')
    def test_select_destinations(self):
        self._test_scheduler_api('select_destinations', rpc_method='call',
                request_spec='fake_request_spec',
                filter_properties='fake_prop',
                version='2.7')
| apache-2.0 |
alma-siwon/Solid_Kernel-GPROJ | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Shared state used by the generator helpers below.
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
# Target-port member name ("lport"/"tport"), set by the *_include builders.
fabric_mod_port = ""
# Initiator-port member name ("nport"/"iport"), set by the *_include builders.
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for a Fibre Channel fabric module.

	Writes the nacl/tpg/lport struct definitions and sets the module-level
	fabric_mod_port/fabric_mod_init_port names used by later generators.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	# Build the header text verbatim; the strings below are emitted C code.
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += " u64 nport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* FC lport target portal group tag for TCM */\n"
	buf += " u16 lport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += " struct " + fabric_mod_name + "_lport *lport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += " /* SCSI protocol the lport is providing */\n"
	buf += " u8 lport_proto_id;\n"
	buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += " u64 lport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
	buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += " struct se_wwn lport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	# FC uses "lport" (target) / "nport" (initiator) naming.
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"
	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h with SAS-specific structs.

    Writes the nacl/tpg/tport struct definitions for a SAS fabric into
    fabric_mod_dir_var, and records the SAS port naming convention
    ("tport"/"iport") in the module-level globals consumed by the later
    generation stages.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += "	u64 iport_wwpn;\n"
    buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "	struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "	/* SAS port target portal group tag for TCM */\n"
    buf += "	u16 tport_tpgt;\n"
    buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "	struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "	/* SCSI protocol the tport is providing */\n"
    buf += "	u8 tport_proto_id;\n"
    buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += "	u64 tport_wwpn;\n"
    buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
    buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "	struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # write() returns None (py2) / a count (py3); the old "if ret:" error
    # check could never work — rely on OSError from open()/write() instead.
    with open(f, 'w') as p:
        p.write(buf)
    # Tell the later generation stages which port-name prefixes SAS uses.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h with iSCSI-specific structs.

    Writes the nacl/tpg/tport struct definitions for an iSCSI fabric
    (ASCII InitiatorName/TargetName, no binary WWPNs) into
    fabric_mod_dir_var, and records the iSCSI port naming convention
    ("tport"/"iport") in the module-level globals consumed by the later
    generation stages.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "	/* ASCII formatted InitiatorName */\n"
    buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "	struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "	/* iSCSI target portal group tag for TCM */\n"
    buf += "	u16 tport_tpgt;\n"
    buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "	struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "	/* SCSI protocol the tport is providing */\n"
    buf += "	u8 tport_proto_id;\n"
    buf += "	/* ASCII formatted TargetName for IQN */\n"
    buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "	struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # write() returns None (py2) / a count (py3); the old "if ret:" error
    # check could never work — rely on OSError from open()/write() instead.
    with open(f, 'w') as p:
        p.write(buf)
    # Tell the later generation stages which port-name prefixes iSCSI uses.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific _base.h generator.

    proto_ident must be one of "FC", "SAS" or "iSCSI"; any other value
    aborts the generator with a diagnostic (the protocol list is closed,
    so this is a usage error, not a recoverable condition).
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_configfs.c in fabric_mod_dir_var.

    Emits the configfs fabric skeleton: make/drop callbacks for node
    ACLs, TPGs and WWNs, the target_core_fabric_ops table, and module
    init/exit, specialized for proto_ident ("FC"/"SAS" get WWPN
    handling; "iSCSI" does not).  Reads the module-level
    fabric_mod_port / fabric_mod_init_port names that the _include
    generator must have set beforehand.
    """
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print("Writing file: " + f)
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += "	struct se_portal_group *se_tpg,\n"
    buf += "	struct config_group *group,\n"
    buf += "	const char *name)\n"
    buf += "{\n"
    buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "	u64 wwpn = 0;\n"
    buf += "	u32 nexus_depth;\n\n"
    buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += "		return ERR_PTR(-EINVAL); */\n"
    buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += "	if (!se_nacl_new)\n"
    buf += "		return ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += "	nexus_depth = 1;\n"
    buf += "	/*\n"
    buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += "	 * when converting a NodeACL from demo mode -> explict\n"
    buf += "	 */\n"
    buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += "				name, nexus_depth);\n"
    buf += "	if (IS_ERR(se_nacl)) {\n"
    buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += "		return se_nacl;\n"
    buf += "	}\n"
    buf += "	/*\n"
    buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += "	 */\n"
    buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
    buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += "	return se_nacl;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += "	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += "	kfree(nacl);\n"
    buf += "}\n\n"
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += "	struct se_wwn *wwn,\n"
    buf += "	struct config_group *group,\n"
    buf += "	const char *name)\n"
    buf += "{\n"
    buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += "	unsigned long tpgt;\n"
    buf += "	int ret;\n\n"
    buf += "	if (strstr(name, \"tpgt_\") != name)\n"
    buf += "		return ERR_PTR(-EINVAL);\n"
    buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += "		return ERR_PTR(-EINVAL);\n\n"
    buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += "	if (!tpg) {\n"
    buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += "		return ERR_PTR(-ENOMEM);\n"
    buf += "	}\n"
    buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += "				&tpg->se_tpg, (void *)tpg,\n"
    buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += "	if (ret < 0) {\n"
    buf += "		kfree(tpg);\n"
    buf += "		return NULL;\n"
    buf += "	}\n"
    buf += "	return &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += "	core_tpg_deregister(se_tpg);\n"
    buf += "	kfree(tpg);\n"
    buf += "}\n\n"
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += "	struct target_fabric_configfs *tf,\n"
    buf += "	struct config_group *group,\n"
    buf += "	const char *name)\n"
    buf += "{\n"
    buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "	u64 wwpn = 0;\n\n"
    buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += "		return ERR_PTR(-EINVAL); */\n\n"
    buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += "	if (!" + fabric_mod_port + ") {\n"
    buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += "		return ERR_PTR(-ENOMEM);\n"
    buf += "	}\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
    buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "				struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += "	kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += "	struct target_fabric_configfs *tf,\n"
    buf += "	char *page)\n"
    buf += "{\n"
    buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += "		\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += "		utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += "	NULL,\n"
    buf += "};\n\n"
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
    buf += "	.get_fabric_proto_ident		= " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
    buf += "	.tpg_get_default_depth		= " + fabric_mod_name + "_get_default_depth,\n"
    buf += "	.tpg_get_pr_transport_id	= " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += "	.tpg_get_pr_transport_id_len	= " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += "	.tpg_parse_pr_out_transport_id	= " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += "	.tpg_check_demo_mode		= " + fabric_mod_name + "_check_false,\n"
    buf += "	.tpg_check_demo_mode_cache	= " + fabric_mod_name + "_check_true,\n"
    buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += "	.tpg_alloc_fabric_acl		= " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += "	.tpg_release_fabric_acl		= " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += "	.release_cmd			= " + fabric_mod_name + "_release_cmd,\n"
    buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
    buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
    buf += "	.stop_session			= " + fabric_mod_name + "_stop_session,\n"
    buf += "	.fall_back_to_erl0		= " + fabric_mod_name + "_reset_nexus,\n"
    buf += "	.sess_logged_in			= " + fabric_mod_name + "_sess_logged_in,\n"
    buf += "	.sess_get_index			= " + fabric_mod_name + "_sess_get_index,\n"
    buf += "	.sess_get_initiator_sid		= NULL,\n"
    buf += "	.write_pending			= " + fabric_mod_name + "_write_pending,\n"
    buf += "	.write_pending_status		= " + fabric_mod_name + "_write_pending_status,\n"
    buf += "	.set_default_node_attributes	= " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += "	.get_task_tag			= " + fabric_mod_name + "_get_task_tag,\n"
    buf += "	.get_cmd_state			= " + fabric_mod_name + "_get_cmd_state,\n"
    buf += "	.queue_data_in			= " + fabric_mod_name + "_queue_data_in,\n"
    buf += "	.queue_status			= " + fabric_mod_name + "_queue_status,\n"
    buf += "	.queue_tm_rsp			= " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += "	.get_fabric_sense_len		= " + fabric_mod_name + "_get_fabric_sense_len,\n"
    buf += "	.set_fabric_sense_len		= " + fabric_mod_name + "_set_fabric_sense_len,\n"
    buf += "	.is_state_remove		= " + fabric_mod_name + "_is_state_remove,\n"
    buf += "	/*\n"
    buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += "	 */\n"
    buf += "	.fabric_make_wwn		= " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += "	.fabric_drop_wwn		= " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
    buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
    buf += "	.fabric_post_link		= NULL,\n"
    buf += "	.fabric_pre_unlink		= NULL,\n"
    buf += "	.fabric_make_np			= NULL,\n"
    buf += "	.fabric_drop_np			= NULL,\n"
    buf += "	.fabric_make_nodeacl		= " + fabric_mod_name + "_make_nodeacl,\n"
    buf += "	.fabric_drop_nodeacl		= " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += "	struct target_fabric_configfs *fabric;\n"
    buf += "	int ret;\n\n"
    buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += "		\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += "		utsname()->machine);\n"
    buf += "	/*\n"
    buf += "	 * Register the top level struct config_item_type with TCM core\n"
    buf += "	 */\n"
    buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += "	if (IS_ERR(fabric)) {\n"
    buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += "		return PTR_ERR(fabric);\n"
    buf += "	}\n"
    buf += "	/*\n"
    buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += "	 */\n"
    buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += "	/*\n"
    buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += "	 */\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += "	/*\n"
    buf += "	 * Register the fabric for use within TCM\n"
    buf += "	 */\n"
    buf += "	ret = target_fabric_configfs_register(fabric);\n"
    buf += "	if (ret < 0) {\n"
    buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += "		return ret;\n"
    buf += "	}\n"
    buf += "	/*\n"
    buf += "	 * Setup our local pointer to *fabric\n"
    buf += "	 */\n"
    buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "	return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += "	if (!" + fabric_mod_name + "_fabric_configfs)\n"
    buf += "		return;\n\n"
    buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += "	int ret;\n\n"
    buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
    buf += "	if (ret < 0)\n"
    buf += "		return ret;\n\n"
    buf += "	return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"
    # write() returns None (py2) / a count (py3); the old "if ret:" error
    # check could never work — rely on OSError from open()/write() instead.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Scan target_core_fabric.h and collect fabric-ops member lines.

    Reads the mainline header under tcm_dir and appends every
    function-pointer member line (lines containing "(*") of
    struct target_core_fabric_ops to the module-level fabric_ops list,
    which drives the later stub-generation pass.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
    print("Using tcm_mod_scan_fabric_ops: " + fabric_ops_api)
    process_fo = 0
    with open(fabric_ops_api, 'r') as p:
        line = p.readline()
        while line:
            # Skip forward until the struct definition has been seen once.
            if process_fo == 0 and re.search(r'struct target_core_fabric_ops {', line):
                line = p.readline()
                continue
            if process_fo == 0:
                process_fo = 1
                line = p.readline()
                # Search for function pointer
                if not re.search(r'\(\*', line):
                    continue
                fabric_ops.append(line.rstrip())
                continue
            line = p.readline()
            # Search for function pointer
            if not re.search(r'\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_fabric.c and _fabric.h stubs.

    For every fabric-ops member previously collected into the global
    fabric_ops list, emits a no-op C implementation into _fabric.c and
    its prototype into _fabric.h, specialized by proto_ident for the
    transport-ID helpers.  Reads the module-level fabric_mod_port /
    fabric_mod_init_port names set by the _include generator.
    """
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print("Writing file: " + f)
    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print("Writing file: " + fi)
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi.h>\n"
    buf += "#include <scsi/scsi_host.h>\n"
    buf += "#include <scsi/scsi_device.h>\n"
    buf += "#include <scsi/scsi_cmnd.h>\n"
    buf += "#include <scsi/libfc.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_configfs.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "	return 1;\n"
    buf += "}\n\n"
    bufi = "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "	return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
    # Emit one stub (plus prototype) per collected fabric-ops member.
    for fo in fabric_ops:
        # print("fabric_ops: " + fo)
        if re.search(r'get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue
        if re.search(r'get_fabric_proto_ident', fo):
            buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "	u8 proto_id;\n\n"
            buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "	case SCSI_PROTOCOL_FCP:\n"
                buf += "	default:\n"
                buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                buf += "		break;\n"
            elif proto_ident == "SAS":
                buf += "	case SCSI_PROTOCOL_SAS:\n"
                buf += "	default:\n"
                buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                buf += "		break;\n"
            elif proto_ident == "iSCSI":
                buf += "	case SCSI_PROTOCOL_ISCSI:\n"
                buf += "	default:\n"
                buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                buf += "		break;\n"
            buf += "	}\n\n"
            buf += "	return proto_id;\n"
            buf += "}\n\n"
            bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
        if re.search(r'get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
        if re.search(r'get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
        if re.search(r'get_default_depth', fo):
            buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "	return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
        if re.search(r'get_pr_transport_id\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
            buf += "	struct se_portal_group *se_tpg,\n"
            buf += "	struct se_node_acl *se_nacl,\n"
            buf += "	struct t10_pr_registration *pr_reg,\n"
            buf += "	int *format_code,\n"
            buf += "	unsigned char *buf)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "	int ret = 0;\n\n"
            buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "	case SCSI_PROTOCOL_FCP:\n"
                buf += "	default:\n"
                buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "					format_code, buf);\n"
                buf += "		break;\n"
            elif proto_ident == "SAS":
                buf += "	case SCSI_PROTOCOL_SAS:\n"
                buf += "	default:\n"
                buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "					format_code, buf);\n"
                buf += "		break;\n"
            elif proto_ident == "iSCSI":
                buf += "	case SCSI_PROTOCOL_ISCSI:\n"
                buf += "	default:\n"
                buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "					format_code, buf);\n"
                buf += "		break;\n"
            buf += "	}\n\n"
            buf += "	return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
            bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += "			int *, unsigned char *);\n"
        if re.search(r'get_pr_transport_id_len\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
            buf += "	struct se_portal_group *se_tpg,\n"
            buf += "	struct se_node_acl *se_nacl,\n"
            buf += "	struct t10_pr_registration *pr_reg,\n"
            buf += "	int *format_code)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "	int ret = 0;\n\n"
            buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "	case SCSI_PROTOCOL_FCP:\n"
                buf += "	default:\n"
                buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "					format_code);\n"
                buf += "		break;\n"
            elif proto_ident == "SAS":
                buf += "	case SCSI_PROTOCOL_SAS:\n"
                buf += "	default:\n"
                buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "					format_code);\n"
                buf += "		break;\n"
            elif proto_ident == "iSCSI":
                buf += "	case SCSI_PROTOCOL_ISCSI:\n"
                buf += "	default:\n"
                buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "					format_code);\n"
                buf += "		break;\n"
            buf += "	}\n\n"
            buf += "	return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
            bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += "			int *);\n"
        if re.search(r'parse_pr_out_transport_id\)\(', fo):
            buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
            buf += "	struct se_portal_group *se_tpg,\n"
            buf += "	const char *buf,\n"
            buf += "	u32 *out_tid_len,\n"
            buf += "	char **port_nexus_ptr)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "	char *tid = NULL;\n\n"
            buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "	case SCSI_PROTOCOL_FCP:\n"
                buf += "	default:\n"
                buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "					port_nexus_ptr);\n"
            elif proto_ident == "SAS":
                buf += "	case SCSI_PROTOCOL_SAS:\n"
                buf += "	default:\n"
                buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "					port_nexus_ptr);\n"
            elif proto_ident == "iSCSI":
                buf += "	case SCSI_PROTOCOL_ISCSI:\n"
                buf += "	default:\n"
                buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "					port_nexus_ptr);\n"
            buf += "	}\n\n"
            buf += "	return tid;\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
            bufi += "			const char *, u32 *, char **);\n"
        if re.search(r'alloc_fabric_acl\)\(', fo):
            buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
            buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
            buf += "	if (!nacl) {\n"
            buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
            buf += "		return NULL;\n"
            buf += "	}\n\n"
            buf += "	return &nacl->se_node_acl;\n"
            buf += "}\n\n"
            bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
        if re.search(r'release_fabric_acl\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
            buf += "	struct se_portal_group *se_tpg,\n"
            buf += "	struct se_node_acl *se_nacl)\n"
            buf += "{\n"
            buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
            buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
            buf += "	kfree(nacl);\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
            bufi += "			struct se_node_acl *);\n"
        if re.search(r'tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "	return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
        if re.search(r'\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
        if re.search(r'shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
        if re.search(r'close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "	return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
        if re.search(r'stop_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
            buf += "{\n"
            buf += "	return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
        if re.search(r'fall_back_to_erl0\)\(', fo):
            buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "	return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
        if re.search(r'sess_logged_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
        if re.search(r'sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
        if re.search(r'write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
        if re.search(r'write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
        if re.search(r'set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += "	return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
        if re.search(r'get_task_tag\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
        if re.search(r'get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
        if re.search(r'queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
        if re.search(r'queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
        if re.search(r'queue_tm_rsp\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
        if re.search(r'get_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
        if re.search(r'set_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
        if re.search(r'is_state_remove\)\(', fo):
            buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "	return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
    # write() returns None (py2) / a count (py3); the old "if ret:" error
    # checks could never work — rely on OSError from open()/write() instead.
    with open(f, 'w') as p:
        p.write(buf)
    with open(fi, 'w') as pi:
        pi.write(bufi)
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new module's obj-$(CONFIG_...) rule to drivers/target/Makefile."""
    makefile_path = tcm_dir + "/drivers/target/Makefile"
    entry = ("obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " +
             fabric_mod_name.lower() + "/\n")
    handle = open(makefile_path, 'a')
    handle.write(entry)
    handle.close()
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for the new module to drivers/target/Kconfig."""
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    entry = 'source "drivers/target/' + fabric_mod_name.lower() + '/Kconfig"\n'
    handle = open(kconfig_path, 'a')
    handle.write(entry)
    handle.close()
    return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
ZuluPro/moto | tests/test_swf/responses/test_activity_tasks.py | 7 | 8550 | from boto.swf.exceptions import SWFResponseError
from freezegun import freeze_time
from moto import mock_swf_deprecated
from moto.swf import swf_backend
from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION
# PollForActivityTask endpoint
@mock_swf_deprecated
def test_poll_for_activity_task_when_one():
    """Polling returns the scheduled task and logs ActivityTaskStarted."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])

    resp = client.poll_for_activity_task(
        "test-domain", "activity-task-list", identity="surprise")
    resp["activityId"].should.equal("my-activity-001")
    resp["taskToken"].should_not.be.none

    resp = client.get_workflow_execution_history(
        "test-domain", client.run_id, "uid-abcd1234")
    resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted")
    resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal(
        {"identity": "surprise", "scheduledEventId": 5}
    )
@mock_swf_deprecated
def test_poll_for_activity_task_when_none():
    """Polling with no scheduled task yields only a zero startedEventId."""
    client = setup_workflow()
    result = client.poll_for_activity_task("test-domain", "activity-task-list")
    result.should.equal({"startedEventId": 0})
@mock_swf_deprecated
def test_poll_for_activity_task_on_non_existent_queue():
    """Polling an unknown task list behaves like an empty one."""
    client = setup_workflow()
    result = client.poll_for_activity_task("test-domain", "non-existent-queue")
    result.should.equal({"startedEventId": 0})
# CountPendingActivityTasks endpoint
@mock_swf_deprecated
def test_count_pending_activity_tasks():
    """A freshly scheduled activity task shows up in the pending count."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])

    result = client.count_pending_activity_tasks(
        "test-domain", "activity-task-list")
    result.should.equal({"count": 1, "truncated": False})
@mock_swf_deprecated
def test_count_pending_decision_tasks_on_non_existent_task_list():
    """Counting against an unknown task list reports zero tasks."""
    client = setup_workflow()
    result = client.count_pending_activity_tasks("test-domain", "non-existent")
    result.should.equal({"count": 0, "truncated": False})
# RespondActivityTaskCompleted endpoint
@mock_swf_deprecated
def test_respond_activity_task_completed():
    """Completing a task returns None and logs ActivityTaskCompleted."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    task_token = client.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]

    outcome = client.respond_activity_task_completed(
        task_token, result="result of the task")
    outcome.should.be.none

    history = client.get_workflow_execution_history(
        "test-domain", client.run_id, "uid-abcd1234")
    history["events"][-2]["eventType"].should.equal("ActivityTaskCompleted")
    history["events"][-2]["activityTaskCompletedEventAttributes"].should.equal(
        {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6}
    )
@mock_swf_deprecated
def test_respond_activity_task_completed_on_closed_workflow_execution():
    """Completing a task of an already-closed execution is refused."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    task_token = client.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]

    # bad: we're closing workflow execution manually, but endpoints are not
    # coded for now..
    wfe = swf_backend.domains[0].workflow_executions[-1]
    wfe.execution_status = "CLOSED"
    # /bad

    client.respond_activity_task_completed.when.called_with(
        task_token
    ).should.throw(SWFResponseError, "WorkflowExecution=")
@mock_swf_deprecated
def test_respond_activity_task_completed_with_task_already_completed():
    """A second completion of the same task token must fail."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    task_token = client.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]

    client.respond_activity_task_completed(task_token)

    client.respond_activity_task_completed.when.called_with(
        task_token
    ).should.throw(SWFResponseError, "Unknown activity, scheduledEventId = 5")
# RespondActivityTaskFailed endpoint
@mock_swf_deprecated
def test_respond_activity_task_failed():
    """Failing a task returns None and logs ActivityTaskFailed with details."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    task_token = client.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]

    outcome = client.respond_activity_task_failed(
        task_token, reason="short reason", details="long details")
    outcome.should.be.none

    history = client.get_workflow_execution_history(
        "test-domain", client.run_id, "uid-abcd1234")
    history["events"][-2]["eventType"].should.equal("ActivityTaskFailed")
    history["events"][-2]["activityTaskFailedEventAttributes"].should.equal(
        {"reason": "short reason", "details": "long details",
         "scheduledEventId": 5, "startedEventId": 6}
    )
@mock_swf_deprecated
def test_respond_activity_task_completed_with_wrong_token():
    # NB: we just test ONE failure case for RespondActivityTaskFailed
    # because the safeguards are shared with RespondActivityTaskCompleted, so
    # no need to retest everything end-to-end.
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    client.poll_for_activity_task("test-domain", "activity-task-list")

    client.respond_activity_task_failed.when.called_with(
        "not-a-correct-token"
    ).should.throw(SWFResponseError, "Invalid token")
# RecordActivityTaskHeartbeat endpoint
@mock_swf_deprecated
def test_record_activity_task_heartbeat():
    """A valid heartbeat reports that cancellation was not requested."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    task_token = client.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]

    result = client.record_activity_task_heartbeat(task_token)
    result.should.equal({"cancelRequested": False})
@mock_swf_deprecated
def test_record_activity_task_heartbeat_with_wrong_token():
    """A heartbeat carrying a bogus task token is rejected."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    client.poll_for_activity_task(
        "test-domain", "activity-task-list")["taskToken"]

    client.record_activity_task_heartbeat.when.called_with(
        "bad-token", details="some progress details"
    ).should.throw(SWFResponseError)
@mock_swf_deprecated
def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout():
    """Heartbeat details are copied onto the ActivityTaskTimedOut event."""
    client = setup_workflow()
    dtask_token = client.poll_for_decision_task(
        "test-domain", "queue")["taskToken"]
    client.respond_decision_task_completed(
        dtask_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])

    with freeze_time("2015-01-01 12:00:00"):
        task_token = client.poll_for_activity_task(
            "test-domain", "activity-task-list")["taskToken"]
        client.record_activity_task_heartbeat(
            task_token, details="some progress details")

    with freeze_time("2015-01-01 12:05:30"):
        # => Activity Task Heartbeat timeout reached!!
        history = client.get_workflow_execution_history(
            "test-domain", client.run_id, "uid-abcd1234")
        history["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut")
        attrs = history["events"][-2]["activityTaskTimedOutEventAttributes"]
        attrs["details"].should.equal("some progress details")
| apache-2.0 |
lepistone/server-tools | fetchmail_attach_from_folder/match_algorithm/openerp_standard.py | 51 | 2133 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .base import base
class openerp_standard(base):
    '''Trivial match algorithm: defer entirely to OpenERP's built-in
    mail.thread routing. Every fetched message matches unconditionally.'''

    name = 'Odoo standard'
    # These folder-configuration fields are meaningless for this algorithm,
    # so the UI shows them read-only.
    readonly_fields = [
        'model_field',
        'mail_field',
        'match_first',
        'domain',
        'model_order',
        'flag_nonmatching',
    ]

    def search_matches(self, cr, uid, conf, mail_message, mail_message_org):
        '''Report a single unconditional match; duplicate e-mails are
        weeded out later by their message_id.'''
        return [True]

    def handle_match(
            self, cr, uid, connection, object_id, folder,
            mail_message, mail_message_org, msgid, context):
        '''Feed the raw message through mail.thread.message_process and,
        when configured, flag it as deleted on the IMAP server.'''
        thread_obj = folder.pool.get('mail.thread')
        result = thread_obj.message_process(
            cr, uid,
            folder.model_id.model, mail_message_org,
            save_original=folder.server_id.original,
            strip_attachments=(not folder.server_id.attach),
            context=context
        )
        if folder.delete_matching:
            connection.store(msgid, '+FLAGS', '\\DELETED')
        return [result]
| agpl-3.0 |
TRox1972/youtube-dl | youtube_dl/extractor/azubu.py | 4 | 5200 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
)
class AzubuIE(InfoExtractor):
    # Single-video pages on azubu.tv / azubu.uol.com.br.
    _VALID_URL = r'https?://(?:www\.)?azubu\.(?:tv|uol.com.br)/[^/]+#!/play/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
            'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
            'info_dict': {
                'id': '15575',
                'ext': 'mp4',
                'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
                'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
                'thumbnail': 're:^https?://.*\.jpe?g',
                'timestamp': 1417523507.334,
                'upload_date': '20141202',
                'duration': 9988.7,
                'uploader': 'GSL',
                'uploader_id': 414310,
                'view_count': int,
            },
        },
        {
            'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
            'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
            'info_dict': {
                'id': '9344',
                'ext': 'mp4',
                'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
                'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
                'thumbnail': 're:^https?://.*\.jpe?g',
                'timestamp': 1410530893.320,
                'upload_date': '20140912',
                'duration': 172.385,
                'uploader': 'FnaticTV',
                'uploader_id': 272749,
                'view_count': int,
            },
            'skip': 'Channel offline',
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        data = self._download_json(
            'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']

        title = data['title'].strip()
        user = data.get('user', {})
        stream_params = json.loads(data['stream_params'])

        # All listed renditions, plus the single full-length entry if any.
        renditions = stream_params.get('renditions') or []
        video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
        if video:
            renditions.append(video)

        if not renditions and not user.get('channel', {}).get('is_live', True):
            raise ExtractorError(
                '%s said: channel is offline.' % self.IE_NAME, expected=True)

        formats = []
        for rendition in renditions:
            # Skip entries without a playable URL.
            if not rendition['url']:
                continue
            formats.append({
                'url': rendition['url'],
                'width': rendition['frameWidth'],
                'height': rendition['frameHeight'],
                'vbr': float_or_none(rendition['encodingRate'], 1000),
                'filesize': rendition['size'],
                'vcodec': rendition['videoCodec'],
                'container': rendition['videoContainer'],
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': data.get('description'),
            'thumbnail': data.get('thumbnail'),
            'timestamp': float_or_none(stream_params.get('creationDate'), 1000),
            'duration': float_or_none(stream_params.get('length'), 1000),
            'uploader': user.get('username'),
            'uploader_id': user.get('id'),
            'view_count': data.get('view_count'),
            'formats': formats,
        }
class AzubuLiveIE(InfoExtractor):
    # Channel pages (no further path component) -- live streams only.
    _VALID_URL = r'https?://(?:www\.)?azubu\.(?:tv|uol.com.br)/(?P<id>[^/]+)$'
    _TESTS = [{
        'url': 'http://www.azubu.tv/MarsTVMDLen',
        'only_matching': True,
    }, {
        'url': 'http://azubu.uol.com.br/adolfz',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        channel = self._match_id(url)

        info = self._download_json(
            'http://api.azubu.tv/public/modules/last-video/{0}/info'.format(channel),
            channel)['data']
        if info['type'] != 'STREAM':
            raise ExtractorError(
                '{0} is not streaming live'.format(channel), expected=True)

        # Fetch stream metadata from Brightcove; the Accept header carries
        # the policy key the playback API expects.
        req = sanitized_Request(
            'https://edge-elb.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:' + info['reference_id'])
        req.add_header('Accept', 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV')
        bc_info = self._download_json(req, channel)

        m3u8_url = next(
            source['src']
            for source in bc_info['sources']
            if source['container'] == 'M2TS')
        formats = self._extract_m3u8_formats(m3u8_url, channel, ext='mp4')
        self._sort_formats(formats)

        return {
            'id': info['id'],
            'title': self._live_title(info['title']),
            'uploader_id': channel,
            'formats': formats,
            'is_live': True,
            'thumbnail': bc_info['poster'],
        }
| unlicense |
andela-cmutembei/tubuy | api/serializers.py | 1 | 2298 | from api.models.user import User
from api.models.commodity import Commodity
from api.models.contribution import Contribution
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the User model.

    Exposes the public account fields; the password is accepted on input
    only (write-only, rendered as a password widget) and never echoed back.
    """
    # uuid is generated server-side, so it is read-only; format='hex'
    # renders it without dashes.
    uuid = serializers.UUIDField(read_only=True, format='hex')
    password = serializers.CharField(
        style={'input_type': 'password'},
        write_only=True,
        required=True
    )

    class Meta:
        model = User
        fields = (
            'url',
            'uuid',
            'username',
            'password',
            'phone_number',
            'email',
        )
        # The hyperlinked 'url' field resolves users by uuid instead of pk.
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'}
        }

    def create(self, validated_data):
        """Create and return a new User with a properly hashed password.
        """
        user = User(**validated_data)
        # set_password hashes the raw password, overwriting the plain
        # value the constructor received before the instance is saved.
        user.set_password(validated_data['password'])
        user.save()
        return user
class CommoditySerializer(serializers.ModelSerializer):
    """Serializer for the Commodity model.

    Read-only fields (uuid, QR image, funding state, remaining amount) are
    computed/managed server-side; clients supply name, description and price.
    """
    uuid = serializers.UUIDField(read_only=True, format='hex')
    # Rendered as the requesting user's username, never writable here.
    requestor = serializers.ReadOnlyField(source='requestor.username')
    price = serializers.DecimalField(max_digits=8, decimal_places=2)
    # use_url=True serializes the QR image as an absolute URL.
    commodity_qr = serializers.ImageField(use_url=True, read_only=True)
    funded = serializers.BooleanField(read_only=True)
    remaining_amount = serializers.DecimalField(
        max_digits=8,
        decimal_places=2,
        read_only=True
    )

    class Meta:
        model = Commodity
        fields = (
            'url',
            'commodity_qr',
            'uuid',
            'name',
            'description',
            'requestor',
            'funded',
            'price',
            'remaining_amount'
        )
        # The hyperlinked 'url' field resolves commodities by uuid, not pk.
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'}
        }
class ContributionSerializer(serializers.ModelSerializer):
    """Serializer for the Contribution model.

    NOTE: the spelling 'contributer' mirrors the model's field name.
    """
    # Rendered as the contributing user's username, never writable here.
    contributer = serializers.ReadOnlyField(source='contributer.username')
    # The target commodity is referenced by its (assumed unique) name --
    # TODO confirm Commodity.name is unique, otherwise lookups may fail.
    contributing_to = serializers.SlugRelatedField(
        queryset=Commodity.objects.all(),
        slug_field='name'
    )

    class Meta:
        model = Contribution
        fields = ('amount', 'contributer', 'contributing_to')
| mit |
westerncapelabs/promopuffin-core | web/test_promopuffin-core.py | 1 | 16697 | import unittest
from promopuffincore import main, accounts, campaigns, codes
import test_data
import json
class PromoPuffinCoreTestCase(unittest.TestCase):
    """End-to-end HTTP tests for the promopuffin-core Flask API.

    setUp re-seeds the module-level in-memory stores (accounts, campaigns,
    codes) from fixtures in test_data, so every test starts from the same
    known state against a fresh Werkzeug test client. 'somekey' appears to
    be an admin-level auth key and 'thisandthat' an account-scoped one --
    TODO confirm against the test_data fixtures.
    """

    def setUp(self):
        # Earlier SQLite-backed configuration, currently disabled:
        # db_conf = {
        # 'name': '',
        # 'engine': 'SqliteDatabase',
        # 'check_same_thread': False,
        # }
        # self.db_fd, db_conf["name"] = tempfile.mkstemp()
        # main.app.config['DATABASE'] = db_conf
        main.app.config['TESTING'] = True
        # dict() copies so a test cannot mutate the shared fixture data.
        accounts.accounts_data = dict(test_data.data_accounts_data)
        campaigns.campaigns_data = dict(test_data.data_campaigns_data)
        codes.codes_data = dict(test_data.data_campaigns_codes_data)
        self.app = main.app.test_client()

    def tearDown(self):
        # Nothing to clean up while the app runs purely in memory.
        pass
        # os.close(self.db_fd)
        # os.unlink(main.app.config['DATABASE']['name'])

    """ general tests """

    def test_404_render(self):
        rv = self.app.get('/kldjfljsdlkfjsdlkjfdslkj') # Should never validate!
        assert '404' in rv.data # Should be the <title> of the page

    def test_hello_world(self):
        rv = self.app.get('/heartbeat')
        assert 'Hello World!' in rv.data # Should be the <title> of the page

    """ Accounts Tests """

    def test_accounts_account_login_success(self):
        # Create an account first, then log in with the same credentials.
        rv = self.app.post("/accounts?auth=somekey", data=test_data.data_accounts_login_good)
        account_data = json.loads(rv.data)
        data = test_data.data_accounts_login_good
        data['account_id'] = account_data['account_id']
        rv = self.app.post('/accounts/login', data=data)
        assert rv.status_code == 201

    def test_accounts_account_login_fail(self):
        rv = self.app.post("/accounts?auth=somekey", data=test_data.data_accounts_login_good)
        account_data = json.loads(rv.data)
        data = test_data.data_accounts_login_bad
        data['account_id'] = account_data['account_id']
        rv = self.app.post('/accounts/login', data=data)
        assert rv.status_code == 401
        assert "Unauthorized: Incorrect username and password match" in rv.data

    def test_accounts_account_login_no_data(self):
        # Login with only an account_id and no credentials is a bad request.
        rv = self.app.post("/accounts?auth=somekey", data=test_data.data_accounts_login_good)
        account_data = json.loads(rv.data)
        data = {
            'account_id': account_data['account_id'],
        }
        rv = self.app.post('/accounts/login', data=data)
        assert rv.status_code == 400

    def test_accounts_list(self):
        rv = self.app.get('/accounts?auth=somekey')
        assert "user1@example.com" in rv.data

    def test_accounts_add_new(self):
        rv = self.app.post("/accounts?auth=somekey", data=test_data.data_accounts_post_good)
        assert "mike+testpromopuffin@westerncapelabs.com" in rv.data

    def test_accounts_account_found(self):
        rv = self.app.get('/accounts/uuid_1?auth=somekey')
        assert rv.status_code == 200
        assert "user1@example.com" in rv.data

    def test_accounts_account_not_found(self):
        rv = self.app.get('/accounts/uuid_90j0j0j?auth=somekey')
        assert rv.status_code == 404

    def test_accounts_account_delete_found(self):
        rv = self.app.delete('/accounts/uuid_1?auth=somekey')
        assert rv.status_code == 204
        rv = self.app.get('/accounts/uuid_1?auth=somekey')
        assert rv.status_code == 404

    def test_accounts_account_delete_not_found(self):
        rv = self.app.delete('/accounts/uuid_342fhdjs41?auth=somekey')
        assert rv.status_code == 404

    def test_accounts_account_put_found(self):
        rv = self.app.put('/accounts/uuid_1?auth=somekey', data=test_data.data_accounts_put_good)
        assert rv.status_code == 201
        rv = self.app.get('/accounts/uuid_1?auth=somekey')
        assert rv.status_code == 200
        assert "user1@example.com" in rv.data

    def test_accounts_account_put_not_found(self):
        rv = self.app.put('/accounts/uuid_4234jhkjhk4?auth=somekey', data=test_data.data_accounts_put_good)
        assert rv.status_code == 404

    def test_accounts_list_not_authenticated(self):
        rv = self.app.get('/accounts?auth=some3424gegkey')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_accounts_account_post_not_authenticated(self):
        rv = self.app.post('/accounts?auth=somedskfjslf', data=test_data.data_accounts_post_good)
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_accounts_account_post_no_data(self):
        rv = self.app.post('/accounts?auth=somekey', data="")
        assert rv.status_code == 400

    def test_accounts_account_get_not_authenticated(self):
        rv = self.app.get('/accounts/uuid_1?auth=somedskfjslf')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_accounts_account_put_not_authenticated(self):
        rv = self.app.put('/accounts/uuid_1?auth=somedskfjslf', data=test_data.data_accounts_put_good)
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_accounts_account_delete_not_authenticated(self):
        rv = self.app.delete('/accounts/uuid_1?auth=somedskfjslf')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_accounts_account_put_no_data(self):
        rv = self.app.put('/accounts/uuid_1?auth=thisandthat', data="")
        assert rv.status_code == 400

    """ Campaigns Tests """

    def test_campaigns_list(self):
        rv = self.app.get('/campaigns?auth=somekey')
        assert "Campaign3" in rv.data

    def test_campaigns_add_new(self):
        rv = self.app.post("/campaigns?auth=somekey", data=test_data.data_campaigns_post_good)
        assert "OneTheWayCampaign" in rv.data

    def test_campaigns_campaign_not_found(self):
        rv = self.app.get('/campaigns/uuid_66hj768?auth=somekey')
        assert rv.status_code == 404

    def test_campaigns_campaign_found(self):
        rv = self.app.get('/campaigns/uuid_1?auth=somekey')
        assert rv.status_code == 200
        assert "Campaign1" in rv.data

    def test_campaigns_campaign_delete_found(self):
        rv = self.app.delete('/campaigns/uuid_1?auth=somekey')
        assert rv.status_code == 204
        rv = self.app.get('/campaigns/uuid_1?auth=somekey')
        assert rv.status_code == 404

    def test_campaigns_campaign_delete_not_found(self):
        rv = self.app.delete('/campaigns/uuid_342jh4khk231?auth=somekey')
        assert rv.status_code == 404

    def test_campaigns_campaign_put_found(self):
        rv = self.app.put('/campaigns/uuid_1?auth=somekey', data=test_data.data_campaigns_put_good)
        assert rv.status_code == 201
        assert "2013-06-21T19:12:04.781462" in rv.data
        rv = self.app.get('/campaigns/uuid_1?auth=somekey')
        assert rv.status_code == 200
        assert "OneTheWayCampaign" in rv.data

    def test_campaigns_campaign_put_not_found(self):
        rv = self.app.put('/campaigns/uuid_43420jkds21?auth=somekey', data=test_data.data_campaigns_put_good)
        assert rv.status_code == 404

    def test_campaigns_campaign_status_list_found(self):
        rv = self.app.get('/campaigns/uuid_2/status?auth=somekey')
        assert rv.status_code == 200
        assert "running" in rv.data

    def test_campaigns_campaign_status_list_not_found(self):
        rv = self.app.get('/campaigns/uuid_3423jh2k1/status?auth=somekey')
        assert rv.status_code == 404

    def test_campaigns_campaign_status_update_found(self):
        rv = self.app.post('/campaigns/uuid_1/status?auth=somekey', data=test_data.data_campaigns_status_post_good)
        assert rv.status_code == 201
        assert "halted" in rv.data

    def test_campaigns_campaign_status_update_not_found(self):
        rv = self.app.post('/campaigns/uuid_3429kjkj31/status?auth=somekey', data=test_data.data_campaigns_status_post_good)
        assert rv.status_code == 404

    def test_campaigns_list_not_authenticated(self):
        rv = self.app.get('/campaigns?auth=some3424gegkey')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_post_not_authenticated(self):
        rv = self.app.post('/campaigns?auth=somedskfjslf', data=test_data.data_campaigns_post_good)
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_post_no_data(self):
        rv = self.app.post('/campaigns?auth=somekey', data="")
        assert rv.status_code == 400

    def test_campaigns_campaign_get_not_authenticated(self):
        rv = self.app.get('/campaigns/uuid_1?auth=somedskfjslf')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_put_not_authenticated(self):
        rv = self.app.put('/campaigns/uuid_1?auth=somedskfjslf', data=test_data.data_campaigns_put_good)
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_no_campaign_id(self):
        # PUT on the collection (no id) is not an allowed method.
        rv = self.app.put('/campaigns?auth=somekey', data=test_data.data_campaigns_put_good)
        assert rv.status_code == 405

    def test_campaigns_campaign_put_no_data(self):
        rv = self.app.put('/campaigns/uuid_1?auth=somekey', data="")
        assert rv.status_code == 400

    def test_campaigns_campaign_delete_not_authenticated(self):
        rv = self.app.delete('/campaigns/uuid_1?auth=somedskfjslf')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_status_not_authenticated(self):
        rv = self.app.get('/campaigns/uuid_1/status?auth=some3432423f22key')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_status_post_not_authenticated(self):
        rv = self.app.post('/campaigns/uuid_1/status?auth=some3432423f22key', data=test_data.data_campaigns_status_post_good)
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_status_no_data(self):
        rv = self.app.post('/campaigns/uuid_1/status?auth=somekey', data="")
        assert rv.status_code == 400

    def test_campaigns_campaign_status_no_campaign_id(self):
        rv = self.app.post('/campaigns/status?auth=somekey', data=test_data.data_campaigns_status_post_good)
        assert rv.status_code == 405 # method not allowed

    def test_campaigns_campaign_put_bad(self):
        # Fixture has an end datetime earlier than its start datetime.
        rv = self.app.put('/campaigns/uuid_1?auth=somekey', data=test_data.data_campaigns_put_bad)
        assert "Start datetime starts after end datetime" in rv.data

    """ Codes Tests """

    def test_campaigns_campaign_codes_list(self):
        rv = self.app.get('/campaigns/uuid_3/codes?auth=thisandthat')
        assert "QWZ-EMD-ABCDEF" in rv.data

    def test_campaigns_campaign_codes_list_not_authenticated(self):
        rv = self.app.post('/campaigns/uuid_1/codes?auth=some3432423f22key')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_codes_add_new(self):
        rv = self.app.post('/campaigns/uuid_1/codes?auth=somekey', data=test_data.data_campaigns_codes_post_good)
        assert rv.status_code == 201
        assert "ABC-DEF-GIJKLM" in rv.data

    def test_campaigns_campaign_codes_code_post_no_data(self):
        # NOTE(review): path lacks a leading '/' -- the test client appears
        # to resolve it relative to the root; confirm this is intentional.
        rv = self.app.post('campaigns/uuid_1/codes?auth=somekey', data="")
        assert rv.status_code == 400

    def test_campaigns_campaign_codes_post_not_authenticated(self):
        rv = self.app.post('/campaigns/uuid_1/codes?auth=some3432423f22key', data=test_data.data_campaigns_codes_post_good)
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_codes_code_found(self):
        rv = self.app.get('/campaigns/uuid_1/codes/uuid_1?auth=somekey')
        assert rv.status_code == 200
        assert "ACT-EKL-ABCDEF" in rv.data

    def test_campaigns_campaign_codes_code_found_not_authenticated(self):
        rv = self.app.get('/campaigns/uuid_1/codes/uuid_1?auth=some3432423f22ke')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_codes_code_not_found(self):
        rv = self.app.get('/campaigns/uuid_1/codes/uuid_34532errwr?auth=somekey')
        assert rv.status_code == 404

    def test_campaigns_campaign_codes_code_delete_found(self):
        rv = self.app.delete('/campaigns/uuid_1/codes/uuid_1?auth=somekey')
        assert rv.status_code == 204
        rv = self.app.get('/campaigns/uuid_1/codes/uuid_1?auth=somekey')
        assert rv.status_code == 404

    def test_campaigns_campaign_codes_code_delete_not_authenticated(self):
        rv = self.app.delete('/campaigns/uuid_1/codes/uuid_1?auth=some3432423f22key')
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_codes_code_delete_not_found(self):
        rv = self.app.delete('/campaigns/uuid_1/codes/uuid_342dfs1?auth=somekey')
        assert rv.status_code == 404

    def test_campaigns_campaign_codes_code_put_found(self):
        rv = self.app.put('/campaigns/uuid_1/codes/uuid_1?auth=somekey', data=test_data.data_campaigns_codes_put_good)
        assert rv.status_code == 201
        rv = self.app.get('/campaigns/uuid_1/codes/uuid_1?auth=somekey')
        assert rv.status_code == 200
        assert "redeemed" in rv.data

    def test_campaigns_campaign_codes_code_put_not_authenticated(self):
        # No auth query parameter at all.
        rv = self.app.put('/campaigns/uuid_1/codes/uuid_1', data=test_data.data_campaigns_codes_put_good)
        assert "Unauthorized" in rv.data
        assert rv.status_code == 401

    def test_campaigns_campaign_codes_code_put_not_found(self):
        rv = self.app.put('/campaigns/uuid_1/codes/uuid_432dfs341?auth=somekey', data=test_data.data_campaigns_codes_put_good)
        assert rv.status_code == 404

    def test_campaigns_campaign_codes_put_no_data(self):
        rv = self.app.put('/campaigns/uuid_1/codes/uuid_1?auth=somekey', data="")
        assert rv.status_code == 400

    def test_campaigns_campaign_codes_no_campaign_id(self):
        rv = self.app.put('/campaigns/codes/uuid_1?auth=thisandthat', data=test_data.data_campaigns_codes_put_good)
        assert rv.status_code == 404

    """ Validation Tests """

    def test_validate_success_percentage(self):
        rv = self.app.post('/validate', data=test_data.data_validation_post_percentage_good)
        assert rv.status_code == 201
        assert "true" in rv.data

    def test_validate_success_fixed(self):
        rv = self.app.post('/validate', data=test_data.data_validation_post_fixed_good)
        assert rv.status_code == 201
        assert "true" in rv.data

    def test_validate_fail(self):
        rv = self.app.post('/validate', data=test_data.data_validation_post_bad)
        assert rv.status_code == 400
        assert "false" in rv.data

    def test_validate_no_data(self):
        # With no payload the code field is None, hence the 404 message.
        rv = self.app.post('/validate', data="")
        assert "Code None doesn't exist" in rv.data
        assert rv.status_code == 404

    """ Redeemed Tests """

    def test_redeem_percentage_success(self):
        rv = self.app.post('/redeem/uuid_3?auth=thisandthat', data=test_data.data_redeem_percentage_good)
        assert rv.status_code == 201
        assert "true" in rv.data

    def test_redeem_percentage_success_admin_auth(self):
        rv = self.app.post('/redeem/uuid_3?auth=somekey', data=test_data.data_redeem_percentage_good)
        assert rv.status_code == 201
        assert "true" in rv.data

    def test_redeem_percentage_fail(self):
        rv = self.app.post('/redeem/uuid_2?auth=thisandthat', data=test_data.data_redeem_percentage_bad)
        assert rv.status_code == 400
        assert "false" in rv.data

    def test_redeem_fixed_success(self):
        rv = self.app.post('/redeem/uuid_3?auth=thisandthat', data=test_data.data_redeem_fixed_good)
        assert rv.status_code == 201
        assert "true" in rv.data

    def test_redeem_auth_fail(self):
        rv = self.app.post('/redeem/uuid_1?auth=dskfsld9', data=test_data.data_redeem_percentage_good)
        assert rv.status_code == 401
        assert "Unauthorized" in rv.data

    def test_redeem_no_data(self):
        rv = self.app.post('/redeem/uuid_1?auth=somekey', data="")
        assert "Code None doesn't exist" in rv.data
        assert rv.status_code == 404

    def test_redeem_no_campaign_id(self):
        rv = self.app.post('/redeem?auth=somekey', data=test_data.data_redeem_percentage_good)
        assert rv.status_code == 404
if __name__ == '__main__':
    # Run the suite when the file is executed directly.
    unittest.main()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.