repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
biocore/micronota
micronota/database/rfam.py
2
3670
# ---------------------------------------------------------------------------- # Copyright (c) 2016--, micronota development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from logging import getLogger from ..util import split, SplitterTail logger = getLogger(__name__) def filter_models(ifile, ofile, negate=False, models={('RF00001', '5S_rRNA'), ('RF00002', '5_8S_rRNA'), # Permuted mitochondrial genome encoded 5S rRNA ('RF02547', 'mtPerm_5S'), ('RF01118', 'PK-G12rRNA'), ('RF00177', 'SSU_rRNA_bacteria'), ('RF01959', 'SSU_rRNA_archaea'), ('RF01960', 'SSU_rRNA_eukarya'), ('RF02542', 'SSU_rRNA_microsporidia'), ('RF02540', 'LSU_rRNA_archaea'), ('RF02541', 'LSU_rRNA_bacteria'), ('RF02543', 'LSU_rRNA_eukarya'), # Trypanosomatid mitochondrial rRNA ('RF02545', 'SSU_trypano_mito'), ('RF02546', 'LSU_trypano_mito'), ('RF00005', 'tRNA'), ('RF01852', 'tRNA-Sec'), # Mitochondrion encoded tmRNA ('RF02544', 'mt_tmRNA'), # Alphaproteobacteria transfer messenger RNA ('RF01849', 'alpha_tmRNA'), # Betaproteobacteria transfer messenger RNA ('RF01850', 'beta_tmRNA'), # Cyanobacteria transfer messenger RNA ('RF01851', 'cyano_tmRNA')}): '''Filter away some cm models. Parameters ---------- ifile : file-like input file of rfam files ofile : file-like output file with some models filtered away negate : bool negate the filtering. Keep the specified instead of filtering away them. models : Iterable list of models to filter away. 
Default is a list of tRNA, tmRNA, and 5S/5.8S/16S/18S/23S/28S rRNA ''' splitter = SplitterTail(lambda s: s == '//\n') gen = split(splitter) j = 0 i = 0 for i, record in enumerate(gen(ifile), 1): name = record[1].split()[1] accn = record[2].split()[1] accn_name = (accn, name) discard = accn_name in models if negate is True: discard = not discard if discard: # logger.debug('Filter %s : %s' % accn_name) j += 1 continue else: for line in record: ofile.write(line) logger.debug('Processed %d and filtered %d cm and hmm models' % (i, j))
bsd-3-clause
justajeffy/arsenalsuite
python/blur/globals.py
10
1528
# # __PYDOC__ # # [TITLE] # TODO:Add Document Title # # [DESCRIPTION] # TODO: Add Document Description # # [CREATION INFO] # Author: Eric Hulser # Email: eric@blur.com # Company: Blur Studios # Date: 06/30/06 # # [HISTORY] # --1.0 - Created # # [DEPENDENCIES] # # __END__ # import ConfigParser, os.path, sys; configFile = 'c:/blur/config.ini' #------------------------------------------------------------------------------------------------------------- # INI FUNCTIONS #------------------------------------------------------------------------------------------------------------- def __getINISetting( inFileName, inSection, inKey = "" ): if ( os.path.isfile( inFileName ) ): tParser = ConfigParser.ConfigParser(); tParser.read( inFileName ); inSection = str( inSection ); inKey = str( inKey ); if ( tParser.has_section( inSection ) ): if ( inKey ): if ( tParser.has_option( inSection, inKey ) ): return ( tParser.get( inSection, inKey ) ); else: tItemList = tParser.items( inSection ); return [ tItem[0] for tItem in tItemList ]; return ""; if ( os.path.exists( configFile ) ): tEnvironment = __getINISetting( configFile, 'GLOBALS', 'environment' ) tStartupLib = os.path.normpath( __getINISetting( configFile, tEnvironment, 'startupPath' ) ); if ( not tStartupLib in sys.path ): sys.path.append( tStartupLib ); from blurGlobals import *; else: print ( "Blur Library Error: Could not find 'c:/blur/config.ini'" );
gpl-2.0
hyperledgerchina/fabric_zh_CN
bddtests/server_admin_pb2.py
48
7728
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: server_admin.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='server_admin.proto', package='protos', syntax='proto3', serialized_pb=_b('\n\x12server_admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\x32\xc1\x01\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x62\x06proto3') , dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _SERVERSTATUS_STATUSCODE = _descriptor.EnumDescriptor( name='StatusCode', full_name='protos.ServerStatus.StatusCode', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNDEFINED', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='STARTED', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='STOPPED', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='PAUSED', index=3, number=3, options=None, 
type=None), _descriptor.EnumValueDescriptor( name='ERROR', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='UNKNOWN', index=5, number=5, options=None, type=None), ], containing_type=None, options=None, serialized_start=125, serialized_end=214, ) _sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE) _SERVERSTATUS = _descriptor.Descriptor( name='ServerStatus', full_name='protos.ServerStatus', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='status', full_name='protos.ServerStatus.status', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _SERVERSTATUS_STATUSCODE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=60, serialized_end=214, ) _SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE _SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict( DESCRIPTOR = _SERVERSTATUS, __module__ = 'server_admin_pb2' # @@protoc_insertion_point(class_scope:protos.ServerStatus) )) _sym_db.RegisterMessage(ServerStatus) import abc import six from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities class BetaAdminServicer(object): """Interface exported by the server. """ def GetStatus(self, request, context): """Return the serve status. 
""" context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def StartServer(self, request, context): context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def StopServer(self, request, context): context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) class BetaAdminStub(object): """Interface exported by the server. """ def GetStatus(self, request, timeout): """Return the serve status. """ raise NotImplementedError() GetStatus.future = None def StartServer(self, request, timeout): raise NotImplementedError() StartServer.future = None def StopServer(self, request, timeout): raise NotImplementedError() StopServer.future = None def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): import google.protobuf.empty_pb2 import server_admin_pb2 import google.protobuf.empty_pb2 import server_admin_pb2 import google.protobuf.empty_pb2 import server_admin_pb2 request_deserializers = { ('protos.Admin', 'GetStatus'): google.protobuf.empty_pb2.Empty.FromString, ('protos.Admin', 'StartServer'): google.protobuf.empty_pb2.Empty.FromString, ('protos.Admin', 'StopServer'): google.protobuf.empty_pb2.Empty.FromString, } response_serializers = { ('protos.Admin', 'GetStatus'): server_admin_pb2.ServerStatus.SerializeToString, ('protos.Admin', 'StartServer'): server_admin_pb2.ServerStatus.SerializeToString, ('protos.Admin', 'StopServer'): server_admin_pb2.ServerStatus.SerializeToString, } method_implementations = { ('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus), ('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer), ('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer), } server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return 
beta_implementations.server(method_implementations, options=server_options) def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): import google.protobuf.empty_pb2 import server_admin_pb2 import google.protobuf.empty_pb2 import server_admin_pb2 import google.protobuf.empty_pb2 import server_admin_pb2 request_serializers = { ('protos.Admin', 'GetStatus'): google.protobuf.empty_pb2.Empty.SerializeToString, ('protos.Admin', 'StartServer'): google.protobuf.empty_pb2.Empty.SerializeToString, ('protos.Admin', 'StopServer'): google.protobuf.empty_pb2.Empty.SerializeToString, } response_deserializers = { ('protos.Admin', 'GetStatus'): server_admin_pb2.ServerStatus.FromString, ('protos.Admin', 'StartServer'): server_admin_pb2.ServerStatus.FromString, ('protos.Admin', 'StopServer'): server_admin_pb2.ServerStatus.FromString, } cardinalities = { 'GetStatus': cardinality.Cardinality.UNARY_UNARY, 'StartServer': cardinality.Cardinality.UNARY_UNARY, 'StopServer': cardinality.Cardinality.UNARY_UNARY, } stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options) # @@protoc_insertion_point(module_scope)
apache-2.0
mvaled/OpenUpgrade
addons/l10n_th/__openerp__.py
260
1453
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Thailand - Accounting', 'version': '1.0', 'category': 'Localization/Account Charts', 'description': """ Chart of Accounts for Thailand. =============================== Thai accounting chart and localization. """, 'author': 'Almacom', 'website': 'http://almacom.co.th/', 'depends': ['account_chart'], 'data': [ 'account_data.xml' ], 'installable': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
lento/cortex
test/IECoreGL/CoordinateSystemTest.py
7
4103
########################################################################## # # Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################## from __future__ import with_statement import unittest import os.path import shutil import IECore import IECoreGL IECoreGL.init( False ) class CoordinateSystemTest( unittest.TestCase ) : __outputFileName = os.path.dirname( __file__ ) + "/output/testCoordinateSystem.tif" def testNoVisualisation( self ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "immediate" ) ) r.camera( "main", { "projection" : IECore.StringData( "orthographic" ), "resolution" : IECore.V2iData( IECore.V2i( 256 ) ), "clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ), "screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) } ) r.display( self.__outputFileName, "tif", "rgba", {} ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) r.coordinateSystem( "myCoordSys" ) i = IECore.Reader.create( self.__outputFileName ).read() a = i["A"].data for i in range( a.size() ) : self.assertEqual( a[i], 0 ) def testVisualisation( self ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "immediate" ) ) r.camera( "main", { "projection" : IECore.StringData( "orthographic" ), "resolution" : IECore.V2iData( IECore.V2i( 256 ) ), "clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ), "screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) } ) r.display( self.__outputFileName, "tif", "rgba", {} ) r.setOption( "gl:drawCoordinateSystems", IECore.BoolData( True ) ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) r.coordinateSystem( "myCoordSys" ) i = IECore.Reader.create( self.__outputFileName ).read() a = i["A"].data self.failUnless( a[127*256 + 127] > 0 ) self.failUnless( a[127*256 + 255] > 0 ) self.failUnless( a[127] > 0 ) self.assertEqual( a[200*127 + 127], 0 ) self.assertEqual( a[120*127 + 127], 0 ) def setUp( self ) : 
if not os.path.isdir( "test/IECoreGL/output" ) : os.makedirs( "test/IECoreGL/output" ) def tearDown( self ) : if os.path.isdir( "test/IECoreGL/output" ) : shutil.rmtree( "test/IECoreGL/output" ) if __name__ == "__main__": unittest.main()
bsd-3-clause
sznekol/django-cms
cms/south_migrations/0052_auto__add_placeholderreference__add_staticplaceholder__add_field_page_.py
63
18423
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'StaticPlaceholder' db.create_table(u'cms_staticplaceholder', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)), ('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, blank=True)), ('draft', self.gf('django.db.models.fields.related.ForeignKey')(related_name='static_draft', null=True, to=orm['cms.Placeholder'])), ('public', self.gf('django.db.models.fields.related.ForeignKey')(related_name='static_public', null=True, to=orm['cms.Placeholder'])), ('dirty', self.gf('django.db.models.fields.BooleanField')(default=False)), ('creation_method', self.gf('django.db.models.fields.CharField')(default='code', max_length=20, blank=True)), )) db.send_create_signal('cms', ['StaticPlaceholder']) def backwards(self, orm): # Deleting model 'StaticPlaceholder' db.delete_table(u'cms_staticplaceholder') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'cms.cmsplugin': { 'Meta': {'object_name': 'CMSPlugin'}, 'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'parent': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}), 'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}), 'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, 'cms.globalpagepermission': { 'Meta': {'object_name': 'GlobalPagePermission'}, 'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'cms.page': { 'Meta': {'ordering': "('tree_id', 'lft')", 
'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'}, 'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}), 'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}), 'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}), 'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}), 'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'publication_end_date': 
('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}), 'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}), 'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}), 'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}), 'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, 'cms.pagemoderatorstate': { 'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'}, 'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}) }, 'cms.pagepermission': { 'Meta': 
{'object_name': 'PagePermission'}, 'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'cms.pageuser': { 'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']}, 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}), u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}) }, 'cms.pageusergroup': { 'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']}, 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}), u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}) }, 'cms.placeholder': { 'Meta': 
{'object_name': 'Placeholder'}, 'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}) }, 'cms.placeholderreference': { 'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']}, u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}) }, 'cms.staticplaceholder': { 'Meta': {'object_name': 'StaticPlaceholder'}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}), 'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}) }, 'cms.title': { 'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'}, 'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}), 'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'cms.usersettings': { 'Meta': {'object_name': 'UserSettings'}, 'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'sites.site': { 'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': 
"'django_site'"}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) } } complete_apps = ['cms']
bsd-3-clause
gvanhorn38/inception
inputs/stat_tfrecords.py
1
6169
""" These utility functions are meant for computing basic statistics in a set of tfrecord files. They can be used to sanity check the training and testing files. """ import argparse import tensorflow as tf def class_stats(tfrecords): """ Sum the number of images and compute the number of images available for each class. """ filename_queue = tf.train.string_input_producer( tfrecords, num_epochs=1 ) # Construct a Reader to read examples from the .tfrecords file reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, features = { 'image/class/label' : tf.FixedLenFeature([], tf.int64) } ) label = tf.cast(features['image/class/label'], tf.int32) image_count = 0 class_image_count = {} coord = tf.train.Coordinator() with tf.Session() as sess: tf.initialize_all_variables().run() threads = tf.train.start_queue_runners(sess=sess, coord=coord) try: while not coord.should_stop(): outputs = sess.run([label]) class_label = outputs[0] if class_label not in class_image_count: class_image_count[class_label] = 0 class_image_count[class_label] += 1 image_count += 1 except tf.errors.OutOfRangeError as e: pass # Basic info print "Found %d images" % (image_count,) print "Found %d classes" % (len(class_image_count),) class_labels = class_image_count.keys() class_labels.sort() # Print out the per class image counts print "Class Index | Image Count" for class_label in class_labels: print "{0:11d} | {1:6d} ".format(class_label, class_image_count[class_label]) # Can we detect if there any missing classes? max_class_index = max(class_labels) # We expect class id for each value in the range [0, max_class_id] # So lets see if we are missing any of these values missing_values = list(set(range(max_class_index+1)).difference(class_labels)) if len(missing_values) > 0: print "WARNING: expected %d classes but only found %d classes." 
% (max_class_index, len(class_labels)) missing_values.sort() for index in missing_values: print "Missing class %d" % (index,) def bbox_stats(tfrecords): """ Check that each example has valid bounding box data. """ filename_queue = tf.train.string_input_producer( tfrecords, num_epochs=1 ) # Construct a Reader to read examples from the .tfrecords file reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) # Parse an Example to access the Features features = tf.parse_single_example( serialized_example, features = { 'image/class/label' : tf.FixedLenFeature([], tf.int64), 'image/id' : tf.FixedLenFeature([], tf.string), 'image/object/bbox/xmin' : tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin' : tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax' : tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax' : tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/label' : tf.VarLenFeature(dtype=tf.int64), 'image/object/id' : tf.VarLenFeature(dtype=tf.string), } ) #label = tf.cast(features['image/class/label'], tf.int32) image_id = features['image/id'] xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) label = tf.expand_dims(tf.cast(features['image/object/bbox/label'].values, tf.float32), 0) annot_id = tf.expand_dims(features['image/object/id'].values, 0) image_count = 0 image_missing_bbox_count = 0 flat_bbox_count = 0 coord = tf.train.Coordinator() with tf.Session() as sess: tf.initialize_all_variables().run() threads = tf.train.start_queue_runners(sess=sess, coord=coord) try: while not coord.should_stop(): #outputs = sess.run([xmin, xmax, ymin, ymax, label]) outputs = sess.run([xmin, xmax, ymin, ymax, label, image_id, annot_id]) #outputs = sess.run([label]) image_xmin = outputs[0] image_xmax = outputs[1] 
image_ymin = outputs[2] image_ymax = outputs[3] image_label = outputs[4] id = outputs[5] annotation_id = outputs[6] image_count += 1 if(len(image_xmin) == 0): image_missing_bbox_count += 1 print "MISSING bbox: image_id %s, box_id %s, Class %s, xmin %s, xmax %s, ymin %s, ymax %s" % (id, annotation_id, image_label, image_xmin, image_xmax, image_ymin, image_ymax) elif image_xmin[0] >= image_xmax[0] or image_ymin[0] >= image_ymax[0]: flat_bbox_count += 1 print "bbox ERROR: image_id %s, box_id %s, Class %s, xmin %s, xmax %s, ymin %s, ymax %s" % (id, annotation_id, image_label, image_xmin, image_xmax, image_ymin, image_ymax) if image_count % 1000 == 0: print "Processed %d images" % image_count except tf.errors.OutOfRangeError as e: pass # Basic info print "Found %d images" % (image_count,) print "Found %d missing bboxes" % (image_missing_bbox_count,) print "Found %d 2d bboxes" % (flat_bbox_count,) def parse_args(): parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files') parser.add_argument('--stat', dest='stat_type', choices=['class_stats', 'bbox_stats'], required=True) parser.add_argument('--tfrecords', dest='tfrecords', help='paths to tfrecords files', type=str, nargs='+', required=True) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() if args.stat_type == 'class_stats': class_stats(args.tfrecords) elif args.stat_type == 'bbox_stats': bbox_stats(args.tfrecords)
mit
pedro2d10/SickRage-FR
lib/hachoir_parser/image/xcf.py
95
10353
""" Gimp image parser (XCF file, ".xcf" extension). You can find informations about XCF file in Gimp source code. URL to read CVS online: http://cvs.gnome.org/viewcvs/gimp/app/xcf/ \--> files xcf-read.c and xcf-load.c Author: Victor Stinner """ from hachoir_parser import Parser from hachoir_core.field import (StaticFieldSet, FieldSet, ParserError, UInt8, UInt32, Enum, Float32, String, PascalString32, RawBytes) from hachoir_parser.image.common import RGBA from hachoir_core.endian import NETWORK_ENDIAN class XcfCompression(FieldSet): static_size = 8 COMPRESSION_NAME = { 0: u"None", 1: u"RLE", 2: u"Zlib", 3: u"Fractal" } def createFields(self): yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME) class XcfResolution(StaticFieldSet): format = ( (Float32, "xres", "X resolution in DPI"), (Float32, "yres", "Y resolution in DPI") ) class XcfTattoo(StaticFieldSet): format = ((UInt32, "tattoo", "Tattoo"),) class LayerOffsets(StaticFieldSet): format = ( (UInt32, "ofst_x", "Offset X"), (UInt32, "ofst_y", "Offset Y") ) class LayerMode(FieldSet): static_size = 32 MODE_NAME = { 0: u"Normal", 1: u"Dissolve", 2: u"Behind", 3: u"Multiply", 4: u"Screen", 5: u"Overlay", 6: u"Difference", 7: u"Addition", 8: u"Subtract", 9: u"Darken only", 10: u"Lighten only", 11: u"Hue", 12: u"Saturation", 13: u"Color", 14: u"Value", 15: u"Divide", 16: u"Dodge", 17: u"Burn", 18: u"Hard light", 19: u"Soft light", 20: u"Grain extract", 21: u"Grain merge", 22: u"Color erase" } def createFields(self): yield Enum(UInt32(self, "mode", "Layer mode"), self.MODE_NAME) class GimpBoolean(UInt32): def __init__(self, parent, name): UInt32.__init__(self, parent, name) def createValue(self): return 1 == UInt32.createValue(self) class XcfUnit(StaticFieldSet): format = ((UInt32, "unit", "Unit"),) class XcfParasiteEntry(FieldSet): def createFields(self): yield PascalString32(self, "name", "Name", strip="\0", charset="UTF-8") yield UInt32(self, "flags", "Flags") yield PascalString32(self, 
"data", "Data", strip=" \0", charset="UTF-8") class XcfLevel(FieldSet): def createFields(self): yield UInt32(self, "width", "Width in pixel") yield UInt32(self, "height", "Height in pixel") yield UInt32(self, "offset", "Offset") offset = self["offset"].value if offset == 0: return data_offsets = [] while (self.absolute_address + self.current_size)/8 < offset: chunk = UInt32(self, "data_offset[]", "Data offset") yield chunk if chunk.value == 0: break data_offsets.append(chunk) if (self.absolute_address + self.current_size)/8 != offset: raise ParserError("Problem with level offset.") previous = offset for chunk in data_offsets: data_offset = chunk.value size = data_offset - previous yield RawBytes(self, "data[]", size, "Data content of %s" % chunk.name) previous = data_offset class XcfHierarchy(FieldSet): def createFields(self): yield UInt32(self, "width", "Width") yield UInt32(self, "height", "Height") yield UInt32(self, "bpp", "Bits/pixel") offsets = [] while True: chunk = UInt32(self, "offset[]", "Level offset") yield chunk if chunk.value == 0: break offsets.append(chunk.value) for offset in offsets: padding = self.seekByte(offset, relative=False) if padding is not None: yield padding yield XcfLevel(self, "level[]", "Level") # yield XcfChannel(self, "channel[]", "Channel")) class XcfChannel(FieldSet): def createFields(self): yield UInt32(self, "width", "Channel width") yield UInt32(self, "height", "Channel height") yield PascalString32(self, "name", "Channel name", strip="\0", charset="UTF-8") for field in readProperties(self): yield field yield UInt32(self, "hierarchy_ofs", "Hierarchy offset") yield XcfHierarchy(self, "hierarchy", "Hierarchy") def createDescription(self): return 'Channel "%s"' % self["name"].value class XcfLayer(FieldSet): def createFields(self): yield UInt32(self, "width", "Layer width in pixels") yield UInt32(self, "height", "Layer height in pixels") yield Enum(UInt32(self, "type", "Layer type"), XcfFile.IMAGE_TYPE_NAME) yield 
PascalString32(self, "name", "Layer name", strip="\0", charset="UTF-8") for prop in readProperties(self): yield prop # -- # TODO: Hack for Gimp 1.2 files # -- yield UInt32(self, "hierarchy_ofs", "Hierarchy offset") yield UInt32(self, "mask_ofs", "Layer mask offset") padding = self.seekByte(self["hierarchy_ofs"].value, relative=False) if padding is not None: yield padding yield XcfHierarchy(self, "hierarchy", "Hierarchy") # TODO: Read layer mask if needed: self["mask_ofs"].value != 0 def createDescription(self): return 'Layer "%s"' % self["name"].value class XcfParasites(FieldSet): def createFields(self): size = self["../size"].value * 8 while self.current_size < size: yield XcfParasiteEntry(self, "parasite[]", "Parasite") class XcfProperty(FieldSet): PROP_COMPRESSION = 17 PROP_RESOLUTION = 19 PROP_PARASITES = 21 TYPE_NAME = { 0: u"End", 1: u"Colormap", 2: u"Active layer", 3: u"Active channel", 4: u"Selection", 5: u"Floating selection", 6: u"Opacity", 7: u"Mode", 8: u"Visible", 9: u"Linked", 10: u"Lock alpha", 11: u"Apply mask", 12: u"Edit mask", 13: u"Show mask", 14: u"Show masked", 15: u"Offsets", 16: u"Color", 17: u"Compression", 18: u"Guides", 19: u"Resolution", 20: u"Tattoo", 21: u"Parasites", 22: u"Unit", 23: u"Paths", 24: u"User unit", 25: u"Vectors", 26: u"Text layer flags", } handler = { 6: RGBA, 7: LayerMode, 8: GimpBoolean, 9: GimpBoolean, 10: GimpBoolean, 11: GimpBoolean, 12: GimpBoolean, 13: GimpBoolean, 15: LayerOffsets, 17: XcfCompression, 19: XcfResolution, 20: XcfTattoo, 21: XcfParasites, 22: XcfUnit } def __init__(self, *args, **kw): FieldSet.__init__(self, *args, **kw) self._size = (8 + self["size"].value) * 8 def createFields(self): yield Enum(UInt32(self, "type", "Property type"), self.TYPE_NAME) yield UInt32(self, "size", "Property size") size = self["size"].value if 0 < size: cls = self.handler.get(self["type"].value, None) if cls: yield cls(self, "data", size=size*8) else: yield RawBytes(self, "data", size, "Data") def 
createDescription(self): return "Property: %s" % self["type"].display def readProperties(parser): while True: prop = XcfProperty(parser, "property[]") yield prop if prop["type"].value == 0: return class XcfFile(Parser): PARSER_TAGS = { "id": "xcf", "category": "image", "file_ext": ("xcf",), "mime": (u"image/x-xcf", u"application/x-gimp-image"), "min_size": (26 + 8 + 4 + 4)*8, # header+empty property+layer offset+channel offset "magic": ( ('gimp xcf file\0', 0), ('gimp xcf v002\0', 0), ), "description": "Gimp (XCF) picture" } endian = NETWORK_ENDIAN IMAGE_TYPE_NAME = { 0: u"RGB", 1: u"Gray", 2: u"Indexed" } def validate(self): if self.stream.readBytes(0, 14) not in ('gimp xcf file\0', 'gimp xcf v002\0'): return "Wrong signature" return True def createFields(self): # Read signature yield String(self, "signature", 14, "Gimp picture signature (ends with nul byte)", charset="ASCII") # Read image general informations (width, height, type) yield UInt32(self, "width", "Image width") yield UInt32(self, "height", "Image height") yield Enum(UInt32(self, "type", "Image type"), self.IMAGE_TYPE_NAME) for prop in readProperties(self): yield prop # Read layer offsets layer_offsets = [] while True: chunk = UInt32(self, "layer_offset[]", "Layer offset") yield chunk if chunk.value == 0: break layer_offsets.append(chunk.value) # Read channel offsets channel_offsets = [] while True: chunk = UInt32(self, "channel_offset[]", "Channel offset") yield chunk if chunk.value == 0: break channel_offsets.append(chunk.value) # Read layers for index, offset in enumerate(layer_offsets): if index+1 < len(layer_offsets): size = (layer_offsets[index+1] - offset) * 8 else: size = None padding = self.seekByte(offset, relative=False) if padding: yield padding yield XcfLayer(self, "layer[]", size=size) # Read channels for index, offset in enumerate(channel_offsets): if index+1 < len(channel_offsets): size = (channel_offsets[index+1] - offset) * 8 else: size = None padding = self.seekByte(offset, 
relative=False) if padding is not None: yield padding yield XcfChannel(self, "channel[]", "Channel", size=size)
gpl-3.0
yongshengwang/hue
build/env/lib/python2.7/site-packages/boto-2.38.0-py2.7.egg/boto/ses/__init__.py
131
2013
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2011 Harry Marr http://hmarr.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.ses.connection import SESConnection from boto.regioninfo import RegionInfo, get_regions def regions(): """ Get all available regions for the SES service. :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` instances """ return get_regions('ses', connection_cls=SESConnection) def connect_to_region(region_name, **kw_params): """ Given a valid region name, return a :class:`boto.ses.connection.SESConnection`. :type: str :param region_name: The name of the region to connect to. :rtype: :class:`boto.ses.connection.SESConnection` or ``None`` :return: A connection to the given region, or None if an invalid region name is given """ for region in regions(): if region.name == region_name: return region.connect(**kw_params) return None
apache-2.0
firebase/grpc
src/python/grpcio/support.py
10
4388
# Copyright 2016 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import os.path import shutil import sys import tempfile from distutils import errors import commands C_PYTHON_DEV = """ #include <Python.h> int main(int argc, char **argv) { return 0; } """ C_PYTHON_DEV_ERROR_MESSAGE = """ Could not find <Python.h>. This could mean the following: * You're on Ubuntu and haven't run `apt-get install <PY_REPR>-dev`. * You're on RHEL/Fedora and haven't run `yum install <PY_REPR>-devel` or `dnf install <PY_REPR>-devel` (make sure you also have redhat-rpm-config installed) * You're on Mac OS X and the usual Python framework was somehow corrupted (check your environment variables or try re-installing?) * You're on Windows and your Python installation was somehow corrupted (check your environment variables or try re-installing?) 
""" if sys.version_info[0] == 2: PYTHON_REPRESENTATION = 'python' elif sys.version_info[0] == 3: PYTHON_REPRESENTATION = 'python3' else: raise NotImplementedError('Unsupported Python version: %s' % sys.version) C_CHECKS = { C_PYTHON_DEV: C_PYTHON_DEV_ERROR_MESSAGE.replace('<PY_REPR>', PYTHON_REPRESENTATION), } def _compile(compiler, source_string): tempdir = tempfile.mkdtemp() cpath = os.path.join(tempdir, 'a.c') with open(cpath, 'w') as cfile: cfile.write(source_string) try: compiler.compile([cpath]) except errors.CompileError as error: return error finally: shutil.rmtree(tempdir) def _expect_compile(compiler, source_string, error_message): if _compile(compiler, source_string) is not None: sys.stderr.write(error_message) raise commands.CommandError( "Diagnostics found a compilation environment issue:\n{}".format( error_message)) def diagnose_compile_error(build_ext, error): """Attempt to diagnose an error during compilation.""" for c_check, message in C_CHECKS.items(): _expect_compile(build_ext.compiler, c_check, message) python_sources = [ source for source in build_ext.get_source_files() if source.startswith('./src/python') and source.endswith('c') ] for source in python_sources: if not os.path.isfile(source): raise commands.CommandError(( "Diagnostics found a missing Python extension source file:\n{}\n\n" "This is usually because the Cython sources haven't been transpiled " "into C yet and you're building from source.\n" "Try setting the environment variable " "`GRPC_PYTHON_BUILD_WITH_CYTHON=1` when invoking `setup.py` or " "when using `pip`, e.g.:\n\n" "pip install -rrequirements.txt\n" "GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .").format(source)) def diagnose_attribute_error(build_ext, error): if any('_needs_stub' in arg for arg in error.args): raise commands.CommandError( "We expect a missing `_needs_stub` attribute from older versions of " "setuptools. 
Consider upgrading setuptools.") _ERROR_DIAGNOSES = { errors.CompileError: diagnose_compile_error, AttributeError: diagnose_attribute_error, } def diagnose_build_ext_error(build_ext, error, formatted): diagnostic = _ERROR_DIAGNOSES.get(type(error)) if diagnostic is None: raise commands.CommandError( "\n\nWe could not diagnose your build failure. If you are unable to " "proceed, please file an issue at http://www.github.com/grpc/grpc " "with `[Python install]` in the title; please attach the whole log " "(including everything that may have appeared above the Python " "backtrace).\n\n{}".format(formatted)) else: diagnostic(build_ext, error)
apache-2.0
realsaiko/odoo
addons/l10n_hu/__openerp__.py
320
1815
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 InnOpen Group Kft (<http://www.innopen.eu>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Hungarian - Accounting', 'version': '1.0', 'category': 'Localization/Account Charts', 'description': """ Base module for Hungarian localization ========================================== This module consists : - Generic Hungarian chart of accounts - Hungarian taxes - Hungarian Bank information """, 'author': 'InnOpen Group Kft', 'website': 'http://www.innopen.eu', 'license': 'AGPL-3', 'depends': ['account','account_chart'], 'data': [ 'data/account.account.template.csv', 'data/account.tax.code.template.csv', 'data/account.chart.template.csv', 'data/account.tax.template.csv', 'data/account.fiscal.position.template.csv', 'data/account.fiscal.position.tax.template.csv', 'data/res.bank.csv', ], 'installable': True, 'auto_install': False, }
agpl-3.0
zycdragonball/tensorflow
tensorflow/user_ops/duplicate_op_test.py
146
1303
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for custom user ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import tensorflow as tf class DuplicateOpTest(tf.test.TestCase): def testBasic(self): library_filename = os.path.join(tf.resource_loader.get_data_files_path(), 'duplicate_op.so') duplicate = tf.load_op_library(library_filename) self.assertEqual(len(duplicate.OP_LIST.op), 0) with self.test_session(): self.assertEqual(tf.add(1, 41).eval(), 42) if __name__ == '__main__': tf.test.main()
apache-2.0
Redjumpman/Jumper-Cogs
casino/data.py
1
10487
from copy import deepcopy from redbot.core import Config from collections import namedtuple user_defaults = { "Pending_Credits": 0, "Membership": { "Name": "Basic", "Assigned": False }, "Played": { "Allin": 0, "Blackjack": 0, "Coin": 0, "Craps": 0, "Cups": 0, "Dice": 0, "Hilo": 0, "War": 0, "Double": 0 }, "Won": { "Allin": 0, "Blackjack": 0, "Coin": 0, "Craps": 0, "Cups": 0, "Dice": 0, "Hilo": 0, "War": 0, "Double": 0 }, "Cooldowns": { "Allin": 0, "Blackjack": 0, "Coin": 0, "Craps": 0, "Cups": 0, "Dice": 0, "Hilo": 0, "War": 0, "Double": 0 } } guild_defaults = { "Memberships": {}, "Settings": { "Global": False, "Casino_Name": "Redjumpman's", "Casino_Open": True, "Payout_Switch": False, "Payout_Limit": 10000 }, "Games": { "Allin": { "Access": 0, "Cooldown": 43200, "Min": None, "Max": None, "Multiplier": None, "Open": True }, "Blackjack": { "Access": 0, "Cooldown": 5, "Min": 50, "Max": 500, "Multiplier": 2.0, "Open": True }, "Coin": { "Access": 0, "Cooldown": 5, "Max": 10, "Min": 10, "Multiplier": 1.5, "Open": True }, "Craps": { "Access": 0, "Cooldown": 5, "Max": 500, "Min": 50, "Multiplier": 2.0, "Open": True }, "Cups": { "Access": 0, "Cooldown": 5, "Max": 100, "Min": 25, "Multiplier": 1.8, "Open": True }, "Dice": { "Access": 0, "Cooldown": 5, "Max": 100, "Min": 25, "Multiplier": 1.8, "Open": True }, "Hilo": { "Access": 0, "Cooldown": 5, "Min": 25, "Max": 75, "Multiplier": 1.7, "Open": True }, "Double": { "Access": 0, "Cooldown": 5, "Min": 10, "Max": 250, "Multiplier": None, "Open": True }, "War": { "Access": 0, "Cooldown": 5, "Min": 25, "Max": 75, "Multiplier": 1.5, "Open": True } } } member_defaults = deepcopy(user_defaults) global_defaults = deepcopy(guild_defaults) global_defaults["Settings"]["Global"] = True _DataNamedTuple = namedtuple("Casino", "foo") _DataObj = _DataNamedTuple(foo=None) class Database: db = Config.get_conf(_DataObj, 5074395001, force_registration=True) def __init__(self): self.db.register_guild(**guild_defaults) 
self.db.register_global(**global_defaults) self.db.register_member(**member_defaults) self.db.register_user(**user_defaults) async def casino_is_global(self): """Checks to see if the casino is storing data on a per server basis or globally.""" return await self.db.Settings.Global() async def get_data(self, ctx, player=None): """ :param ctx: Context object :param player: Member or user object :return: Database that corresponds to the given data. Returns the appropriate config category based on the given data, and wheater or not the casino is global. """ if await self.casino_is_global(): if player is None: return self.db else: return self.db.user(player) else: if player is None: return self.db.guild(ctx.guild) else: return self.db.member(player) async def get_all(self, ctx, player): """ :param ctx: Context Object :param player: Member or user object :return: Tuple with two dictionaries Returns a dictionary representation of casino's settings data and the player data. """ settings = await self.get_data(ctx) player_data = await self.get_data(ctx, player=player) return await settings.all(), await player_data.all() async def _wipe_casino(self, ctx): """ Wipes all the casino data available :param ctx: context object :return: None This wipes everything, including member/user data. """ await self.db.clear_all() msg = "{0.name} ({0.id}) wiped all casino data.".format(ctx.author) await ctx.send(msg) async def _reset_settings(self, ctx): """ Resets only the settings data. 
""" data = await self.get_data(ctx) await data.Settings.clear() msg = ("{0.name} ({0.id}) reset all " "casino settings.").format(ctx.author) await ctx.send(msg) async def _reset_memberships(self, ctx): """ Resets all the information pertaining to memberships """ data = await self.get_data(ctx) await data.Memberships.clear() msg = ("{0.name} ({0.id}) cleared " "all casino memberships.").format(ctx.author) await ctx.send(msg) async def _reset_games(self, ctx): """ Resets all game settings, such as multipliers and bets. """ data = await self.get_data(ctx) await data.Games.clear() msg = ("{0.name} ({0.id}) restored casino games to " "default settings.").format(ctx.author) await ctx.send(msg) async def _reset_all_settings(self, ctx): """ Resets all settings, but retains all player data. """ await self._reset_settings(ctx) await self._reset_memberships(ctx) await self._reset_games(ctx) await self._reset_cooldowns(ctx) async def _reset_player_stats(self, ctx, player): """ :param ctx: Context object :param player: user or member object :return: None Resets a player's win / played stats. """ data = await self.get_data(ctx, player=player) await data.Played.clear() await data.Won.clear() msg = ("{0.name} ({0.id}) reset all stats for " "{1.name} ({1.id}).").format(ctx.author, player) await ctx.send(msg) async def _reset_player_all(self, ctx, player): """ :param ctx: context object :param player: user or member object :return: None Resets all data belonging to the user, including stats and memberships. """ data = await self.get_data(ctx, player=player) await data.clear() msg = ("{0.name} ({0.id}) reset all data " "for {1.name} ({1.id}).").format(ctx.author, player) await ctx.send(msg) async def _reset_player_cooldowns(self, ctx, player): """ :param ctx: context object :param player: user or member object :return: None Resets all game cooldowns for a player. 
""" data = await self.get_data(ctx, player=player) await data.Cooldowns.clear() msg = ("{0.name} ({0.id}) reset all cooldowns " "for {1.name} ({1.id}).").format(ctx.author, player) await ctx.send(msg) async def _reset_cooldowns(self, ctx): """ Resets all game cooldowns for every player in the database. """ if await self.casino_is_global(): for player in await self.db.all_users(): try: user = await ctx.bot.fetch_user(player) except AttributeError: user = await ctx.bot.get_user_info(player) await self.db.user(user).Cooldowns.clear() msg = ("{0.name} ({0.id}) reset all " "global cooldowns.").format(ctx.author) else: for player in await self.db.all_members(ctx.guild): user = ctx.guild.get_member(player) await self.db.member(user).Cooldowns.clear() msg = ("{0.name} ({0.id}) reset all " "cooldowns on {1.name}.").format(ctx.author, ctx.guild) await ctx.send(msg) async def change_mode(self, mode): """ :param mode: String, must be local or global. :return: None Toggles how data is stored for casino between local and global. When switching modes, all perviously stored data will be deleted. """ if mode == 'global': await self.db.clear_all_members() await self.db.clear_all_guilds() await self.db.Settings.Global.set(True) else: await self.db.clear_all_users() await self.db.clear_all_globals() await self.db.Settings.Global.set(False) async def _update_cooldown(self, ctx, game, time): player_data = await self.get_data(ctx, player=ctx.author) await player_data.set_raw("Cooldowns", game, value=time) async def _get_player_membership(self, ctx, player): """ :param ctx: context object :param player: user or member object :return: Membership name and a dictionary with the perks Performs a lookup on the user and the created memberhips for casino. If the user has a memberhip that was deleted, it will return the default basic membership. It will also set their new membership to the default. 
""" basic = {"Reduction": 0, "Access": 0, "Color": "grey", "Bonus": 1} player_data = await self.get_data(ctx, player=player) name = await player_data.Membership.Name() if name == "Basic": return name, basic data = await self.get_data(ctx) memberships = await data.Memberships.all() try: return name, memberships[name] except KeyError: await player_data.Membership.set({"Name": "Basic", "Assigned": False}) return "Basic", basic
gpl-3.0
kaedroho/wagtail
wagtail/images/fields.py
7
4310
import os import willow from django.conf import settings from django.core.exceptions import ValidationError from django.forms.fields import ImageField from django.template.defaultfilters import filesizeformat from django.utils.translation import gettext_lazy as _ ALLOWED_EXTENSIONS = ['gif', 'jpg', 'jpeg', 'png', 'webp'] SUPPORTED_FORMATS_TEXT = _("GIF, JPEG, PNG, WEBP") class WagtailImageField(ImageField): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Get max upload size from settings self.max_upload_size = getattr(settings, 'WAGTAILIMAGES_MAX_UPLOAD_SIZE', 10 * 1024 * 1024) self.max_image_pixels = getattr(settings, 'WAGTAILIMAGES_MAX_IMAGE_PIXELS', 128 * 1000000) max_upload_size_text = filesizeformat(self.max_upload_size) # Help text if self.max_upload_size is not None: self.help_text = _( "Supported formats: %(supported_formats)s. Maximum filesize: %(max_upload_size)s." ) % { 'supported_formats': SUPPORTED_FORMATS_TEXT, 'max_upload_size': max_upload_size_text, } else: self.help_text = _( "Supported formats: %(supported_formats)s." ) % { 'supported_formats': SUPPORTED_FORMATS_TEXT, } # Error messages self.error_messages['invalid_image_extension'] = _( "Not a supported image format. Supported formats: %s." ) % SUPPORTED_FORMATS_TEXT self.error_messages['invalid_image_known_format'] = _( "Not a valid %s image." ) self.error_messages['file_too_large'] = _( "This file is too big (%%s). Maximum filesize %s." ) % max_upload_size_text self.error_messages['file_too_many_pixels'] = _( "This file has too many pixels (%%s). Maximum pixels %s." ) % self.max_image_pixels self.error_messages['file_too_large_unknown_size'] = _( "This file is too big. Maximum filesize %s." 
) % max_upload_size_text def check_image_file_format(self, f): # Check file extension extension = os.path.splitext(f.name)[1].lower()[1:] if extension not in ALLOWED_EXTENSIONS: raise ValidationError(self.error_messages['invalid_image_extension'], code='invalid_image_extension') image_format = extension.upper() if image_format == 'JPG': image_format = 'JPEG' internal_image_format = f.image.format.upper() if internal_image_format == 'MPO': internal_image_format = 'JPEG' # Check that the internal format matches the extension # It is possible to upload PSD files if their extension is set to jpg, png or gif. This should catch them out if internal_image_format != image_format: raise ValidationError(self.error_messages['invalid_image_known_format'] % ( image_format, ), code='invalid_image_known_format') def check_image_file_size(self, f): # Upload size checking can be disabled by setting max upload size to None if self.max_upload_size is None: return # Check the filesize if f.size > self.max_upload_size: raise ValidationError(self.error_messages['file_too_large'] % ( filesizeformat(f.size), ), code='file_too_large') def check_image_pixel_size(self, f): # Upload pixel size checking can be disabled by setting max upload pixel to None if self.max_image_pixels is None: return # Check the pixel size image = willow.Image.open(f) width, height = image.get_size() frames = image.get_frame_count() num_pixels = width * height * frames if num_pixels > self.max_image_pixels: raise ValidationError(self.error_messages['file_too_many_pixels'] % ( num_pixels ), code='file_too_many_pixels') def to_python(self, data): f = super().to_python(data) if f is not None: self.check_image_file_size(f) self.check_image_file_format(f) self.check_image_pixel_size(f) return f
bsd-3-clause
taotie12010/bigfour
lms/djangoapps/courseware/tests/test_word_cloud.py
134
8327
# -*- coding: utf-8 -*- """Word cloud integration tests using mongo modulestore.""" import json from operator import itemgetter from nose.plugins.attrib import attr from . import BaseTestXmodule from xmodule.x_module import STUDENT_VIEW @attr('shard_1') class TestWordCloud(BaseTestXmodule): """Integration test for word cloud xmodule.""" CATEGORY = "word_cloud" def _get_users_state(self): """Return current state for each user: {username: json_state} """ # check word cloud response for every user users_state = {} for user in self.users: response = self.clients[user.username].post(self.get_url('get_state')) users_state[user.username] = json.loads(response.content) return users_state def _post_words(self, words): """Post `words` and return current state for each user: {username: json_state} """ users_state = {} for user in self.users: response = self.clients[user.username].post( self.get_url('submit'), {'student_words[]': words}, HTTP_X_REQUESTED_WITH='XMLHttpRequest' ) users_state[user.username] = json.loads(response.content) return users_state def _check_response(self, response_contents, correct_jsons): """Utility function that compares correct and real responses.""" for username, content in response_contents.items(): # Used in debugger for comparing objects. # self.maxDiff = None # We should compare top_words for manually, # because they are unsorted. keys_to_compare = set(content.keys()).difference(set(['top_words'])) self.assertDictEqual( {k: content[k] for k in keys_to_compare}, {k: correct_jsons[username][k] for k in keys_to_compare}) # comparing top_words: top_words_content = sorted( content['top_words'], key=itemgetter('text') ) top_words_correct = sorted( correct_jsons[username]['top_words'], key=itemgetter('text') ) self.assertListEqual(top_words_content, top_words_correct) def test_initial_state(self): """Inital state of word cloud is correct. Those state that is sended from server to frontend, when students load word cloud page. 
""" users_state = self._get_users_state() self.assertEqual( ''.join(set([ content['status'] for _, content in users_state.items() ])), 'success') # correct initial data: correct_initial_data = { u'status': u'success', u'student_words': {}, u'total_count': 0, u'submitted': False, u'top_words': {}, u'display_student_percents': False } for _, response_content in users_state.items(): self.assertEquals(response_content, correct_initial_data) def test_post_words(self): """Students can submit data succesfully. Word cloud data properly updates after students submit. """ input_words = [ "small", "BIG", " Spaced ", " few words", ] correct_words = [ u"small", u"big", u"spaced", u"few words", ] users_state = self._post_words(input_words) self.assertEqual( ''.join(set([ content['status'] for _, content in users_state.items() ])), 'success') correct_state = {} for index, user in enumerate(self.users): correct_state[user.username] = { u'status': u'success', u'submitted': True, u'display_student_percents': True, u'student_words': {word: 1 + index for word in correct_words}, u'total_count': len(input_words) * (1 + index), u'top_words': [ { u'text': word, u'percent': 100 / len(input_words), u'size': (1 + index) } for word in correct_words ] } self._check_response(users_state, correct_state) def test_collective_users_submits(self): """Test word cloud data flow per single and collective users submits. Make sures that: 1. Inital state of word cloud is correct. Those state that is sended from server to frontend, when students load word cloud page. 2. Students can submit data succesfully. 3. Next submits produce "already voted" error. Next submits for user are not allowed by user interface, but techically it possible, and word_cloud should properly react. 4. State of word cloud after #3 is still as after #2. """ # 1. users_state = self._get_users_state() self.assertEqual( ''.join(set([ content['status'] for _, content in users_state.items() ])), 'success') # 2. 
# Invcemental state per user. users_state_after_post = self._post_words(['word1', 'word2']) self.assertEqual( ''.join(set([ content['status'] for _, content in users_state_after_post.items() ])), 'success') # Final state after all posts. users_state_before_fail = self._get_users_state() # 3. users_state_after_post = self._post_words( ['word1', 'word2', 'word3']) self.assertEqual( ''.join(set([ content['status'] for _, content in users_state_after_post.items() ])), 'fail') # 4. current_users_state = self._get_users_state() self._check_response(users_state_before_fail, current_users_state) def test_unicode(self): input_words = [u" this is unicode Юникод"] correct_words = [u"this is unicode юникод"] users_state = self._post_words(input_words) self.assertEqual( ''.join(set([ content['status'] for _, content in users_state.items() ])), 'success') for user in self.users: self.assertListEqual( users_state[user.username]['student_words'].keys(), correct_words) def test_handle_ajax_incorrect_dispatch(self): responses = { user.username: self.clients[user.username].post( self.get_url('whatever'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') for user in self.users } status_codes = {response.status_code for response in responses.values()} self.assertEqual(status_codes.pop(), 200) for user in self.users: self.assertDictEqual( json.loads(responses[user.username].content), { 'status': 'fail', 'error': 'Unknown Command!' } ) def test_word_cloud_constructor(self): """Make sure that all parameters extracted correclty from xml""" fragment = self.runtime.render(self.item_descriptor, STUDENT_VIEW) expected_context = { 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url, 'element_class': self.item_descriptor.location.category, 'element_id': self.item_descriptor.location.html_id(), 'num_inputs': 5, # default value 'submitted': False # default value } self.assertEqual(fragment.content, self.runtime.render_template('word_cloud.html', expected_context))
agpl-3.0
civisanalytics/ansible
lib/ansible/modules/network/eos/eos_user.py
7
11844
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: eos_user version_added: "2.3" author: "Peter Sprygada (@privateip)" short_description: Manage the collection of local users on EOS devices description: - This module provides declarative management of the local usernames configured on Arista EOS devices. It allows playbooks to manage either individual usernames or the collection of usernames in the current running config. It also supports purging usernames from the configuration that are not explicitly defined. options: users: description: - The set of username objects to be configured on the remote Arista EOS device. The list entries can either be the username or a hash of username and properties. This argument is mutually exclusive with the C(username) argument. required: false default: null username: description: - The username to be configured on the remote Arista EOS device. This argument accepts a stringv value and is mutually exclusive with the C(users) argument. required: false default: null update_password: description: - Since passwords are encrypted in the device running config, this argument will instruct the module when to change the password. 
When set to C(always), the password will always be updated in the device and when set to C(on_create) the password will be updated only if the username is created. required: false default: always choices: ['on_create', 'always'] privilege: description: - The C(privilege) argument configures the privilege level of the user when logged into the system. This argument accepts integer values in the range of 1 to 15. required: false default: null role: description: - The C(role) argument configures the role for the username in the device running configuration. The argument accepts a string value defining the role name. This argument does not check if the role has been configured on the device. required: false default: null sshkey: description: - The C(sshkey) argument defines the SSH public key to configure for the username. This argument accepts a valid SSH key value. required: false default: null nopassword: description: - The C(nopassword) argument defines the username without assigning a password. This will allow the user to login to the system without being authenticated by a password. This argument accepts boolean values. required: false default: null choices: ['true', 'false'] purge: description: - The C(purge) argument instructs the module to consider the resource definition absolute. It will remove any previously configured usernames on the device with the exception of the `admin` user which cannot be deleted per EOS constraints. required: false default: false state: description: - The C(state) argument configures the state of the uername definition as it relates to the device operational configuration. 
When set to I(present), the username(s) should be configured in the device active configuration and when set to I(absent) the username(s) should not be in the device active configuration required: false default: present choices: ['present', 'absent'] """ EXAMPLES = """ - name: create a new user eos_user: username: ansible sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" state: present - name: remove all users except admin eos_user: purge: yes - name: set multiple users to privilege level users: - username: netop - username: netend privilege: 15 state: present """ RETURN = """ commands: description: The list of configuration mode commands to send to the device returned: always type: list sample: - username ansible secret password - username admin secret admin session_name: description: The EOS config session name used to load the configuration returned: when changed is True type: str sample: ansible_1479315771 """ import re from functools import partial from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.eos import get_config, load_config from ansible.module_utils.six import iteritems from ansible.module_utils.eos import eos_argument_spec, check_args def validate_privilege(value, module): if not 1 <= value <= 15: module.fail_json(msg='privilege must be between 1 and 15, got %s' % value) def map_obj_to_commands(updates, module): commands = list() state = module.params['state'] update_password = module.params['update_password'] for update in updates: want, have = update needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x)) add = lambda x: commands.append('username %s %s' % (want['username'], x)) if want['state'] == 'absent': commands.append('no username %s' % want['username']) continue if needs_update('role'): add('role %s' % want['role']) if needs_update('privilege'): add('privilege %s' % want['privilege']) if needs_update('password'): if update_password == 'always' or not have: add('secret %s' % want['password']) if 
needs_update('sshkey'): add('sshkey %s' % want['sshkey']) if needs_update('nopassword'): if want['nopassword']: add('nopassword') else: add('no username %s nopassword' % want['username']) return commands def parse_role(data): match = re.search(r'role (\S+)', data, re.M) if match: return match.group(1) def parse_sshkey(data): match = re.search(r'sshkey (.+)$', data, re.M) if match: return match.group(1) def parse_privilege(data): match = re.search(r'privilege (\S+)', data, re.M) if match: return int(match.group(1)) def map_config_to_obj(module): data = get_config(module, flags=['section username']) match = re.findall(r'^username (\S+)', data, re.M) if not match: return list() instances = list() for user in set(match): regex = r'username %s .+$' % user cfg = re.findall(r'username %s .+$' % user, data, re.M) cfg = '\n'.join(cfg) obj = { 'username': user, 'state': 'present', 'nopassword': 'nopassword' in cfg, 'password': None, 'sshkey': parse_sshkey(cfg), 'privilege': parse_privilege(cfg), 'role': parse_role(cfg) } instances.append(obj) return instances def get_param_value(key, item, module): # if key doesn't exist in the item, get it from module.params if not item.get(key): value = module.params[key] # if key does exist, do a type check on it to validate it else: value_type = module.argument_spec[key].get('type', 'str') type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type] type_checker(item[key]) value = item[key] # validate the param value (if validator func exists) validator = globals().get('validate_%s' % key) if all((value, validator)): validator(value, module) return value def map_params_to_obj(module): users = module.params['users'] if not users: if not module.params['username'] and module.params['purge']: return list() elif not module.params['username']: module.fail_json(msg='username is required') else: collection = [{'username': module.params['username']}] else: collection = list() for item in users: if not isinstance(item, dict): 
collection.append({'username': item}) elif 'username' not in item: module.fail_json(msg='username is required') else: collection.append(item) objects = list() for item in collection: get_value = partial(get_param_value, item=item, module=module) item['password'] = get_value('password') item['nopassword'] = get_value('nopassword') item['privilege'] = get_value('privilege') item['role'] = get_value('role') item['sshkey'] = get_value('sshkey') item['state'] = get_value('state') objects.append(item) return objects def update_objects(want, have): updates = list() for entry in want: item = next((i for i in have if i['username'] == entry['username']), None) if all((item is None, entry['state'] == 'present')): updates.append((entry, {})) elif item: for key, value in iteritems(entry): if value and value != item[key]: updates.append((entry, item)) return updates def main(): """ main entry point for module execution """ argument_spec = dict( users=dict(type='list', no_log=True), username=dict(), password=dict(no_log=True), nopassword=dict(type='bool'), update_password=dict(default='always', choices=['on_create', 'always']), privilege=dict(type='int'), role=dict(), sshkey=dict(), purge=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']) ) argument_spec.update(eos_argument_spec) mutually_exclusive = [('username', 'users')] module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True) warnings = list() check_args(module, warnings) result = {'changed': False} if warnings: result['warnings'] = warnings want = map_params_to_obj(module) have = map_config_to_obj(module) commands = map_obj_to_commands(update_objects(want, have), module) if module.params['purge']: want_users = [x['username'] for x in want] have_users = [x['username'] for x in have] for item in set(have_users).difference(want_users): if item != 'admin': commands.append('no username %s' % item) result['commands'] = commands 
# the eos cli prevents this by rule so capture it and display # a nice failure message if 'no username admin' in commands: module.fail_json(msg='cannot delete the `admin` account') if commands: commit = not module.check_mode response = load_config(module, commands, commit=commit) if response.get('diff') and module._diff: result['diff'] = {'prepared': response.get('diff')} result['session_name'] = response.get('session') result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
ericoporto/Chove-Agora
lib/requests/packages/urllib3/__init__.py
155
1864
""" urllib3 - Thread-safe connection pooling and re-using. """ __author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' __license__ = 'MIT' __version__ = '1.10.2' from .connectionpool import ( HTTPConnectionPool, HTTPSConnectionPool, connection_from_url ) from . import exceptions from .filepost import encode_multipart_formdata from .poolmanager import PoolManager, ProxyManager, proxy_from_url from .response import HTTPResponse from .util.request import make_headers from .util.url import get_host from .util.timeout import Timeout from .util.retry import Retry # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) def add_stderr_logger(level=logging.DEBUG): """ Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it. """ # This method needs to be in this __init__.py to get the __name__ correct # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) logger.debug('Added a stderr logging handler to logger: %s' % __name__) return handler # ... Clean up. del NullHandler # Set security warning to always go off by default. import warnings warnings.simplefilter('always', exceptions.SecurityWarning) def disable_warnings(category=exceptions.HTTPWarning): """ Helper for quickly disabling all urllib3 warnings. """ warnings.simplefilter('ignore', category)
apache-2.0
vitaly4uk/django
tests/model_formsets_regress/tests.py
182
20697
from __future__ import unicode_literals from django import forms from django.forms.formsets import DELETION_FIELD_NAME, BaseFormSet from django.forms.models import ( BaseModelFormSet, inlineformset_factory, modelform_factory, modelformset_factory, ) from django.forms.utils import ErrorDict, ErrorList from django.test import TestCase from django.utils import six from .models import ( Host, Manager, Network, ProfileNetwork, Restaurant, User, UserProfile, UserSite, ) class InlineFormsetTests(TestCase): def test_formset_over_to_field(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #10243" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=User()) # Now create a new User and UserSite instance data = { 'serial': '1', 'username': 'apollo13', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '0', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-data': '10', 'usersite_set-0-user': 'apollo13' } user = User() form = Form(data) if form.is_valid(): user = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 10) self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the UserSite instance data = { 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 11) 
self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new UserSite instance data = { 'usersite_set-TOTAL_FORMS': '2', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13', 'usersite_set-1-data': '42', 'usersite_set-1-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values().order_by('data') self.assertEqual(usersite[0]['data'], 11) self.assertEqual(usersite[0]['user_id'], 'apollo13') self.assertEqual(usersite[1]['data'], 42) self.assertEqual(usersite[1]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_formset_over_inherited_model(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #11120" Form = modelform_factory(Restaurant, fields="__all__") FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=Restaurant()) # Now create a new Restaurant and Manager instance data = { 'name': "Guido's House of Pasta", 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '0', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-name': 'Guido Van Rossum' } restaurant = User() form = Form(data) if form.is_valid(): restaurant = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Guido Van Rossum') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the Manager instance data = { 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 
'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new Manager instance data = { 'manager_set-TOTAL_FORMS': '2', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam', 'manager_set-1-name': 'John Cleese' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values().order_by('name') self.assertEqual(manager[0]['name'], 'John Cleese') self.assertEqual(manager[1]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_inline_model_with_to_field(self): """ #13794 --- An inline model with a to_field of a formset with instance has working relations. """ FormSet = inlineformset_factory(User, UserSite, exclude=('is_superuser',)) user = User.objects.create(username="guido", serial=1337) UserSite.objects.create(user=user, data=10) formset = FormSet(instance=user) # Testing the inline model's relation self.assertEqual(formset[0].instance.user_id, "guido") def test_inline_model_with_to_field_to_rel(self): """ #13794 --- An inline model with a to_field to a related field of a formset with instance has working relations. 
""" FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[]) user = User.objects.create(username="guido", serial=1337, pk=1) self.assertEqual(user.pk, 1) profile = UserProfile.objects.create(user=user, about="about", pk=2) self.assertEqual(profile.pk, 2) ProfileNetwork.objects.create(profile=profile, network=10, identifier=10) formset = FormSet(instance=profile) # Testing the inline model's relation self.assertEqual(formset[0].instance.profile_id, 1) def test_formset_with_none_instance(self): "A formset with instance=None can be created. Regression for #11872" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a formset with an instance of None Form(instance=None) FormSet(instance=None) def test_empty_fields_on_modelformset(self): "No fields passed to modelformset_factory should result in no fields on returned forms except for the id. See #14119." UserFormSet = modelformset_factory(User, fields=()) formset = UserFormSet() for form in formset.forms: self.assertIn('id', form.fields) self.assertEqual(len(form.fields), 1) def test_save_as_new_with_new_inlines(self): """ Existing and new inlines are saved with save_as_new. Regression for #14938. 
""" efnet = Network.objects.create(name="EFNet") host1 = Host.objects.create(hostname="irc.he.net", network=efnet) HostFormSet = inlineformset_factory(Network, Host, fields="__all__") # Add a new host, modify previous host, and save-as-new data = { 'host_set-TOTAL_FORMS': '2', 'host_set-INITIAL_FORMS': '1', 'host_set-MAX_NUM_FORMS': '0', 'host_set-0-id': six.text_type(host1.id), 'host_set-0-hostname': 'tranquility.hub.dal.net', 'host_set-1-hostname': 'matrix.de.eu.dal.net' } # To save a formset as new, it needs a new hub instance dalnet = Network.objects.create(name="DALnet") formset = HostFormSet(data, instance=dalnet, save_as_new=True) self.assertTrue(formset.is_valid()) formset.save() self.assertQuerysetEqual( dalnet.host_set.order_by("hostname"), ["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"] ) def test_initial_data(self): user = User.objects.create(username="bibi", serial=1) UserSite.objects.create(user=user, data=7) FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__") formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}]) self.assertEqual(formset.forms[0].initial['data'], 7) self.assertEqual(formset.extra_forms[0].initial['data'], 41) self.assertIn('value="42"', formset.extra_forms[1].as_p()) class FormsetTests(TestCase): def test_error_class(self): ''' Test the type of Formset and Form error attributes ''' Formset = modelformset_factory(User, fields="__all__") data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '0', 'form-0-id': '', 'form-0-username': 'apollo13', 'form-0-serial': '1', 'form-1-id': '', 'form-1-username': 'apollo13', 'form-1-serial': '2', } formset = Formset(data) # check if the returned error classes are correct # note: formset.errors returns a list as documented self.assertIsInstance(formset.errors, list) self.assertIsInstance(formset.non_form_errors(), ErrorList) for form in formset.forms: self.assertIsInstance(form.errors, ErrorDict) 
self.assertIsInstance(form.non_field_errors(), ErrorList) def test_initial_data(self): User.objects.create(username="bibi", serial=1) Formset = modelformset_factory(User, fields="__all__", extra=2) formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}]) self.assertEqual(formset.forms[0].initial['username'], "bibi") self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11") self.assertIn('value="apollo12"', formset.extra_forms[1].as_p()) def test_extraneous_query_is_not_run(self): Formset = modelformset_factory(Network, fields="__all__") data = {'test-TOTAL_FORMS': '1', 'test-INITIAL_FORMS': '0', 'test-MAX_NUM_FORMS': '', 'test-0-name': 'Random Place', } with self.assertNumQueries(1): formset = Formset(data, prefix="test") formset.save() class CustomWidget(forms.widgets.TextInput): pass class UserSiteForm(forms.ModelForm): class Meta: model = UserSite fields = "__all__" widgets = { 'id': CustomWidget, 'data': CustomWidget, } localized_fields = ('data',) class Callback(object): def __init__(self): self.log = [] def __call__(self, db_field, **kwargs): self.log.append((db_field, kwargs)) return db_field.formfield(**kwargs) class FormfieldCallbackTests(TestCase): """ Regression for #13095 and #17683: Using base forms with widgets defined in Meta should not raise errors and BaseModelForm should respect the specified pk widget. 
""" def test_inlineformset_factory_default(self): Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__") form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def test_modelformset_factory_default(self): Formset = modelformset_factory(UserSite, form=UserSiteForm) form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def assertCallbackCalled(self, callback): id_field, user_field, data_field = UserSite._meta.fields expected_log = [ (id_field, {'widget': CustomWidget}), (user_field, {}), (data_field, {'widget': CustomWidget, 'localize': True}), ] self.assertEqual(callback.log, expected_log) def test_inlineformset_custom_callback(self): callback = Callback() inlineformset_factory(User, UserSite, form=UserSiteForm, formfield_callback=callback, fields="__all__") self.assertCallbackCalled(callback) def test_modelformset_custom_callback(self): callback = Callback() modelformset_factory(UserSite, form=UserSiteForm, formfield_callback=callback) self.assertCallbackCalled(callback) class BaseCustomDeleteFormSet(BaseFormSet): """ A formset mix-in that lets a form decide if it's to be deleted. Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed. form.should_delete() is called. The formset delete field is also suppressed. 
""" def add_fields(self, form, index): super(BaseCustomDeleteFormSet, self).add_fields(form, index) self.can_delete = True if DELETION_FIELD_NAME in form.fields: del form.fields[DELETION_FIELD_NAME] def _should_delete_form(self, form): return hasattr(form, 'should_delete') and form.should_delete() class FormfieldShouldDeleteFormTests(TestCase): """ Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form """ class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet): """ Model FormSet with CustomDelete MixIn """ class CustomDeleteUserForm(forms.ModelForm): """ A model form with a 'should_delete' method """ class Meta: model = User fields = "__all__" def should_delete(self): """ delete form if odd PK """ return self.instance.pk % 2 != 0 NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True) DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet) data = { 'form-TOTAL_FORMS': '4', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '4', 'form-0-username': 'John', 'form-0-serial': '1', 'form-1-username': 'Paul', 'form-1-serial': '2', 'form-2-username': 'George', 'form-2-serial': '3', 'form-3-username': 'Ringo', 'form-3-serial': '5', } delete_all_ids = { 'form-0-DELETE': '1', 'form-1-DELETE': '1', 'form-2-DELETE': '1', 'form-3-DELETE': '1', } def test_init_database(self): """ Add test data to database via formset """ formset = self.NormalFormset(self.data) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 4) def test_no_delete(self): """ Verify base formset doesn't modify database """ # reload database self.test_init_database() # pass standard data dict & see none updated data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) 
self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 4) def test_all_delete(self): """ Verify base formset honors DELETE field """ # reload database self.test_init_database() # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) data.update(self.delete_all_ids) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 0) def test_custom_delete(self): """ Verify DeleteFormset ignores DELETE field and uses form method """ # reload database self.test_init_database() # Create formset with custom Delete function # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) data.update(self.delete_all_ids) formset = self.DeleteFormset(data, queryset=User.objects.all()) # verify two were deleted self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 2) # verify no "odd" PKs left odd_ids = [user.pk for user in User.objects.all() if user.pk % 2] self.assertEqual(len(odd_ids), 0) class RedeleteTests(TestCase): def test_resubmit(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) self.assertTrue(formset.is_valid()) formset.save() 
self.assertEqual(UserSite.objects.count(), 0) formset = formset_cls(data, instance=u) # Even if the "us" object isn't in the DB any more, the form # validates. self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0) def test_delete_already_deleted(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) us.delete() self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0)
bsd-3-clause
hyperized/ansible
lib/ansible/modules/storage/purestorage/purefa_connect.py
21
4803
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2018, Simon Dodsley (simon@purestorage.com) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: purefa_connect version_added: '2.9' short_description: Manage replication connections between two FlashArrays description: - Manage array connections to specified target array author: - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com> options: state: description: - Create or delete array connection default: present type: str choices: [ absent, present ] target_url: description: - Management IP address of remote array. type: str required: true target_api: description: - API token for target array type: str connection: description: Type of connection between arrays. type: str choices: [ sync, async ] default: async extends_documentation_fragment: - purestorage.fa ''' EXAMPLES = r''' - name: Create an async connection to remote array purefa_connect: target_url: 10.10.10.20 target_api: connection: async fa_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 - name: Delete connection to remote array purefa_connect: state: absent target_url: 10.10.10.20 fa_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 ''' RETURN = r''' ''' HAS_PURESTORAGE = True try: from purestorage import FlashArray except ImportError: HAS_PURESTORAGE = False import platform from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pure import get_system, purefa_argument_spec def _check_connected(module, array): connected_arrays = array.list_array_connections() for target in range(0, len(connected_arrays)): if connected_arrays[target]['management_address'] == module.params['target_url'] and \ connected_arrays[target]['connected']: 
return connected_arrays[target] return None def break_connection(module, array, target_array): """Break connection between arrays""" changed = True if not module.check_mode: source_array = array.get()['array_name'] try: array.disconnect_array(target_array['array_name']) except Exception: module.fail_json(msg="Failed to disconnect {0} from {1}.".format(target_array['array_name'], source_array)) module.exit_json(changed=changed) def create_connection(module, array): """Create connection between arrays""" changed = True if not module.check_mode: remote_array = module.params['target_url'] user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { 'base': 'Ansible', 'class': __name__, 'version': 1.2, 'platform': platform.platform() } try: remote_system = FlashArray(module.params['target_url'], api_token=module.params['target_api'], user_agent=user_agent) connection_key = remote_system.get(connection_key=True)['connection_key'] remote_array = remote_system.get()['array_name'] array.connect_array(module.params['target_url'], connection_key, [module.params['connection']]) except Exception: module.fail_json(msg="Failed to connect to remote array {0}.".format(remote_array)) module.exit_json(changed=changed) def main(): argument_spec = purefa_argument_spec() argument_spec.update(dict( state=dict(type='str', default='present', choices=['absent', 'present']), connection=dict(type='str', default='async', choices=['async', 'sync']), target_url=dict(type='str', required=True), target_api=dict(type='str'), )) required_if = [('state', 'present', ['target_api'])] module = AnsibleModule(argument_spec, required_if=required_if, supports_check_mode=True) if not HAS_PURESTORAGE: module.fail_json(msg='purestorage sdk is required for this module') state = module.params['state'] array = get_system(module) target_array = _check_connected(module, array) if state == 'present' and target_array is None: create_connection(module, array) elif state == 'absent'and target_array is not None: 
break_connection(module, array, target_array) module.exit_json(changed=False) if __name__ == '__main__': main()
gpl-3.0
tomasreimers/tensorflow-emscripten
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_benchmark_test.py
83
8976
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Regression test for DNNLinearCombinedEstimator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import tempfile from tensorflow.contrib.layers.python.layers import feature_column from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import test_data from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.platform import test from tensorflow.python.training import adagrad from tensorflow.python.training import ftrl from tensorflow.python.training import server_lib # Desired training steps, reported in benchmark. Actual steps might be slightly # more than this since supervisor training runs for a non-detrministic number of # steps. 
_ITERS = 100 _METRIC_KEYS = { 'accuracy', 'auc', 'accuracy/threshold_0.500000_mean', 'loss', 'precision/positive_threshold_0.500000_mean', 'recall/positive_threshold_0.500000_mean', } class DNNLinearCombinedClassifierBenchmark(test.Benchmark): def _assertSingleClassMetrics(self, metrics): estimator_test_utils.assert_in_range(0.9, 1.0, 'auc', metrics) estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy/threshold_0.500000_mean', metrics) estimator_test_utils.assert_in_range( 0.9, 1.0, 'precision/positive_threshold_0.500000_mean', metrics) estimator_test_utils.assert_in_range( 0.9, 1.0, 'recall/positive_threshold_0.500000_mean', metrics) self._assertCommonMetrics(metrics) def _assertCommonMetrics(self, metrics): estimator_test_utils.assert_in_range(_ITERS, _ITERS + 5, 'global_step', metrics) estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy', metrics) estimator_test_utils.assert_in_range(0.0, 0.2, 'loss', metrics) self.report_benchmark( iters=metrics['global_step'], extras={k: v for k, v in metrics.items() if k in _METRIC_KEYS}) def benchmarkMatrixData(self): iris = test_data.prepare_iris_data_for_logistic_regression() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=tempfile.mkdtemp(), linear_feature_columns=(bucketized_feature,), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3)) input_fn = test_data.iris_input_logistic_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkTensorData(self): def _input_fn(): iris = test_data.prepare_iris_data_for_logistic_regression() features = {} for i in range(4): # The following shows how to provide the Tensor data for # RealValuedColumns. 
features.update({ str(i): array_ops.reshape( constant_op.constant( iris.data[:, i], dtype=dtypes.float32), (-1, 1)) }) # The following shows how to provide the SparseTensor data for # a SparseColumn. features['dummy_sparse_column'] = sparse_tensor.SparseTensor( values=('en', 'fr', 'zh'), indices=((0, 0), (0, 1), (60, 0)), dense_shape=(len(iris.target), 2)) labels = array_ops.reshape( constant_op.constant( iris.target, dtype=dtypes.int32), (-1, 1)) return features, labels iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [ feature_column.real_valued_column(str(i)) for i in range(4) ] linear_features = [ feature_column.bucketized_column( cont_features[i], test_data.get_quantile_based_buckets(iris.data[:, i], 10)) for i in range(4) ] linear_features.append( feature_column.sparse_column_with_hash_bucket( 'dummy_sparse_column', hash_bucket_size=100)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=tempfile.mkdtemp(), linear_feature_columns=linear_features, dnn_feature_columns=cont_features, dnn_hidden_units=(3, 3)) metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate( input_fn=_input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkCustomOptimizer(self): iris = test_data.prepare_iris_data_for_logistic_regression() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=tempfile.mkdtemp(), linear_feature_columns=(bucketized_feature,), linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3), dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1)) input_fn = test_data.iris_input_logistic_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) 
self._assertSingleClassMetrics(metrics) def benchmarkMultiClass(self): iris = base.load_iris() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=(bucketized_feature,), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3)) input_fn = test_data.iris_input_multiclass_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertCommonMetrics(metrics) def benchmarkPartitionedVariables(self): def _input_fn(): features = { 'language': sparse_tensor.SparseTensor( values=('en', 'fr', 'zh'), indices=((0, 0), (0, 1), (2, 0)), dense_shape=(3, 2)) } labels = constant_op.constant(((1,), (0,), (0,))) return features, labels # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. sparse_feature = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7) embedding_feature = feature_column.embedding_column( sparse_feature, dimension=1) tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. 
config._cluster_spec = server_lib.ClusterSpec({}) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=(sparse_feature,), dnn_feature_columns=(embedding_feature,), dnn_hidden_units=(3, 3), config=config) metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate( input_fn=_input_fn, steps=100) self._assertCommonMetrics(metrics) if __name__ == '__main__': test.main()
apache-2.0
tdtrask/ansible
lib/ansible/modules/system/svc.py
63
9262
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2015, Brian Coca <bcoca@ansible.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: svc author: - Brian Coca (@bcoca) version_added: "1.9" short_description: Manage daemontools services description: - Controls daemontools services on remote hosts using the svc utility. options: name: description: - Name of the service to manage. required: true state: description: - C(Started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the svc (svc -t) and C(killed) will always bounce the svc (svc -k). C(reloaded) will send a sigusr1 (svc -1). C(once) will run a normally downed svc once (svc -o), not really an idempotent operation. choices: [ killed, once, reloaded, restarted, started, stopped ] downed: description: - Should a 'down' file exist or not, if it exists it disables auto startup. defaults to no. Downed does not imply stopped. type: bool default: 'no' enabled: description: - Wheater the service is enabled or not, if disabled it also implies stopped. Make note that a service can be enabled and downed (no auto restart). type: bool service_dir: description: - directory svscan watches for services default: /service service_src: description: - directory where services are defined, the source of symlinks to service_dir. 
''' EXAMPLES = ''' - name: Start svc dnscache, if not running svc: name: dnscache state: started - name: Stop svc dnscache, if running svc: name: dnscache state: stopped - name: Kill svc dnscache, in all cases svc: name: dnscache state: killed - name: Restart svc dnscache, in all cases svc: name: dnscache state: restarted - name: Reload svc dnscache, in all cases svc: name: dnscache state: reloaded - name: Using alternative svc directory location svc: name: dnscache state: reloaded service_dir: /var/service ''' import os import re import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native def _load_dist_subclass(cls, *args, **kwargs): ''' Used for derivative implementations ''' subclass = None distro = kwargs['module'].params['distro'] # get the most specific superclass for this platform if distro is not None: for sc in cls.__subclasses__(): if sc.distro is not None and sc.distro == distro: subclass = sc if subclass is None: subclass = cls return super(cls, subclass).__new__(subclass) class Svc(object): """ Main class that handles daemontools, can be subclassed and overridden in case we want to use a 'derivative' like encore, s6, etc """ # def __new__(cls, *args, **kwargs): # return _load_dist_subclass(cls, args, kwargs) def __init__(self, module): self.extra_paths = ['/command', '/usr/local/bin'] self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] self.module = module self.name = module.params['name'] self.service_dir = module.params['service_dir'] self.service_src = module.params['service_src'] self.enabled = None self.downed = None self.full_state = None self.state = None self.pid = None self.duration = None self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) self.svc_full = '/'.join([self.service_dir, self.name]) self.src_full = '/'.join([self.service_src, 
self.name]) self.enabled = os.path.lexists(self.svc_full) if self.enabled: self.downed = os.path.lexists('%s/down' % self.svc_full) self.get_status() else: self.downed = os.path.lexists('%s/down' % self.src_full) self.state = 'stopped' def enable(self): if os.path.exists(self.src_full): try: os.symlink(self.src_full, self.svc_full) except OSError as e: self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e)) else: self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full) def disable(self): try: os.unlink(self.svc_full) except OSError as e: self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e)) self.execute_command([self.svc_cmd, '-dx', self.src_full]) src_log = '%s/log' % self.src_full if os.path.exists(src_log): self.execute_command([self.svc_cmd, '-dx', src_log]) def get_status(self): (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full]) if err is not None and err: self.full_state = self.state = err else: self.full_state = out m = re.search(r'\(pid (\d+)\)', out) if m: self.pid = m.group(1) m = re.search(r'(\d+) seconds', out) if m: self.duration = m.group(1) if re.search(' up ', out): self.state = 'start' elif re.search(' down ', out): self.state = 'stopp' else: self.state = 'unknown' return if re.search(' want ', out): self.state += 'ing' else: self.state += 'ed' def start(self): return self.execute_command([self.svc_cmd, '-u', self.svc_full]) def stopp(self): return self.stop() def stop(self): return self.execute_command([self.svc_cmd, '-d', self.svc_full]) def once(self): return self.execute_command([self.svc_cmd, '-o', self.svc_full]) def reload(self): return self.execute_command([self.svc_cmd, '-1', self.svc_full]) def restart(self): return self.execute_command([self.svc_cmd, '-t', self.svc_full]) def kill(self): return self.execute_command([self.svc_cmd, '-k', self.svc_full]) def execute_command(self, cmd): try: (rc, out, err) = 
self.module.run_command(' '.join(cmd)) except Exception as e: self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc()) return (rc, out, err) def report(self): self.get_status() states = {} for k in self.report_vars: states[k] = self.__dict__[k] return states # =========================================== # Main control flow def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), enabled=dict(type='bool'), downed=dict(type='bool'), dist=dict(type='str', default='daemontools'), service_dir=dict(type='str', default='/service'), service_src=dict(type='str', default='/etc/service'), ), supports_check_mode=True, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') state = module.params['state'] enabled = module.params['enabled'] downed = module.params['downed'] svc = Svc(module) changed = False orig_state = svc.report() if enabled is not None and enabled != svc.enabled: changed = True if not module.check_mode: try: if enabled: svc.enable() else: svc.disable() except (OSError, IOError) as e: module.fail_json(msg="Could change service link: %s" % to_native(e)) if state is not None and state != svc.state: changed = True if not module.check_mode: getattr(svc, state[:-2])() if downed is not None and downed != svc.downed: changed = True if not module.check_mode: d_file = "%s/down" % svc.svc_full try: if downed: open(d_file, "a").close() else: os.unlink(d_file) except (OSError, IOError) as e: module.fail_json(msg="Could change downed file: %s " % (to_native(e))) module.exit_json(changed=changed, svc=svc.report()) if __name__ == '__main__': main()
gpl-3.0
odoobgorg/odoo
addons/website/models/ir_qweb.py
15
2010
# -*- coding: utf-8 -*-
from openerp.addons.web.http import request
from openerp.osv import orm


class QWeb(orm.AbstractModel):
    """QWeb rendering model specialised for the website context.

    Rewrites URL-bearing attributes (via ``url_for`` or the CDN) and, when
    HTML compression is enabled on the current website, collapses redundant
    whitespace in rendered text nodes.
    """
    _inherit = 'ir.qweb'

    # Per-tag attribute whose value is a URL eligible for url_for/CDN rewriting.
    URL_ATTRS = {
        'form': 'action',
        'a': 'href',
    }

    # Per-tag attribute that should be served from the CDN when it is active.
    CDN_TRIGGERS = {
        'link': 'href',
        'script': 'src',
        'img': 'src',
    }

    # Tags whose inner whitespace is significant and must never be collapsed.
    PRESERVE_WHITESPACE = [
        'pre',
        'textarea',
        'script',
        'style',
    ]

    def _compress_enabled(self):
        """Return True when the current request wants compressed HTML output."""
        return bool(request and not request.debug and
                    getattr(request, 'website', None) and
                    request.website.compress_html)

    def render_attribute(self, element, name, value, qwebcontext):
        ctx = qwebcontext.context or {}
        if not ctx.get('rendering_bundle'):
            url_attr = self.URL_ATTRS.get(element.tag)
            url_for = qwebcontext.get('url_for')
            if name == url_attr and url_for:
                # Route URLs through url_for (multilang/website aware).
                value = url_for(value)
            else:
                website = request and getattr(request, 'website', None)
                if website and website.cdn_activated and \
                        name in (url_attr, self.CDN_TRIGGERS.get(element.tag)):
                    value = website.get_cdn_url(value)
        return super(QWeb, self).render_attribute(element, name, value, qwebcontext)

    def render_text(self, text, element, qwebcontext):
        if self._compress_enabled() and element.tag not in self.PRESERVE_WHITESPACE:
            text = self.re_remove_spaces.sub(' ', text)
        return super(QWeb, self).render_text(text, element, qwebcontext)

    def render_tail(self, tail, element, qwebcontext):
        if self._compress_enabled() and element.getparent().tag not in self.PRESERVE_WHITESPACE:
            # No need to recurse because those tags children are not html5 parser friendly
            tail = self.re_remove_spaces.sub(' ', tail.rstrip())
        return super(QWeb, self).render_tail(tail, element, qwebcontext)
gpl-3.0
wndias/bc.repository
script.module.youtube.dl/lib/youtube_dl/extractor/addanime.py
29
3281
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, qualities, ) class AddAnimeIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)' _TESTS = [{ 'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9', 'md5': '72954ea10bc979ab5e2eb288b21425a0', 'info_dict': { 'id': '24MR3YO5SAS9', 'ext': 'mp4', 'description': 'One Piece 606', 'title': 'One Piece 606', } }, { 'url': 'http://add-anime.net/video/MDUGWYKNGBD8/One-Piece-687', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) try: webpage = self._download_webpage(url, video_id) except ExtractorError as ee: if not isinstance(ee.cause, compat_HTTPError) or \ ee.cause.code != 503: raise redir_webpage = ee.cause.read().decode('utf-8') action = self._search_regex( r'<form id="challenge-form" action="([^"]+)"', redir_webpage, 'Redirect form') vc = self._search_regex( r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>', redir_webpage, 'redirect vc value') av = re.search( r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);', redir_webpage) if av is None: raise ExtractorError('Cannot find redirect math task') av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3)) parsed_url = compat_urllib_parse_urlparse(url) av_val = av_res + len(parsed_url.netloc) confirm_url = ( parsed_url.scheme + '://' + parsed_url.netloc + action + '?' 
+ compat_urllib_parse_urlencode({ 'jschl_vc': vc, 'jschl_answer': compat_str(av_val)})) self._download_webpage( confirm_url, video_id, note='Confirming after redirect') webpage = self._download_webpage(url, video_id) FORMATS = ('normal', 'hq') quality = qualities(FORMATS) formats = [] for format_id in FORMATS: rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id) video_url = self._search_regex(rex, webpage, 'video file URLx', fatal=False) if not video_url: continue formats.append({ 'format_id': format_id, 'url': video_url, 'quality': quality(format_id), }) self._sort_formats(formats) video_title = self._og_search_title(webpage) video_description = self._og_search_description(webpage) return { '_type': 'video', 'id': video_id, 'formats': formats, 'title': video_title, 'description': video_description }
gpl-2.0
timvandermeij/servo
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-position-property-tests.py
841
3343
#!/usr/bin/env python
# - * - coding: UTF-8 - * -

"""
This script generates tests text-emphasis-position-property-001 ~ 006
which cover all possible values of text-emphasis-position property with
all combination of three main writing modes and two orientations. Only
test files are generated by this script. It also outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""

from __future__ import unicode_literals

import itertools

# Output filename patterns: {:03} is the ref index, {} a per-ref suffix letter.
TEST_FILE = 'text-emphasis-position-property-{:03}{}.html'
REF_FILE = 'text-emphasis-position-property-{:03}-ref.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-position: {value}, {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="'text-emphasis-position: {value}' with 'writing-mode: {wm}' puts emphasis marks {position} the text.">
<link rel="match" href="text-emphasis-position-property-{index:03}-ref.html">
<p>Pass if the emphasis marks are {position} the text below:</p>
<div style="line-height: 5; text-emphasis: circle; writing-mode: {wm}; text-orientation: {orient}; text-emphasis-position: {value}">試験テスト</div>
'''

# Suffix letters appended to a ref index each time another test shares it.
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e', 'f', 'g']

WRITING_MODES = ["horizontal-tb", "vertical-rl", "vertical-lr"]
POSITION_HORIZONTAL = ["over", "under"]
POSITION_VERTICAL = ["right", "left"]

# Maps the effective position keyword to the shared reference-file index.
REF_MAP_MIXED = { "over": 1, "under": 2, "right": 3, "left": 4 }
REF_MAP_SIDEWAYS = { "right": 5, "left": 6 }
# Human-readable phrasing used in the <title>/assert/pass-condition text.
POSITION_TEXT = { "over": "over", "under": "under",
                  "right": "to the right of", "left": "to the left of" }

# One independent suffix iterator per reference index (1..6), so each ref
# gets its own '', 'a', 'b', ... sequence of test filenames.
suffixes = [iter(SUFFIXES) for i in range(6)]

# Collected "== test ref" lines, printed sorted at the end.
reftest_items = []

def write_file(filename, content):
    # Write UTF-8 bytes explicitly; works the same on Python 2 and 3.
    with open(filename, 'wb') as f:
        f.write(content.encode('UTF-8'))

def write_test_file(idx, suffix, wm, orient, value, position):
    """Emit one test file for the given ref index and record its reftest line."""
    filename = TEST_FILE.format(idx, suffix)
    write_file(filename, TEST_TEMPLATE.format(
        value=value, wm=wm, orient=orient, index=idx, position=position,
        title=(wm if orient == "mixed" else "{}, {}".format(wm, orient))))
    reftest_items.append("== {} {}".format(filename, REF_FILE.format(idx)))

def write_test_files(wm, orient, pos1, pos2):
    """Emit both keyword orderings ('pos1 pos2' and 'pos2 pos1') for one ref.

    pos1 is the effective position for this writing mode; pos2 is the
    ineffective one, so both orderings must render identically.
    """
    idx = (REF_MAP_MIXED if orient == "mixed" else REF_MAP_SIDEWAYS)[pos1]
    position = POSITION_TEXT[pos1]
    suffix = suffixes[idx - 1]
    write_test_file(idx, next(suffix), wm, orient,
                    pos1 + " " + pos2, position)
    write_test_file(idx, next(suffix), wm, orient,
                    pos2 + " " + pos1, position)

# Note: runs at import time — this module is a script, not a library.
for wm in WRITING_MODES:
    # In horizontal writing modes only over/under apply; in vertical modes
    # only right/left apply. The other axis's keyword is inert.
    if wm == "horizontal-tb":
        effective_pos = POSITION_HORIZONTAL
        ineffective_pos = POSITION_VERTICAL
    else:
        effective_pos = POSITION_VERTICAL
        ineffective_pos = POSITION_HORIZONTAL
    for pos1, pos2 in itertools.product(effective_pos, ineffective_pos):
        write_test_files(wm, "mixed", pos1, pos2)
        if wm != "horizontal-tb":
            # 'sideways' orientation is only meaningful in vertical modes.
            write_test_files(wm, "sideways", pos1, pos2)

print("# START tests from {}".format(__file__))
reftest_items.sort()
for item in reftest_items:
    print(item)
print("# END tests from {}".format(__file__))
mpl-2.0
jeffery9/mixprint_addons
stock_invoice_directly/__openerp__.py
120
1651
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Invoice Picking Directly', 'version': '1.0', 'category' : 'Warehouse Management', 'description': """ Invoice Wizard for Delivery. ============================ When you send or deliver goods, this module automatically launch the invoicing wizard if the delivery is to be invoiced. """, 'author': 'OpenERP SA', 'website': 'http://www.openerp.com', 'images': ['images/create_invoice.jpeg'], 'depends': ['delivery', 'stock'], 'data': [], 'demo': [], 'test': ['test/stock_invoice_directly.yml'], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
geekboxzone/lollipop_external_chromium_org
tools/telemetry/telemetry/core/local_server.py
26
7004
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import json
import os
import re
import subprocess
import sys

from telemetry.core import forwarders
from telemetry.core import util

NamedPort = collections.namedtuple('NamedPort', ['name', 'port'])


class LocalServerBackend(object):
  """Runs inside the spawned child process and does the actual serving."""

  def __init__(self):
    pass

  def StartAndGetNamedPorts(self, args):
    """Starts the actual server and obtains any sockets on which it
    should listen.

    Returns a list of NamedPort on which this backend is listening.
    """
    raise NotImplementedError()

  def ServeForever(self):
    raise NotImplementedError()


class LocalServer(object):
  """Parent-process handle that launches a LocalServerBackend subprocess."""

  def __init__(self, server_backend_class):
    assert LocalServerBackend in server_backend_class.__bases__
    server_module_name = server_backend_class.__module__
    assert server_module_name in sys.modules, \
        'The server class\' module must be findable via sys.modules'
    assert getattr(sys.modules[server_module_name],
                   server_backend_class.__name__), \
        'The server class must getattrable from its __module__ by its __name__'

    self._server_backend_class = server_backend_class
    self._subprocess = None
    self._devnull = None
    self._local_server_controller = None
    self.forwarder = None
    self.host_ip = None

  def Start(self, local_server_controller):
    # `is None` instead of `== None` (identity check is the correct idiom).
    assert self._subprocess is None
    self._local_server_controller = local_server_controller

    self.host_ip = local_server_controller.host_ip

    server_args = self.GetBackendStartupArgs()
    server_args_as_json = json.dumps(server_args)
    server_module_name = self._server_backend_class.__module__

    self._devnull = open(os.devnull, 'w')
    # Re-run this very module as a child process; see _LocalServerBackendMain.
    cmd = [
        sys.executable, '-m', __name__,
        'run_backend',
        server_module_name, self._server_backend_class.__name__,
        server_args_as_json,
    ]

    env = os.environ.copy()
    env['PYTHONPATH'] = os.pathsep.join(sys.path)

    self._subprocess = subprocess.Popen(
        cmd, cwd=util.GetTelemetryDir(), env=env, stdout=subprocess.PIPE)

    named_ports = self._GetNamedPortsFromBackend()
    named_port_pair_map = {'http': None, 'https': None, 'dns': None}
    for name, port in named_ports:
      assert name in named_port_pair_map, '%s forwarding is unsupported' % name
      named_port_pair_map[name] = (
          forwarders.PortPair(port,
                              local_server_controller.GetRemotePort(port)))
    self.forwarder = local_server_controller.CreateForwarder(
        forwarders.PortPairs(**named_port_pair_map))

  def _GetNamedPortsFromBackend(self):
    named_ports_json = None
    named_ports_re = re.compile('LocalServerBackend started: (?P<port>.+)')
    # TODO: This will hang if the subprocess doesn't print the correct output.
    while self._subprocess.poll() is None:
      m = named_ports_re.match(self._subprocess.stdout.readline())
      if m:
        named_ports_json = m.group('port')
        break
    if not named_ports_json:
      raise Exception('Server process died prematurely ' +
                      'without giving us port pairs.')
    return [NamedPort(**pair) for pair in json.loads(named_ports_json.lower())]

  @property
  def is_running(self):
    return self._subprocess is not None

  def __enter__(self):
    return self

  def __exit__(self, *args):
    self.Close()

  def __del__(self):
    self.Close()

  def Close(self):
    if self.forwarder:
      self.forwarder.Close()
      self.forwarder = None
    if self._subprocess:
      # TODO(tonyg): Should this block until it goes away?
      self._subprocess.kill()
      self._subprocess = None
    if self._devnull:
      self._devnull.close()
      self._devnull = None
    if self._local_server_controller:
      self._local_server_controller.ServerDidClose(self)
      self._local_server_controller = None

  def GetBackendStartupArgs(self):
    """Returns whatever arguments are required to start up the backend"""
    raise NotImplementedError()


class LocalServerController():
  """Manages the list of running servers

  This class manages the running servers, but also provides an isolation layer
  to prevent LocalServer subclasses from accessing the browser backend directly.

  """

  def __init__(self, browser_backend):
    self._browser_backend = browser_backend
    self._local_servers_by_class = {}
    self.host_ip = self._browser_backend.forwarder_factory.host_ip

  def StartServer(self, server):
    assert not server.is_running, 'Server already started'
    assert isinstance(server, LocalServer)
    if server.__class__ in self._local_servers_by_class:
      # Message typo fixed: was 'Canont'.
      raise Exception(
          'Cannot have two servers of the same class running at once. ' +
          'Locate the existing one and use it, or call Close() on it.')

    server.Start(self)
    self._local_servers_by_class[server.__class__] = server

  def GetRunningServer(self, server_class, default_value):
    return self._local_servers_by_class.get(server_class, default_value)

  @property
  def local_servers(self):
    return self._local_servers_by_class.values()

  def Close(self):
    # Closing a server removes it from the dict (via ServerDidClose), so
    # repeatedly take an arbitrary remaining one until the dict is empty.
    while self._local_servers_by_class:
      server = next(iter(self._local_servers_by_class.values()))
      try:
        server.Close()
      except Exception:
        import traceback
        traceback.print_exc()

  def CreateForwarder(self, port_pairs):
    return self._browser_backend.forwarder_factory.Create(port_pairs)

  def GetRemotePort(self, port):
    return self._browser_backend.GetRemotePort(port)

  def ServerDidClose(self, server):
    del self._local_servers_by_class[server.__class__]


def _LocalServerBackendMain(args):
  assert len(args) == 4
  (cmd, server_module_name,
   server_backend_class_name, server_args_as_json) = args[:4]
  assert cmd == 'run_backend'
  server_module = __import__(server_module_name, fromlist=[True])
  server_backend_class = getattr(server_module, server_backend_class_name)
  server = server_backend_class()

  server_args = json.loads(server_args_as_json)

  named_ports = server.StartAndGetNamedPorts(server_args)
  assert isinstance(named_ports, list)
  for named_port in named_ports:
    assert isinstance(named_port, NamedPort)

  # Note: This message is scraped by the parent process'
  # _GetNamedPortsFromBackend(). Do **not** change it.
  # (print() call form works on both Python 2 and 3.)
  print('LocalServerBackend started: %s' % json.dumps(
      [pair._asdict() for pair in named_ports]))  # pylint: disable=W0212
  sys.stdout.flush()

  return server.ServeForever()


if __name__ == '__main__':
  # This trick is needed because local_server.NamedPort is not the
  # same as sys.modules['__main__'].NamedPort. The module itself is loaded
  # twice, basically.
  from telemetry.core import local_server  # pylint: disable=W0406
  sys.exit(local_server._LocalServerBackendMain(  # pylint: disable=W0212
      sys.argv[1:]))
bsd-3-clause
interlegis/sapl
sapl/base/models.py
1
12876
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.deletion import CASCADE
from django.db.models.signals import post_migrate
from django.db.utils import DEFAULT_DB_ALIAS
from django.utils.translation import ugettext_lazy as _
import reversion

from sapl.utils import (LISTA_DE_UFS, YES_NO_CHOICES,
                        get_settings_auth_user_model,
                        models_with_gr_for_model)

# Visibility levels for administrative documents.
DOC_ADM_OSTENSIVO = 'O'
DOC_ADM_RESTRITIVO = 'R'

TIPO_DOCUMENTO_ADMINISTRATIVO = ((DOC_ADM_OSTENSIVO, _('Ostensiva')),
                                 (DOC_ADM_RESTRITIVO, _('Restritiva')))

RELATORIO_ATOS_ACESSADOS = (('S', _('Sim')),
                            ('N', _('Não')))

# Protocol numbering schemes: per year, per legislature, or a single sequence.
SEQUENCIA_NUMERACAO_PROTOCOLO = (('A', _('Sequencial por ano')),
                                 ('L', _('Sequencial por legislatura')),
                                 ('U', _('Sequencial único')))

SEQUENCIA_NUMERACAO_PROPOSICAO = (('A', _('Sequencial por ano para cada autor')),
                                  ('B', _('Sequencial por ano indepententemente do autor')))

ESFERA_FEDERACAO_CHOICES = (('M', _('Municipal')),
                            ('E', _('Estadual')),
                            ('F', _('Federal')), )

ASSINATURA_ATA_CHOICES = (
    ('M', _('Mesa Diretora da Sessão')),
    ('P', _('Apenas o Presidente da Sessão')),
    ('T', _('Todos os Parlamentares Presentes na Sessão')),
)


@reversion.register()
class CasaLegislativa(models.Model):
    """Identification and contact data of the legislative house using SAPL."""
    # TODO: review all max_length values !!!!
    # legacy column: cod_casa => id (pk)
    codigo = models.CharField(max_length=100,
                              blank=True,
                              verbose_name=_('Codigo'))
    nome = models.CharField(max_length=100, verbose_name=_('Nome'))
    sigla = models.CharField(max_length=100, verbose_name=_('Sigla'))
    endereco = models.CharField(max_length=100, verbose_name=_('Endereço'))
    cep = models.CharField(max_length=100, verbose_name=_('CEP'))
    municipio = models.CharField(max_length=50, verbose_name=_('Município'))
    uf = models.CharField(max_length=2,
                          choices=LISTA_DE_UFS,
                          verbose_name=_('UF'))
    telefone = models.CharField(
        max_length=100, blank=True, verbose_name=_('Telefone'))
    fax = models.CharField(
        max_length=100, blank=True, verbose_name=_('Fax'))
    logotipo = models.ImageField(
        blank=True,
        upload_to='sapl/public/casa/logotipo/',
        verbose_name=_('Logotipo'))
    endereco_web = models.URLField(
        max_length=100, blank=True, verbose_name=_('HomePage'))
    email = models.EmailField(
        max_length=100, blank=True, verbose_name=_('E-mail'))
    informacao_geral = models.TextField(
        max_length=100,
        blank=True,
        verbose_name=_('Informação Geral'))

    class Meta:
        verbose_name = _('Casa Legislativa')
        verbose_name_plural = _('Casa Legislativa')
        ordering = ('id',)

    def __str__(self):
        return _('Casa Legislativa de %(municipio)s') % {
            'municipio': self.municipio}


@reversion.register()
class AppConfig(models.Model):
    """Singleton-ish application-wide configuration (first row is used)."""

    POLITICA_PROTOCOLO_CHOICES = (
        ('O', _('Sempre Gerar Protocolo')),
        ('C', _('Perguntar se é pra gerar protocolo ao incorporar')),
        ('N', _('Nunca Protocolar ao incorporar uma proposição')),
    )

    documentos_administrativos = models.CharField(
        max_length=1,
        verbose_name=_('Visibilidade dos Documentos Administrativos'),
        choices=TIPO_DOCUMENTO_ADMINISTRATIVO,
        default='O')

    estatisticas_acesso_normas = models.CharField(
        max_length=1,
        verbose_name=_('Estatísticas de acesso a normas'),
        choices=RELATORIO_ATOS_ACESSADOS,
        default='N')

    sequencia_numeracao_proposicao = models.CharField(
        max_length=1,
        verbose_name=_('Sequência de numeração de proposições'),
        choices=SEQUENCIA_NUMERACAO_PROPOSICAO,
        default='A')

    sequencia_numeracao_protocolo = models.CharField(
        max_length=1,
        verbose_name=_('Sequência de numeração de protocolos'),
        choices=SEQUENCIA_NUMERACAO_PROTOCOLO,
        default='A')

    inicio_numeracao_protocolo = models.PositiveIntegerField(
        verbose_name=_('Início da numeração de protocolo'),
        default=1
    )

    esfera_federacao = models.CharField(
        max_length=1,
        blank=True,
        default="",
        verbose_name=_('Esfera Federação'),
        choices=ESFERA_FEDERACAO_CHOICES)

    # TODO: to be implemented in version 3.2
    # painel_aberto = models.BooleanField(
    #     verbose_name=_('Painel aberto para usuário anônimo'),
    #     choices=YES_NO_CHOICES, default=False)

    texto_articulado_proposicao = models.BooleanField(
        verbose_name=_('Usar Textos Articulados para Proposições'),
        choices=YES_NO_CHOICES,
        default=False)

    texto_articulado_materia = models.BooleanField(
        verbose_name=_('Usar Textos Articulados para Matérias'),
        choices=YES_NO_CHOICES,
        default=False)

    texto_articulado_norma = models.BooleanField(
        verbose_name=_('Usar Textos Articulados para Normas'),
        choices=YES_NO_CHOICES,
        default=True)

    proposicao_incorporacao_obrigatoria = models.CharField(
        verbose_name=_('Regra de incorporação de proposições e protocolo'),
        max_length=1, choices=POLITICA_PROTOCOLO_CHOICES,
        default='O')

    assinatura_ata = models.CharField(
        verbose_name=_('Quem deve assinar a ata'),
        max_length=1, choices=ASSINATURA_ATA_CHOICES,
        default='T')

    # Stopwatch durations used on the session panel.
    cronometro_discurso = models.DurationField(
        verbose_name=_('Cronômetro do Discurso'),
        blank=True,
        null=True)

    cronometro_aparte = models.DurationField(
        verbose_name=_('Cronômetro do Aparte'),
        blank=True,
        null=True)

    cronometro_ordem = models.DurationField(
        verbose_name=_('Cronômetro da Ordem'),
        blank=True,
        null=True)

    cronometro_consideracoes = models.DurationField(
        verbose_name=_('Cronômetro de Considerações Finais'),
        blank=True,
        null=True)

    mostrar_brasao_painel = models.BooleanField(
        default=False,
        verbose_name=_('Mostrar brasão da Casa no painel?'))

    receber_recibo_proposicao = models.BooleanField(
        verbose_name=_('Protocolar proposição somente com recibo?'),
        choices=YES_NO_CHOICES,
        default=True)

    protocolo_manual = models.BooleanField(
        verbose_name=_('Informar data e hora de protocolo?'),
        choices=YES_NO_CHOICES,
        default=False)

    escolher_numero_materia_proposicao = models.BooleanField(
        verbose_name=_(
            'Indicar número da matéria a ser gerada na proposição?'),
        choices=YES_NO_CHOICES,
        default=False)

    tramitacao_materia = models.BooleanField(
        verbose_name=_(
            'Tramitar matérias anexadas junto com as matérias principais?'),
        choices=YES_NO_CHOICES,
        default=True)

    tramitacao_documento = models.BooleanField(
        verbose_name=_(
            'Tramitar documentos anexados junto com os documentos principais?'),
        choices=YES_NO_CHOICES,
        default=True)

    google_recaptcha_site_key = models.CharField(
        verbose_name=_('Chave pública gerada pelo Google Recaptcha'),
        max_length=256,
        default='')

    google_recaptcha_secret_key = models.CharField(
        verbose_name=_('Chave privada gerada pelo Google Recaptcha'),
        max_length=256,
        default='')

    sapl_as_sapn = models.BooleanField(
        verbose_name=_(
            'Utilizar SAPL como SAPN?'),
        choices=YES_NO_CHOICES,
        default=False)

    class Meta:
        verbose_name = _('Configurações da Aplicação')
        verbose_name_plural = _('Configurações da Aplicação')
        permissions = (
            ('menu_sistemas', _('Renderizar Menu Sistemas')),
            ('view_tabelas_auxiliares', _('Visualizar Tabelas Auxiliares')),
        )
        ordering = ('-id',)

    @classmethod
    def attr(cls, attr):
        # Lazily create the single config row on first access, then return
        # the requested attribute from it.
        config = AppConfig.objects.first()
        if not config:
            config = AppConfig()
            config.save()

        return getattr(config, attr)

    def __str__(self):
        return _('Configurações da Aplicação - %(id)s') % {
            'id': self.id}


@reversion.register()
class TipoAutor(models.Model):
    """User-definable author type, optionally bound to a SAPL model."""
    descricao = models.CharField(
        max_length=50, verbose_name=_('Descrição'),
        help_text=_(
            'Obs: Não crie tipos de autores semelhante aos tipos fixos. ')
    )

    content_type = models.OneToOneField(
        ContentType,
        null=True, default=None,
        verbose_name=_('Modelagem no SAPL'),
        on_delete=models.PROTECT)

    class Meta:
        ordering = ['descricao']
        verbose_name = _('Tipo de Autor')
        verbose_name_plural = _('Tipos de Autor')

    def __str__(self):
        return self.descricao


@reversion.register()
class Autor(models.Model):
    """An author of legislative content; may wrap another model instance
    through the generic relation (content_type/object_id)."""

    operadores = models.ManyToManyField(
        get_settings_auth_user_model(),
        through='OperadorAutor',
        through_fields=('autor', 'user'),
        symmetrical=False,
        related_name='autor_set',
        verbose_name='Operadores')

    tipo = models.ForeignKey(
        TipoAutor, verbose_name=_('Tipo do Autor'),
        on_delete=models.PROTECT)

    content_type = models.ForeignKey(
        ContentType,
        blank=True, null=True, default=None, on_delete=models.PROTECT)
    object_id = models.PositiveIntegerField(
        blank=True, null=True, default=None)
    autor_related = GenericForeignKey('content_type', 'object_id')

    nome = models.CharField(
        max_length=120, blank=True, verbose_name=_('Nome do Autor'))

    cargo = models.CharField(max_length=50, blank=True)

    class Meta:
        verbose_name = _('Autor')
        verbose_name_plural = _('Autores')
        unique_together = (('content_type', 'object_id'), )
        ordering = ('nome',)

    def __str__(self):
        # Prefer the related object's own string; fall back to nome/cargo.
        if self.autor_related:
            return str(self.autor_related)
        else:
            if self.nome:
                if self.cargo:
                    return '{} - {}'.format(self.nome, self.cargo)
                else:
                    return str(self.nome)
        return '?'
class OperadorAutor(models.Model):
    """Link table binding a user account to an Autor it operates for."""

    user = models.ForeignKey(
        get_settings_auth_user_model(),
        verbose_name=_('Operador do Autor'),
        related_name='operadorautor_set',
        on_delete=CASCADE)

    autor = models.ForeignKey(
        Autor,
        related_name='operadorautor_set',
        verbose_name=_('Autor'),
        on_delete=CASCADE)

    @property
    def user_name(self):
        """Display label combining the author and the operating user."""
        return '%s - %s' % (
            self.autor, self.user)

    class Meta:
        verbose_name = _('Operador do Autor')
        verbose_name_plural = _('Operadores do Autor')
        unique_together = (
            ('user', 'autor', ),)

    def __str__(self):
        return self.user_name


class AuditLog(models.Model):
    """One audit-trail entry per create/delete/update operation on a model.

    The `operation` field stores a single-char code ('C', 'D' or 'U' per the
    write sites elsewhere in the app — confirm against the signal handlers).
    """
    # Removed dead code: a class attribute `operation = ('C', 'D', 'U')` was
    # declared here and immediately shadowed by the CharField below, so it
    # was never reachable.

    MAX_DATA_LENGTH = 4096  # 4 KB of serialized object text

    username = models.CharField(max_length=100,
                                verbose_name=_('username'),
                                blank=True,
                                db_index=True)
    operation = models.CharField(max_length=1,
                                 verbose_name=_('operation'),
                                 db_index=True)
    timestamp = models.DateTimeField(verbose_name=_('timestamp'),
                                     db_index=True)
    object = models.CharField(max_length=MAX_DATA_LENGTH,
                              blank=True,
                              verbose_name=_('object'))
    object_id = models.PositiveIntegerField(verbose_name=_('object_id'),
                                            db_index=True)
    model_name = models.CharField(max_length=100,
                                  verbose_name=_('model'),
                                  db_index=True)
    app_name = models.CharField(max_length=100,
                                verbose_name=_('app'),
                                db_index=True)

    class Meta:
        verbose_name = _('AuditLog')
        verbose_name_plural = _('AuditLogs')
        ordering = ('-id',)

    def __str__(self):
        return "[%s] %s %s.%s %s" % (self.timestamp,
                                     self.operation,
                                     self.app_name,
                                     self.model_name,
                                     self.username,
                                     )
gpl-3.0
thopiekar/Uranium
tests/Settings/TestInstanceContainer.py
1
7410
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.

import pytest

import os

import UM.Settings.InstanceContainer
# Explicit imports restored: the tests below reference
# UM.Settings.SettingDefinition / SettingInstance / SettingRelation as
# attributes, which previously only worked when those submodules happened to
# be loaded as a side effect of importing InstanceContainer.
import UM.Settings.SettingDefinition
import UM.Settings.SettingInstance
import UM.Settings.SettingRelation

from UM.Resources import Resources

Resources.addSearchPath(os.path.dirname(os.path.abspath(__file__)))


def test_create():
    container = UM.Settings.InstanceContainer.InstanceContainer("test")
    assert container.getId() == "test"


##  Test whether setting a property on an instance correctly updates dependencies.
#
#   This test primarily tests the SettingInstance but requires some functionality
#   from InstanceContainer that is not easily captured in a Mock object. Therefore
#   it is included here.
def test_instance_setProperty():
    instance_container = UM.Settings.InstanceContainer.InstanceContainer("test")

    definition1 = UM.Settings.SettingDefinition.SettingDefinition("test_0", None)
    definition1.deserialize({
        "label": "Test 0",
        "type": "float",
        "description": "A Test Setting",
        "default_value": 10.0,
        "minimum_value": "test_1 / 10",
    })

    definition2 = UM.Settings.SettingDefinition.SettingDefinition("test_1", None)
    definition2.deserialize({
        "label": "Test 1",
        "type": "float",
        "description": "A Test Setting",
        "default_value": 50.0,
        "value": "test_0 * 5",
        "maximum_value": "test_0 * 10"
    })

    # Manually set up relations between definition1 and definition2
    # Normally this would be taken care of by the DefinitionContainer
    definition1.relations.append(UM.Settings.SettingRelation.SettingRelation(owner = definition1, target = definition2, relation_type = UM.Settings.SettingRelation.RelationType.RequiredByTarget, role = "value"))
    definition2.relations.append(UM.Settings.SettingRelation.SettingRelation(owner = definition2, target = definition1, relation_type = UM.Settings.SettingRelation.RelationType.RequiresTarget, role = "value"))
    definition1.relations.append(UM.Settings.SettingRelation.SettingRelation(owner = definition1, target = definition2, relation_type = UM.Settings.SettingRelation.RelationType.RequiredByTarget, role = "maximum_value"))
    definition2.relations.append(UM.Settings.SettingRelation.SettingRelation(owner = definition2, target = definition1, relation_type = UM.Settings.SettingRelation.RelationType.RequiresTarget, role = "maximum_value"))
    definition1.relations.append(UM.Settings.SettingRelation.SettingRelation(owner = definition1, target = definition2, relation_type = UM.Settings.SettingRelation.RelationType.RequiresTarget, role = "minimum_value"))
    definition2.relations.append(UM.Settings.SettingRelation.SettingRelation(owner = definition2, target = definition1, relation_type = UM.Settings.SettingRelation.RelationType.RequiredByTarget, role = "minimum_value"))

    def1_instance = UM.Settings.SettingInstance.SettingInstance(definition1, instance_container)
    instance_container.addInstance(def1_instance)
    def1_instance.setProperty("value", 20.0)

    assert def1_instance.value == 20.0

    # "maximum" is not a valid property name on the instance.
    with pytest.raises(AttributeError):
        assert def1_instance.maximum == 50.0

    # definition2's value/maximum are functions of test_0, so they follow.
    assert definition2.value(instance_container) == 100
    assert definition2.maximum_value(instance_container) == 200


test_serialize_data = [
    ({"definition": "basic", "name": "Basic"}, "basic.inst.cfg"),
    ({"definition": "basic", "name": "Metadata", "metadata": {"author": "Ultimaker", "bool": False, "integer": 6}}, "metadata.inst.cfg"),
    ({"definition": "multiple_settings", "name": "Setting Values", "values": {
        "test_setting_0": 20, "test_setting_1": 20, "test_setting_2": 20, "test_setting_3": 20, "test_setting_4": 20}}, "setting_values.inst.cfg"),
]


@pytest.mark.parametrize("container_data,equals_file", test_serialize_data)
def test_serialize(container_data, equals_file, loaded_container_registry):
    instance_container = UM.Settings.InstanceContainer.InstanceContainer("test")

    definition = loaded_container_registry.findDefinitionContainers(id = container_data["definition"])[0]
    instance_container.setDefinition(definition)
    instance_container.setName(container_data["name"])

    if "metadata" in container_data:
        instance_container.setMetaData(container_data["metadata"])

    if "values" in container_data:
        for key, value in container_data["values"].items():
            instance_container.setProperty(key, "value", value)

    result = instance_container.serialize()

    path = Resources.getPath(Resources.InstanceContainers, equals_file)
    with open(path) as data:
        assert data.readline() in result


test_serialize_with_ignored_metadata_keys_data = [
    ({"definition": "basic", "name": "Basic", "metadata": {"secret": "something", "secret2": "something2"}}, "basic.inst.cfg"),
    ({"definition": "basic", "name": "Metadata", "metadata": {"author": "Ultimaker", "bool": False, "integer": 6, "secret": "something", "secret2": "something2"}}, "metadata.inst.cfg"),
    ({"definition": "multiple_settings", "name": "Setting Values", "metadata": {"secret": "something", "secret2": "something2"}, "values": {
        "test_setting_0": 20, "test_setting_1": 20, "test_setting_2": 20, "test_setting_3": 20, "test_setting_4": 20}}, "setting_values.inst.cfg"),
]


@pytest.mark.parametrize("container_data,equals_file", test_serialize_with_ignored_metadata_keys_data)
def test_serialize_with_ignored_metadata_keys(container_data, equals_file, loaded_container_registry):
    instance_container = UM.Settings.InstanceContainer.InstanceContainer("test")

    definition = loaded_container_registry.findDefinitionContainers(id = container_data["definition"])[0]
    instance_container.setDefinition(definition)
    instance_container.setName(container_data["name"])

    if "metadata" in container_data:
        instance_container.setMetaData(container_data["metadata"])

    if "values" in container_data:
        for key, value in container_data["values"].items():
            instance_container.setProperty(key, "value", value)

    ignored_metadata_keys = ["secret", "secret2"]
    result = instance_container.serialize(ignored_metadata_keys = ignored_metadata_keys)
    instance_container.deserialize(result)
    new_metadata = instance_container.getMetaData()

    # the ignored keys should not be in the serialised metadata
    for key in ignored_metadata_keys:
        assert key not in new_metadata


test_deserialize_data = [
    ("basic.inst.cfg", {"name": "Basic"}),
    ("metadata.inst.cfg", {"name": "Metadata", "metaData": {"author": "Ultimaker", "bool": "False", "integer": "6"}}),
    ("setting_values.inst.cfg", {"name": "Setting Values", "values": {"test_setting_0": 20}}),
]


@pytest.mark.parametrize("filename,expected", test_deserialize_data)
def test_deserialize(filename, expected, loaded_container_registry):
    instance_container = UM.Settings.InstanceContainer.InstanceContainer(filename)

    path = Resources.getPath(Resources.InstanceContainers, filename)
    with open(path) as data:
        instance_container.deserialize(data.read())

    for key, value in expected.items():
        if key != "values":
            assert getattr(instance_container, key) == value
            continue

        for key, value in value.items():
            assert instance_container.getProperty(key, "value") == value
lgpl-3.0
jmartinm/invenio
modules/bibfield/lib/bibfield_reader.py
15
18944
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ BibField Reader """ __revision__ = "$Id$" import datetime import six from invenio.importutils import try_to_eval from invenio.containerutils import SmartDict from invenio.bibfield_config_engine import BibFieldParser as FieldParser class ReaderException(Exception): """Exception raised when some error happens reading a blob""" pass class Reader(object): """ Base class inside the hierarchy that contains several method implementations that will be shared, eventually, by all the *Reader classes. In this particular case this class is expecting that the base format is json, so no conversion is needed. """ """Default reader""" def __init__(self, blob=None, **kwargs): """ :param blob: """ self.blob = blob self.json = None self._additional_info = kwargs # self._additional_info['model'] = kwargs.get('model', '__default__') self._parsed = [] @staticmethod def split_blob(blob, schema=None, **kwargs): """ In case of several records inside the blob this method specify how to split then and work one by one afterwards. 
""" raise NotImplementedError() @property def field_definitions(self): return FieldParser.field_definitions() @property def functions(self): from invenio.bibfield_utils import CFG_BIBFIELD_FUNCTIONS return CFG_BIBFIELD_FUNCTIONS def translate(self): """ It transforms the incoming blob into a json structure using the rules described into the field and model definitions. To apply this rules it takes into account the type of the reader, which in fact means the type of the source format or `master_format` :return: Json structure (typically a dictionary) """ if not self.blob: raise ReaderException("To perform a 'translate' operation a blob is needed") # If we already have the json return it, use add or update to modify it if self.json: return self.json self.json = {} self.json['__meta_metadata__'] = {} self.json['__meta_metadata__']['__additional_info__'] = self._additional_info self.json['__meta_metadata__']['__aliases__'] = {} self.json['__meta_metadata__']['__errors__'] = [] self.json['__meta_metadata__']['__continuable_errors__'] = [] # if self._additional_info['model'] == '__default__' or \ # self._additional_info['model'] not in self.model_definitions: # self.json['__meta_metadata__']['__continuable_errors__']\ # .append("Warning - Using 'default' model for 'transalte', given model: '%s'" % (self._additional_info['model'], )) # fields = dict(zip(self.field_definitions.keys(), self.field_definitions.keys())) # else: # fields = self.model_definitions[self._additional_info['model']]['fields'] fields = dict(zip(self.field_definitions.keys(), self.field_definitions.keys())) self.add(self.json, self.blob, fields) return self.json._dict def add(self, json, blob, fields): """Adds the list of fields to the json structure""" self.json = json if isinstance(json, SmartDict) else SmartDict(json) self.blob = blob if not self.blob or not self.json: raise ReaderException("To perform an 'add' operation a json structure and a blob are needed") if not isinstance(fields, dict): if 
isinstance(fields, six.string_types): fields = (fields, ) # try: # model = self.json['__meta_metadata__']['__additional_info__']['model'] # except KeyError as e: # raise ReaderException('The json structure must contain a model (%s)' % (e, )) # if model == '__default__' or model not in self.model_definitions: # self.json['__meta_metadata__']['__continuable_errors__']\ # .append("Warning - Using 'default' model for 'add', given model: '%s'" % (model, )) # fields = dict(zip(fields, fields)) # else: # fields = dict((field, self.model_definitions[model]['fields'].get(field, field)) # for field in fields) fields = dict(zip(fields, fields)) self._prepare_blob() for field_name, json_id in fields.items(): self._unpack_rule(json_id, field_name) self._post_process_json() def set(self, json, field): """ """ self.json = json if isinstance(json, SmartDict) else SmartDict(json) # try: # model = self.json['__meta_metadata__']['__additional_info__']['model'] # except KeyError as e: # raise ReaderException('The json structure must contain a model (%s)' % (e, )) # if model == '__default__' or model not in self.model_definitions: # self.json['__meta_metadata__']['__continuable_errors__']\ # .append("Warning - Using 'default' model for 'add', given model: '%s'" % (model, )) # json_id = field # else: # json_id = self.model_definitions[model]['fields'].get(field, field) json_id = field try: rule = self.field_definitions[json_id] except KeyError: rule = {} self.json['__meta_metadata__']['__continuable_errors__']\ .append("Adding a new field '%s' without definition" % (field)) try: if self.json['__meta_metadata__']['__additional_info__']['master_format'] in rule['rules']: rule_def = rule['rules'][self.json['__meta_metadata__']['__additional_info__']['master_format']][0] rule_type = 'creator' elif 'derived' in rule['rules']: rule_def = rule['rules']['derived'][0] rule_type = 'derived' elif 'calculated' in rule['rules']: rule_def = rule['rules']['calculated'][0] rule_type = 'calculated' 
else: rule_def = {} rule_type = 'UNKNOWN' except KeyError: rule_def = {} rule_type = 'UNKNOWN' self.json['__meta_metadata__'][field] = self._find_meta_metadata(json_id, field, rule_type, rule, rule_def) def update(self, json, blob, fields=None): """ Tries to update the json structure with the fields given. If no fields are given then it will try to update all the fields inside the json structure. """ if not blob or not blob: raise ReaderException("To perform an 'add' operation a json structure and a blob are needed") # try: # model = json['__meta_metadata__']['__additional_info__']['model'] # except KeyError as e: # raise ReaderException('The json structure must contain a model (%s)' % (e, )) if not fields: fields = dict(zip(json.keys(), json.keys())) # if model == '__default__' or model not in self.model_definitions: # json['__meta_metadata__']['__continuable_errors__']\ # .append("Warning - Using 'default' model for 'update', given model: '%s'" % (model, )) # else: # fields = dict(fields, **self.model_definitions[model]['fields']) elif not isinstance(fields, dict): if isinstance(fields, six.string_types): fields = (fields, ) # if model == '__default__' or model not in self.model_definitions: # json['__meta_metadata__']['__continuable_errors__']\ # .append("Warning - Using 'default' model for 'update', given model: '%s'" % (model, )) # fields = dict(zip(fields, fields)) # else: # fields = dict((field, self.model_definitions[model]['fields'].get(field, field)) # for field in fields) fields = dict(zip(fields, fields)) # for key in fields.keys(): # del json['key'] self.add(json, blob, fields) def validate(self, reset=True): """docstring for validate""" pass def _prepare_blob(self, *args, **kwargs): """ Responsible of doing any kind of transformation over the blob before the translation begins """ raise NotImplemented def _get_elements_from_blob(self, regex_key): """ Should handle 'entire_record' and '*' Not an iterator! 
""" raise NotImplemented def _unpack_rule(self, json_id, field_name=None): """From the field definitions extract the rules an tries to apply them""" try: rule_def = self.field_definitions[json_id] except KeyError as e: self.json['__meta_metadata__']['__continuable_errors__'].append("Error - Unable to find '%s' field definition" % (json_id, )) return False # if not field_name: # model = self.json['__meta_metadata__']['__additional_info__']['model'] # if model == '__default__' or model not in self.model_definitions: # field_name = json_id # else: # field_name = self.model_definitions[model].get(json_id, json_id) field_name = json_id # Undo the workaround for [0] and [n] if isinstance(rule_def, list): return all(map(self._unpack_rule, rule_def)) # Already parsed, avoid doing it again if (json_id, field_name) in self._parsed: return field_name in self.json self._parsed.append((json_id, field_name)) return self._apply_rules(json_id, field_name, rule_def) or \ self._apply_virtual_rules(json_id, field_name, rule_def) def _apply_rules(self, json_id, field_name, rule_def): """Tries to apply a 'creator' rule""" applied = False for rule in rule_def['rules'].get( self.json['__meta_metadata__']['__additional_info__']['master_format'], []): elements = self._get_elements_from_blob(rule['source_tag']) if not elements: self._set_default_value(json_id, field_name) return False if not self._evaluate_decorators(rule): return False if 'entire_record' in rule['source_tag'] or '*' in rule['source_tag']: try: value = try_to_eval(rule['value'], self.functions, value=elements, self=self.json) self._remove_none_values(value) info = self._find_meta_metadata(json_id, field_name, 'creator', rule, rule_def) if 'json_ext' in rule_def: value = rule_def['json_ext']['dumps'](value) self.json.set(field_name, value, extend=True) self.json['__meta_metadata__.%s' % (SmartDict.main_key_pattern.sub('', field_name), )] = info applied = True except Exception as e: 
self.json['__meta_metadata__']['__errors__']\ .append('Rule Error - Unable to apply rule for field %s - %s' % (field_name, str(e)),) applied = False else: for element in elements: if not isinstance(element, (list, tuple)): element = (element, ) applied = False for e in element: if rule['only_if_master_value'] and \ not all(try_to_eval(rule['only_if_master_value'], self.functions, value=e, self=self.json)): applied = applied or False else: try: value = try_to_eval(rule['value'], self.functions, value=e, self=self.json) self._remove_none_values(value) info = self._find_meta_metadata(json_id, field_name, 'creator', rule, rule_def) if 'json_ext' in rule_def: value = rule_def['json_ext']['dumps'](value) self.json.set(field_name, value, extend=True) self.json['__meta_metadata__.%s' % (SmartDict.main_key_pattern.sub('', field_name), )] = info applied = applied or True except Exception as e: self.json['__meta_metadata__']['__errors__']\ .append('Rule Error - Unable to apply rule for field %s - %s' % (field_name, str(e)),) applied = applied or False if field_name not in self.json or not applied: self._set_default_value(json_id, field_name) return applied def _apply_virtual_rules(self, json_id, field_name, rule_def): """Tries to apply either a 'derived' or 'calculated' rule""" rules = [] rules.append(('calculated', rule_def['rules'].get('calculated', []))) rules.append(('derived', rule_def['rules'].get('derived', []))) for (rule_type, rrules) in rules: for rule in rrules: if not self._evaluate_decorators(rule): return False try: info = self._find_meta_metadata(json_id, field_name, rule_type, rule, rule_def) if rule_type == 'derived' or rule['memoize']: value = try_to_eval(rule['value'], self.functions, self=self.json) if 'json_ext' in rule_def: value = rule_def['json_ext']['dumps'](value) self._remove_none_values(value) else: value = None self.json.set(field_name, value, extend=True) self.json['__meta_metadata__.%s' % (SmartDict.main_key_pattern.sub('', field_name), )] = 
info except Exception as e: self.json['__meta_metadata__']['__continuable_errors__']\ .append('Virtual Rule CError - Unable to evaluate %s - %s' % (field_name, str(e))) return False if field_name not in self.json: self._set_default_value(json_id, field_name) return True def _evaluate_decorators(self, rule): """Evaluates all 'decorators' related with the current rule""" if rule['parse_first']: map(self._unpack_rule, try_to_eval(rule['parse_first'])) if rule['depends_on']: for key in try_to_eval(rule['depends_on']): if key in self.json: continue main_key = SmartDict.main_key_pattern.sub('', key) if not self._unpack_rule(main_key): return False if rule['only_if'] and not all(try_to_eval(rule['only_if'], self.functions, self=self.json)): return False return True def _find_meta_metadata(self, json_id, field_name, rule_type, rule, rule_def): """Given one rule fills up the parallel dictionary with the needed meta-metadata""" for alias in rule_def.get('aliases', []): self.json['__meta_metadata__.__aliases__.%s' % (alias, )] = field_name info = {} info['timestamp'] = datetime.datetime.now().isoformat() if rule_def.get('persistent_identifier', None) is not None: info['pid'] = rule_def['persistent_identifier'] info['memoize'] = rule.get('memoize', None) info['type'] = rule_type if rule_type in ('calculated', 'derived'): info['function'] = (json_id, 'rules', rule_type, 0, 'value') elif rule_type == 'UNKNOWN': info['function'] = 'UNKNOWN' info['source_tag'] = 'UNKNOWN' else: info['source_tag'] = rule['source_tag'] if 'json_ext' in rule: info['dumps'] = (json_id, 'json_ext', 'dumps') info['loads'] = (json_id, 'json_ext', 'loads') return info def _set_default_value(self, json_id, field_name): """ Finds the default value inside the schema, if any :return: tuple containing if the value is required and the default value. 
""" schema = self.field_definitions[json_id].get('schema', {}).get(json_id) if schema and 'default' in schema: try: value = schema['default']() try: value = self.field_definitions[json_id]['json_ext']['dumps'](value) except KeyError: pass self.json.set(field_name, value, extend=True) except Exception, e: self.json['__meta_metadata__']['__continuable_errors__']\ .append('Default Value CError - Unable to set default value for %s - %s' % (field_name, str(e))) def _remove_none_values(self, obj): """Handy closure to remove recursively None values from obj""" if isinstance(obj, dict): for key, value in obj.items(): if value is None: del obj[key] else: self._remove_none_values(value) if isinstance(obj, list): for element in obj: if element is None: obj.remove(element) else: self._remove_none_values(element) def _post_process_json(self): """ Responsible of doing any kind of transformation over the json structure after it is created, e.g. pruning the json to delete None values or singletons. """ pass
gpl-2.0
TRSasasusu/blender_envmap_to_unity
blender_envmap_to_unity.py
1
1793
import sys from PIL import Image if __name__ == "__main__": param = sys.argv if len(param) == 1 or len(param) > 3: sys.exit(0) image = Image.open(param[1]) # Size of the one image. size = int(image.size[1] / 2) if len(param) == 3 and param[2] == "-t": new_image = Image.new("RGB", (size * 4, size * 3)) image_0 = image.crop((0, 0, size, size)) new_image.paste(image_0, (0, size)) image_1 = image.crop((size, 0, size * 2 , size)) new_image.paste(image_1, (size * 3, size)) image_2 = image.crop((size * 2, 0, size * 3, size)) new_image.paste(image_2, (size * 2, size)) image_3 = image.crop((0, size, size, size * 2)) new_image.paste(image_3, (size, size * 2)) image_4 = image.crop((size, size, size * 2, size * 2)) new_image.paste(image_4, (size, 0)) image_5 = image.crop((size * 2, size, size * 3, size * 2)) new_image.paste(image_5, (size, size)) new_image.save("cubemap_t_" + image.filename) else: new_image = Image.new("RGB", (size * 6, size)) image_0 = image.crop((0, 0, size, size)) new_image.paste(image_0, (size, 0)) image_1 = image.crop((size, 0, size * 2 , size)) new_image.paste(image_1, (size * 5, 0)) image_2 = image.crop((size * 2, 0, size * 3, size)) new_image.paste(image_2, (0, 0)) image_3 = image.crop((0, size, size, size * 2)) new_image.paste(image_3, (size * 3, 0)) image_4 = image.crop((size, size, size * 2, size * 2)) new_image.paste(image_4, (size * 2, 0)) image_5 = image.crop((size * 2, size, size * 3, size * 2)) new_image.paste(image_5, (size * 4, 0)) new_image.save("cubemap_" + image.filename)
gpl-3.0
YanTangZhai/tf
tensorflow/g3doc/how_tos/adding_an_op/zero_out_3_test.py
9
1853
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for version 3 of the zero_out op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.python.platform import tensorflow as tf from tensorflow.g3doc.how_tos.adding_an_op import gen_zero_out_op_3 class ZeroOut3Test(tf.test.TestCase): def test(self): with self.test_session(): result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1]) self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0]) def testAttr(self): with self.test_session(): result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3) self.assertAllEqual(result.eval(), [0, 0, 0, 2, 0]) def testNegative(self): with self.test_session(): result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1) with self.assertRaisesOpError("Need preserve_index >= 0, got -1"): result.eval() def testLarge(self): with self.test_session(): result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17) with self.assertRaisesOpError("preserve_index out of range"): result.eval() if __name__ == '__main__': tf.test.main()
apache-2.0
jpn--/larch
larch/util/activitysim/nonmand_tour_freq.py
1
5435
import numpy as np import pandas as pd import re import os import yaml import itertools from typing import Mapping from larch import P, X, DataFrames, Model from larch.util import Dict from pathlib import Path import logging from ...log import logger_name from .general import remove_apostrophes, linear_utility_from_spec, apply_coefficients, cv_to_ca _logger = logging.getLogger(logger_name) def interaction_simulate_data( name="non_mandatory_tour_frequency", edb_directory="output/estimation_data_bundle/{name}/", settings_file="{name}_model_settings.yaml", spec_file="{name}_SPEC.csv", alt_def_file="{name}_alternatives.csv", coefficients_files="{segment_name}/{name}_coefficients_{segment_name}.csv", chooser_data_files="{segment_name}/{name}_choosers_combined.csv", alt_values_files="{segment_name}/{name}_interaction_expression_values.csv", ): edb_directory = edb_directory.format(name=name) def _read_csv(filename, **kwargs): filename = filename.format(name=name) return pd.read_csv(os.path.join(edb_directory, filename), **kwargs) settings_file = settings_file.format(name=name) with open(os.path.join(edb_directory, settings_file), "r") as yf: settings = yaml.load( yf, Loader=yaml.SafeLoader, ) coefficients = {} chooser_data = {} alt_values = {} segment_names = [s['NAME'] for s in settings['SPEC_SEGMENTS']] for segment_name in segment_names: coefficients[segment_name] = _read_csv( coefficients_files.format(name=name, segment_name=segment_name), index_col='coefficient_name', ) chooser_data[segment_name] = _read_csv( chooser_data_files.format(name=name, segment_name=segment_name), ) alt_values[segment_name] = _read_csv( alt_values_files.format(name=name, segment_name=segment_name), ) spec = _read_csv( spec_file, ) spec = remove_apostrophes(spec, ['Label']) # alt_names = list(spec.columns[3:]) # alt_codes = np.arange(1, len(alt_names) + 1) # alt_names_to_codes = dict(zip(alt_names, alt_codes)) # alt_codes_to_names = dict(zip(alt_codes, alt_names)) alt_def = 
_read_csv(alt_def_file.format(name=name), index_col=0) return Dict( edb_directory=Path(edb_directory), settings=settings, chooser_data=chooser_data, coefficients=coefficients, alt_values=alt_values, spec=spec, alt_def=alt_def, ) def link_same_value_coefficients(segment_names, coefficients, spec): # Assume all coefficients with exactly equal current values are # actually the same estimated coefficient value and should be # treated as such by Larch. Comment out this function where called to relax # this assumption, although be careful about the number of unique # parameters to estimate in these models. relabel_coef = {} for segment_name in segment_names: coef_backwards_map = dict([(j, i) for i, j in coefficients[segment_name]['value'].items()]) relabel_coef[segment_name] = r = coefficients[segment_name]['value'].map(coef_backwards_map) spec[segment_name] = spec[segment_name].map(r) def unavail_parameters(model): return model.pf.index[(model.pf.value < -900) & (model.pf.holdfast != 0)] def unavail_data_cols(model): locks = unavail_parameters(model) return [i.data for i in model.utility_ca if i.param in locks] def unavail(model, x_ca): lock_data = unavail_data_cols(model) if len(lock_data): unav = x_ca[lock_data[0]] > 0 for j in lock_data[1:]: unav |= x_ca[j] > 0 return unav def nonmand_tour_freq_model( edb_directory="output/estimation_data_bundle/{name}/", return_data=False, ): data = interaction_simulate_data( name="non_mandatory_tour_frequency", edb_directory=edb_directory, ) settings = data.settings segment_names = [s['NAME'] for s in settings['SPEC_SEGMENTS']] link_same_value_coefficients(segment_names, data.coefficients, data.spec) spec = data.spec coefficients = data.coefficients chooser_data = data.chooser_data alt_values = data.alt_values alt_def = data.alt_def m = {} for segment_name in segment_names: segment_model = m[segment_name] = Model() # One of the alternatives is coded as 0, so # we need to explicitly initialize the MNL nesting graph # and set to 
root_id to a value other than zero. segment_model.initialize_graph(alternative_codes=alt_def.index, root_id=9999) ### Utility specifications segment_model.utility_ca = linear_utility_from_spec( spec, x_col='Label', p_col=segment_name, ) apply_coefficients(coefficients[segment_name], segment_model) segment_model.choice_co_code = 'override_choice' ### Attach Data x_co = chooser_data[segment_name].set_index('person_id').rename(columns={'TAZ': 'HOMETAZ'}) x_ca = cv_to_ca( alt_values[segment_name].set_index(['person_id', 'variable']) ) d = DataFrames( co=x_co, ca=x_ca, av=~unavail(segment_model, x_ca), ) m[segment_name].dataservice = d if return_data: return m, data return m
gpl-3.0
polymonster/pmtech
tools/pmbuild_ext/cgu.py
1
23757
# cgu - code gen utilities for parsing c-like languages for use in code generation tools # copyright Alex Dixon 2020: https://github.com/polymonster/cgu/blob/master/license import re import json import sys # make code gen more readable and less fiddly def in_quotes(string): return "\"" + string + "\"" # append to string with newline print() style def src_line(line): line += "\n" return line # like c style unsigned wraparound def us(val): if val < 0: val = sys.maxsize + val return val # remove all single and multi line comments def remove_comments(source): lines = source.split("\n") inside_block = False conditioned = "" for line in lines: if inside_block: ecpos = line.find("*/") if ecpos != -1: inside_block = False line = line[ecpos+2:] else: continue cpos = line.find("//") mcpos = line.find("/*") if cpos != -1: conditioned += line[:cpos] + "\n" elif mcpos != -1: conditioned += line[:mcpos] + "\n" inside_block = True else: conditioned += line + "\n" return conditioned # generates a nice UI friendly name from, snake_case, camelCase or SCAREY_CASE and strip known prefixes def display_name(token, title): prefix = ["m_", "s_", "k_", "g_"] for p in prefix: if token.startswith(p): token = token[len(p):] break spaced = "" for i in range(len(token)): if i > 0: if token[i-1].islower() and token[i].isupper(): spaced += " " spaced += token[i] spaced = spaced.replace("_", " ") if title: spaced = spaced.title() else: spaced = spaced.capitalize() return spaced.strip() # finds the end of a body of text enclosed between 2 symbols ie. 
[], {}, <> def enclose(open_symbol, close_symbol, source, pos): pos = source.find(open_symbol, pos) stack = [open_symbol] pos += 1 while len(stack) > 0 and pos < len(source): if source[pos] == open_symbol: stack.append(open_symbol) if source[pos] == close_symbol: stack.pop() pos += 1 return pos # parse a string and return the end position in source, taking into account escaped \" quotes def enclose_string(start, source): pos = start+1 while True: pos = source.find("\"", pos) prev = pos - 1 if prev > 0: if source[prev] == "\\": pos = pos + 1 continue return pos+1 # un-terminated string print("ERROR: unterminated string") assert 0 # format source with indents def format_source(source, indent_size): formatted = "" lines = source.splitlines() indent = 0 indents = ["{"] unindnets = ["}"] newline = False for line in lines: if newline and len(line) > 0 and line[0] != "}": formatted += "\n" newline = False cur_indent = indent line = line.strip() attr = line.find("[[") if len(line) < 1 or attr != -1: continue for c in line: if c in indents: indent += 1 elif c in unindnets: indent -= 1 cur_indent = indent newline = True formatted += " " * cur_indent * indent_size formatted += line formatted += "\n" return formatted # returns the name of a type.. 
ie struct <name>, enum <name> def type_name(type_declaration): pos = type_declaration.find("{") name = type_declaration[:pos].strip().split()[1] return name # tidy source with consistent spaces, remove tabs and comments to make subsequent operations easier def sanitize_source(source): # replace tabs with spaces source = source.replace("\t", " ") # replace all spaces with single space source = re.sub(' +', ' ', source) # remove comments source = remove_comments(source) # remove empty lines and strip whitespace sanitized = "" for line in source.splitlines(): line = line.strip() if len(line) > 0: sanitized += src_line(line) return sanitized # finds token in source code def find_token(token, source): delimiters = [ "(", ")", "{", "}", ".", ",", "+", "-", "=", "*", "/", "&", "|", "~", "\n", "\t", "<", ">", "[", "]", ";", " " ] fp = source.find(token) if fp != -1: left = False right = False # check left if fp > 0: for d in delimiters: if source[fp - 1] == d: left = True break else: left = True # check right ep = fp + len(token) if fp < ep-1: for d in delimiters: if source[ep] == d: right = True break else: right = True if left and right: return fp # try again tt = find_token(token, source[fp + len(token):]) if tt == -1: return -1 return fp+len(token) + tt return -1 # replace all occurrences of token in source code def replace_token(token, replace, source): while True: pos = find_token(token, source) if pos == -1: break else: source = source[:pos] + replace + source[pos + len(token):] pass return source # find all occurences of token, with their location within source def find_all_tokens(token, source): pos = 0 locations = [] while True: token_pos = find_token(token, source[pos:]) if token_pos != -1: token_pos += pos locations.append(token_pos) pos = token_pos + len(token) else: break return locations # find all string literals in source def find_string_literals(source): pos = 0 strings = [] while True: pos = source.find("\"", pos) if pos == -1: break end = 
enclose_string(pos, source) string = source[pos:end] strings.append(string) pos = end+1 return strings # removes string literals and inserts a place holder, returning the ist of string literals and the conditioned source def placeholder_string_literals(source): strings = find_string_literals(source) index = 0 for s in strings: source = source.replace(s, '"<placeholder_string_literal_{}>"'.format(index)) index += 1 return strings, source # replace placeholder literals with the strings def replace_placeholder_string_literals(strings, source): index = 0 for s in strings: source = source.replace('"<placeholder_string_literal_{}>"'.format(index), s) index += 1 return source # get all enum member names and values def get_enum_members(declaration): start = declaration.find("{")+1 end = enclose("{", "}", declaration, 0)-1 body = declaration[start:end] members = body.split(",") conditioned = [] for member in members: if len(member.strip()) > 0: conditioned.append(member.strip()) enum_value = 0 enum_members = [] for member in conditioned: if member.find("=") != -1: name_value = member.split("=") enum_members.append({ "name": name_value[0], "value": name_value[1] }) else: enum_members.append({ "name": member, "value": enum_value }) enum_value += 1 return enum_members # get all struct member names, types, defaults and other metadata def get_struct_members(declaration): members = [] pos = declaration.find("{")+1 while pos != -1: end_pos = declaration.find(";", pos) if end_pos == -1: break bracket_pos = declaration.find("{", pos) start_pos = pos if bracket_pos < end_pos: end_pos = enclose("{", "}", declaration, start_pos) statement = declaration[start_pos:end_pos] member_type = "variable" if statement.find("(") != -1 and statement.find("=") == -1: member_type = "function" attrubutes = None attr_start = statement.find("[[") if attr_start != -1: attr_end = statement.find("]]") attrubutes = statement[attr_start+2:attr_end] members.append({ "type": member_type, "declaration": 
statement, "attributes": attrubutes }) pos = end_pos + 1 return members def get_members(type_specifier, declaration): lookup = { "enum": get_enum_members, "struct": get_struct_members } if type_specifier in lookup: return lookup[type_specifier](declaration) return [] # finds the fully qualified scope for a type declaration def get_type_declaration_scope(source, type_pos): scope_identifier = [ "namespace", "struct", "class" ] pos = 0 scopes = [] while True: scope_start, i = find_first(source, scope_identifier, pos) if scope_start != -1: scp = source.find(";", scope_start) pp = source.find("{", scope_start) if us(pp) > scp: if scp == -1: return scopes pos = scp continue scope_end = enclose("{", "}", source, scope_start) if scope_end > type_pos > scope_start: scope_name = type_name(source[scope_start:scope_end]) scopes.append({ "type": i, "name": scope_name }) pos = source.find("{", scope_start) + 1 else: pos = scope_end else: return scopes if pos > type_pos: return scopes return [] # return list of any typedefs for a particular type def find_typedefs(fully_qualified_name, source): pos = 0 typedefs = [] typedef_names = [] while True: start_pos = find_token("typedef", source[pos:]) if start_pos != -1: start_pos += pos end_pos = start_pos + source[start_pos:].find(";") typedef = source[start_pos:end_pos] q = find_token(fully_qualified_name, typedef) if q != -1: typedefs.append(source[start_pos:end_pos]) name = typedef[q+len(fully_qualified_name):end_pos].strip() typedef_names.append(name) pos = end_pos else: break return typedefs, typedef_names def find_type_attributes(source, type_pos): delimiters = [";", "}"] attr = source[:type_pos].rfind("[[") first_d = us(-1) for d in delimiters: first_d = min(us(source[:type_pos].rfind(d)), first_d) if first_d == us(-1): first_d = -1 if attr > first_d: attr_end = source[attr:].find("]]") return source[attr+2:attr+attr_end] return None # finds all type declarations.. ie struct, enum. 
returning them in dict with name, and code def find_type_declarations(type_specifier, source): results = [] names = [] pos = 0 while True: start_pos = find_token(type_specifier, source[pos:]) if start_pos != -1: start_pos += pos # handle forward decl fp, tok = find_first(source, ["{", ";"], start_pos) forward = False members = [] if tok == ";": declaration = source[start_pos:fp] forward = True end_pos = fp else: end_pos = enclose("{", "}", source, start_pos) declaration = source[start_pos:end_pos] members = get_members(type_specifier, declaration) scope = get_type_declaration_scope(source, start_pos) name = type_name(declaration) qualified_name = "" for s in scope: if s["type"] == "namespace": qualified_name += s["name"] + "::" qualified_name += name typedefs, typedef_names = find_typedefs(qualified_name, source) attributes = find_type_attributes(source, start_pos) results.append({ "type": type_specifier, "name": name, "qualified_name": qualified_name, "declaration": declaration, "members": members, "scope": scope, "typedefs": typedefs, "typedef_names": typedef_names, "attributes": attributes, "forward_declaration": forward }) pos = end_pos+1 else: break for r in results: names.append(r["name"]) names.append(r["qualified_name"]) for name in r["typedef_names"]: names.append(name) return results, names # find include statements def find_include_statements(source): includes = [] for line in source.splitlines(): if line.strip().startswith("#include"): includes.append(line) return includes # finds the next token ignoring white space def next_token(source, start): white_space = [" ", "\n"] pos = start+1 while True: if source[pos] in white_space: pos += 1 else: return source[pos] if pos >= len(source): break return None # find first token def find_first(source, tokens, start): first = sys.maxsize first_token = "" for t in tokens: i = source.find(t, start) if first > i > -1: first = i first_token = t return first, first_token def arg_list(args): args = args.replace("\n", " 
") args = re.sub(' +', ' ', args) pos = 0 a = [] while True: # find comma separators cp = args.find(",", pos) ep = args.find("=", pos) if cp == -1: # add final arg aa = args[pos:].strip() if len(aa) > 0: a.append(args[pos:].strip()) break if -1 < ep < cp: # handle function with default end, tok = find_first(args, [",", "{", "("], ep) if tok == ",": a.append(args[pos:end].strip()) pos = end+1 continue elif tok == "(": end = enclose(tok, ")", args, end) else: end = enclose(tok, "}", args, end) a.append(args[pos:end]) end = args.find(",", end) if end == -1: end = len(args) pos = end+1 else: # plain arg a.append(args[pos:cp].strip()) pos = cp+1 return a # break down arg decl (int a, int b, int c = 0) into contextual info def breakdown_function_args(args): args = arg_list(args) args_context = [] for a in args: a = a.strip() if len(a) == "": continue if a == "...": # va list args_context.append({ "type": "...", "name": "va_list", "default": None }) else: # any other args dp = a.find("=") default = None decl = a if dp != -1: # extract default default = a[dp + 1:].strip() decl = a[:dp - 1] name_pos = decl.rfind(" ") args_context.append({ "type": decl[:name_pos], "name": decl[name_pos:], "default": default }) return args_context # parse return type of function and split out any template or inline def parse_return_type(statement): template = None inline = None rt = statement.strip("}") rt = rt.strip() tp = rt.find("template") if tp != -1: etp = enclose("<", ">", rt, tp) template = rt[tp:etp] rt = rt[etp:] ip = rt.find("inline") if ip != -1: ipe = ip+len("inline") inline = rt[:ipe] rt = rt[ipe:] return rt, template, inline # find functions def find_functions(source): # look for parenthesis to identiy function decls functions = [] function_names = [] pos = 0 while True: statement_end, statement_token = find_first(source, [";", "{"], pos) if statement_end == -1: break statement = source[pos:statement_end] pp = statement.find("(") ep = statement.find("=") if pp != -1: next = 
next_token(statement, pp) if (ep == -1 or pp < ep) and next != "*": # this a function decl, so break down into context body = "" if statement_token == "{": body_end = enclose("{", "}", source, statement_end-1) body = source[statement_end-1:body_end] statement_end = body_end+1 args_end = enclose("(", ")", statement, pp)-1 name_pos = statement[:pp].rfind(" ") name = statement[name_pos+1:pp] name_unscoped = name.rfind(":") qualifier = "" if name_unscoped != -1: qualifier = name[:name_unscoped-1] name = name[name_unscoped+1:] return_type, template, inline = parse_return_type(statement[:name_pos]) args = breakdown_function_args(statement[pp+1:args_end]) scope = get_type_declaration_scope(source, pos) functions.append({ "name": name, "qualifier": qualifier, "return_type": return_type, "args": args, "scope": scope, "body": body, "template": template, "inline": inline }) function_names.append(name) pos = statement_end + 1 if pos > len(source): break return functions, function_names # returns prototype with no names, ie (int, int, float), from a function context def get_funtion_prototype(func): args = "" num_args = len(func["args"]) for a in range(0, num_args): args += func["args"][a]["type"] if a < num_args - 1: args += ", " if num_args == 0: args = "void" return "(" + args + ")" # main function for scope def test(): # read source from file source = open("test.h", "r").read() # sanitize source to make further ops simpler source = sanitize_source(source) print("--------------------------------------------------------------------------------") print("sanitize source ----------------------------------------------------------------") print("--------------------------------------------------------------------------------") print(source) # find all include statements, fromsanitized source to ignore commented out ones includes = find_include_statements(source) print("--------------------------------------------------------------------------------") print("find includes 
------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print(includes) # find string literals within source print("--------------------------------------------------------------------------------") print("find strings literals ----------------------------------------------------------") print("--------------------------------------------------------------------------------") strings = find_string_literals(source) print(strings) # remove string literals to avoid conflicts when parsing print("--------------------------------------------------------------------------------") print("placeholder literals -----------------------------------------------------------") print("--------------------------------------------------------------------------------") strings, source = placeholder_string_literals(source) print(format_source(source, 4)) # find single token print("--------------------------------------------------------------------------------") print("find token ---------------------------------------------------------------------") print("--------------------------------------------------------------------------------") token = "SOME_TOKEN" token_pos = find_token(token, source) print("token pos: {}".format(token_pos)) print("token:" + source[token_pos:token_pos+len(token)]) print("--------------------------------------------------------------------------------") print("find all tokens ----------------------------------------------------------------") print("--------------------------------------------------------------------------------") token = "int" token_locations = find_all_tokens(token, source) for loc in token_locations: print("{}: ".format(loc) + source[loc:loc+10] + "...") # find structs print("--------------------------------------------------------------------------------") print("find structs 
-------------------------------------------------------------------") print("--------------------------------------------------------------------------------") structs, struct_names = find_type_declarations("struct", source) print(struct_names) print(json.dumps(structs, indent=4)) # find enums print("--------------------------------------------------------------------------------") print("find enums ---------------------------------------------------------------------") print("--------------------------------------------------------------------------------") enums, enum_names = find_type_declarations("enum", source) print(enum_names) print(json.dumps(enums, indent=4)) # find free functions print("--------------------------------------------------------------------------------") print("find functions -----------------------------------------------------------------") print("--------------------------------------------------------------------------------") functions, function_names = find_functions(source) print(function_names) print(json.dumps(functions, indent=4)) # replace placeholder literals print("--------------------------------------------------------------------------------") print("replace placeholder literals ---------------------------------------------------") print("--------------------------------------------------------------------------------") source = replace_placeholder_string_literals(strings, source) print(format_source(source, 4)) # display names print("--------------------------------------------------------------------------------") print(" display name ------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print(display_name("m_snake_case_variable", False)) print(display_name("m_camelCaseVariable", False)) print(display_name("SCAREY_CASE_DEFINE", True)) # entry if __name__ == "__main__": test()
mit
highweb-project/highweb-webcl-html5spec
third_party/WebKit/Source/devtools/scripts/compiler-runner/build_compiler_runner_jar.py
67
2199
#!/usr/bin/python import os import shutil import subprocess import sys import tempfile def rel_to_abs(rel_path): return os.path.join(script_path, rel_path) java_bin_path = os.getenv('JAVA_HOME', '') if java_bin_path: java_bin_path = os.path.join(java_bin_path, 'bin') main_class = 'org.chromium.devtools.compiler.Runner' jar_name = 'closure-runner.jar' src_dir = 'src' script_path = os.path.dirname(os.path.abspath(__file__)) closure_jar_relpath = os.path.join('..', 'closure', 'compiler.jar') src_path = rel_to_abs(src_dir) def run_and_communicate(command, error_template): proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True) proc.communicate() if proc.returncode: print >> sys.stderr, error_template % proc.returncode sys.exit(proc.returncode) def build_artifacts(): print 'Compiling...' java_files = [] for root, dirs, files in sorted(os.walk(src_path)): for file_name in files: java_files.append(os.path.join(root, file_name)) bin_path = tempfile.mkdtemp() manifest_file = tempfile.NamedTemporaryFile(mode='wt', delete=False) try: manifest_file.write('Class-Path: %s\n' % closure_jar_relpath) manifest_file.close() javac_path = os.path.join(java_bin_path, 'javac') javac_command = '%s -d %s -cp %s %s' % (javac_path, bin_path, rel_to_abs(closure_jar_relpath), ' '.join(java_files)) run_and_communicate(javac_command, 'Error: javac returned %d') print 'Building jar...' artifact_path = rel_to_abs(jar_name) jar_path = os.path.join(java_bin_path, 'jar') jar_command = '%s cvfme %s %s %s -C %s .' % (jar_path, artifact_path, manifest_file.name, main_class, bin_path) run_and_communicate(jar_command, 'Error: jar returned %d') finally: os.remove(manifest_file.name) shutil.rmtree(bin_path, True) def help(): print 'usage: %s' % os.path.basename(__file__) print 'Builds compiler-runner.jar from the %s directory contents' % src_dir def main(): if len(sys.argv) > 1: help() return build_artifacts() print 'Done.' if __name__ == '__main__': main()
bsd-3-clause
bpramod/azure-linux-extensions
OSPatching/patch/SuSEPatching.py
8
3479
#!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import shutil

from Utils.WAAgentUtil import waagent
from AbstractPatching import AbstractPatching


class SuSEPatching(AbstractPatching):
    """zypper-based AbstractPatching implementation for SuSE systems.

    All package operations shell out to zypper via waagent.Run; downloaded
    patch RPMs are cached under <script dir>/packages.
    """

    def __init__(self, hutil):
        super(SuSEPatching, self).__init__(hutil)
        # Lazily populated by patch_package(): /tmp paths of the RPM files
        # that were staged from the cache directory.
        self.patched_pkgs = None
        self.cache_dir = os.path.join(os.path.dirname(sys.argv[0]), 'packages')
        if not os.path.isdir(self.cache_dir):
            os.mkdir(self.cache_dir)
        self.clean_cmd = 'zypper clean'
        self.check_cmd = 'zypper -q --gpg-auto-import-keys --non-interactive list-patches'
        self.check_security_cmd = self.check_cmd + ' --category security'
        # '-d' makes zypper download only (no install); trailing space so a
        # patch name can be appended directly.
        self.download_cmd = ('zypper --non-interactive --pkg-cache-dir '
                             + self.cache_dir
                             + ' install -d --auto-agree-with-licenses -t patch ')
        self.patch_cmd = ('zypper --non-interactive --pkg-cache-dir '
                          + self.cache_dir
                          + ' install --auto-agree-with-licenses -t patch ')
        self.pkg_query_cmd = 'rpm -qlp'
        # Refresh repository metadata up front so list/install see current data.
        waagent.Run('zypper -q --gpg-auto-import-keys --non-interactive refresh',
                    False)

    def check(self, category):
        """
        Check valid upgrades,
        Return the package list to upgrade
        """
        if category == self.category_all:
            check_cmd = self.check_cmd
        elif category == self.category_required:
            check_cmd = self.check_security_cmd
        else:
            # Unknown category: fall back to the full patch listing instead
            # of hitting an UnboundLocalError on the call below.
            check_cmd = self.check_cmd
        retcode, output = waagent.RunGetOutput(check_cmd)
        output_lines = output.split('\n')
        patch_list = []
        # zypper prints a '|'-separated table; default to column 1 until the
        # header row tells us where 'Name' actually is.
        name_position = 1
        for line in output_lines:
            properties = [elem.strip() for elem in line.split('|')]
            if len(properties) > 1:
                if 'Name' in properties:
                    # Header row: remember which column holds the patch name.
                    name_position = properties.index('Name')
                elif not properties[name_position] in self.to_patch:
                    patch_list.append(properties[name_position])
        return retcode, patch_list

    def download_package(self, package):
        """Download (without installing) one patch; return 0 on success."""
        retcode = waagent.Run(self.download_cmd + package, False)
        # zypper codes 1-99 are hard errors; 100+ are informational statuses.
        if 0 < retcode < 100:
            return 1
        return 0

    def patch_package(self, package):
        """Install one patch from the local cache; return 0 on success.

        On first use, stage every cached RPM into /tmp and record the paths
        in self.patched_pkgs for later reporting.
        """
        if self.patched_pkgs is None:
            self.patched_pkgs = list()
            for root, dirs, files in os.walk(self.cache_dir):
                for filename in files:
                    if filename.endswith('rpm'):
                        shutil.copy(os.path.join(root, filename), "/tmp/")
                        self.patched_pkgs.append("/tmp/" + filename)
        retcode = waagent.Run(self.patch_cmd + package, False)
        if 0 < retcode < 100:
            return 1
        # 102 == ZYPPER_EXIT_INF_REBOOT_NEEDED: success, but flag a reboot.
        if retcode == 102:
            self.reboot_required = True
        return 0

    def check_reboot(self):
        # Reboot need is detected from zypper's 102 exit code in
        # patch_package(), so there is nothing extra to check here.
        pass

    def get_pkg_patched(self):
        """Return the list of staged RPM paths (None before any patching)."""
        return self.patched_pkgs
apache-2.0
ankurankan/scikit-learn
examples/linear_model/plot_sgd_penalties.py
249
1563
""" ============== SGD: Penalties ============== Plot the contours of the three penalties. All of the above are supported by :class:`sklearn.linear_model.stochastic_gradient`. """ from __future__ import division print(__doc__) import numpy as np import matplotlib.pyplot as plt def l1(xs): return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs]) def l2(xs): return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs]) def el(xs, z): return np.array([(2 - 2 * x - 2 * z + 4 * x * z - (4 * z ** 2 - 8 * x * z ** 2 + 8 * x ** 2 * z ** 2 - 16 * x ** 2 * z ** 3 + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2) - 2 * x * z ** 2) / (2 - 4 * z) for x in xs]) def cross(ext): plt.plot([-ext, ext], [0, 0], "k-") plt.plot([0, 0], [-ext, ext], "k-") xs = np.linspace(0, 1, 100) alpha = 0.501 # 0.5 division throuh zero cross(1.2) plt.plot(xs, l1(xs), "r-", label="L1") plt.plot(xs, -1.0 * l1(xs), "r-") plt.plot(-1 * xs, l1(xs), "r-") plt.plot(-1 * xs, -1.0 * l1(xs), "r-") plt.plot(xs, l2(xs), "b-", label="L2") plt.plot(xs, -1.0 * l2(xs), "b-") plt.plot(-1 * xs, l2(xs), "b-") plt.plot(-1 * xs, -1.0 * l2(xs), "b-") plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net") plt.plot(xs, -1.0 * el(xs, alpha), "y-") plt.plot(-1 * xs, el(xs, alpha), "y-") plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-") plt.xlabel(r"$w_0$") plt.ylabel(r"$w_1$") plt.legend() plt.axis("equal") plt.show()
bsd-3-clause
asankasanjaya/stratos
components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/databridge/thrift/thrift/transport/TTwisted.py
19
6528
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# NOTE: vendored Apache Thrift Twisted transport (Python 2 / old Twisted
# style: cStringIO, iteritems, zope implements). Kept byte-compatible with
# upstream; only comments below are local additions.

from cStringIO import StringIO

from zope.interface import implements, Interface, Attribute
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
    connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from twisted.web import server, resource, http

import TTransport


class TMessageSenderTransport(TTransport.TTransportBase):
    # Buffers writes in memory; flush() hands the whole accumulated message
    # to sendMessage(), which subclasses must implement.

    def __init__(self):
        self.__wbuf = StringIO()

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        # Swap in a fresh buffer before sending so a re-entrant write during
        # sendMessage() starts a new message.
        msg = self.__wbuf.getvalue()
        self.__wbuf = StringIO()
        self.sendMessage(msg)

    def sendMessage(self, message):
        raise NotImplementedError


class TCallbackTransport(TMessageSenderTransport):
    # Message-sender transport that delivers each flushed message to a
    # user-supplied callable.

    def __init__(self, func):
        TMessageSenderTransport.__init__(self)
        self.func = func

    def sendMessage(self, message):
        self.func(message)


class ThriftClientProtocol(basic.Int32StringReceiver):
    # Twisted protocol speaking length-prefixed (framed) Thrift as a client.
    # `started` fires with the constructed client once the connection is up.

    MAX_LENGTH = 2 ** 31 - 1

    def __init__(self, client_class, iprot_factory, oprot_factory=None):
        self._client_class = client_class
        self._iprot_factory = iprot_factory
        if oprot_factory is None:
            # Symmetric protocols: reuse the input factory for output.
            self._oprot_factory = iprot_factory
        else:
            self._oprot_factory = oprot_factory

        # Cache of method-name -> bound recv_* handler, filled on demand.
        self.recv_map = {}
        self.started = defer.Deferred()

    def dispatch(self, msg):
        self.sendString(msg)

    def connectionMade(self):
        tmo = TCallbackTransport(self.dispatch)
        self.client = self._client_class(tmo, self._oprot_factory)
        self.started.callback(self.client)

    def connectionLost(self, reason=connectionDone):
        # Fail every outstanding request so callers are not left hanging.
        for k, v in self.client._reqs.iteritems():
            tex = TTransport.TTransportException(
                type=TTransport.TTransportException.END_OF_FILE,
                message='Connection closed')
            v.errback(tex)

    def stringReceived(self, frame):
        # One frame == one complete Thrift message; route it to the client's
        # matching recv_<method> handler.
        tr = TTransport.TMemoryBuffer(frame)
        iprot = self._iprot_factory.getProtocol(tr)
        (fname, mtype, rseqid) = iprot.readMessageBegin()

        try:
            method = self.recv_map[fname]
        except KeyError:
            method = getattr(self.client, 'recv_' + fname)
            self.recv_map[fname] = method

        method(iprot, mtype, rseqid)


class ThriftServerProtocol(basic.Int32StringReceiver):
    # Server side of the framed transport; delegates each frame to the
    # factory's processor and writes back any non-empty response.

    MAX_LENGTH = 2 ** 31 - 1

    def dispatch(self, msg):
        self.sendString(msg)

    def processError(self, error):
        self.transport.loseConnection()

    def processOk(self, _, tmo):
        msg = tmo.getvalue()

        # One-way calls produce no output; only reply when there is a body.
        if len(msg) > 0:
            self.dispatch(msg)

    def stringReceived(self, frame):
        tmi = TTransport.TMemoryBuffer(frame)
        tmo = TTransport.TMemoryBuffer()

        iprot = self.factory.iprot_factory.getProtocol(tmi)
        oprot = self.factory.oprot_factory.getProtocol(tmo)

        d = self.factory.processor.process(iprot, oprot)
        d.addCallbacks(self.processOk, self.processError,
                       callbackArgs=(tmo,))


class IThriftServerFactory(Interface):

    processor = Attribute("Thrift processor")

    iprot_factory = Attribute("Input protocol factory")

    oprot_factory = Attribute("Output protocol factory")


class IThriftClientFactory(Interface):

    client_class = Attribute("Thrift client class")

    iprot_factory = Attribute("Input protocol factory")

    oprot_factory = Attribute("Output protocol factory")


class ThriftServerFactory(ServerFactory):

    implements(IThriftServerFactory)

    protocol = ThriftServerProtocol

    def __init__(self, processor, iprot_factory, oprot_factory=None):
        self.processor = processor
        self.iprot_factory = iprot_factory
        if oprot_factory is None:
            self.oprot_factory = iprot_factory
        else:
            self.oprot_factory = oprot_factory


class ThriftClientFactory(ClientFactory):

    implements(IThriftClientFactory)

    protocol = ThriftClientProtocol

    def __init__(self, client_class, iprot_factory, oprot_factory=None):
        self.client_class = client_class
        self.iprot_factory = iprot_factory
        if oprot_factory is None:
            self.oprot_factory = iprot_factory
        else:
            self.oprot_factory = oprot_factory

    def buildProtocol(self, addr):
        p = self.protocol(self.client_class, self.iprot_factory,
                          self.oprot_factory)
        p.factory = self
        return p


class ThriftResource(resource.Resource):
    # twisted.web resource serving Thrift-over-HTTP (POST only).

    allowedMethods = ('POST',)

    def __init__(self, processor, inputProtocolFactory,
                 outputProtocolFactory=None):
        resource.Resource.__init__(self)
        self.inputProtocolFactory = inputProtocolFactory
        if outputProtocolFactory is None:
            self.outputProtocolFactory = inputProtocolFactory
        else:
            self.outputProtocolFactory = outputProtocolFactory
        self.processor = processor

    def getChild(self, path, request):
        # Serve every sub-path with this same resource.
        return self

    def _cbProcess(self, _, request, tmo):
        # Processor finished: send the serialized reply and close the request.
        msg = tmo.getvalue()
        request.setResponseCode(http.OK)
        request.setHeader("content-type", "application/x-thrift")
        request.write(msg)
        request.finish()

    def render_POST(self, request):
        request.content.seek(0, 0)
        data = request.content.read()
        tmi = TTransport.TMemoryBuffer(data)
        tmo = TTransport.TMemoryBuffer()

        iprot = self.inputProtocolFactory.getProtocol(tmi)
        oprot = self.outputProtocolFactory.getProtocol(tmo)

        d = self.processor.process(iprot, oprot)
        d.addCallback(self._cbProcess, request, tmo)
        # Response is written asynchronously from _cbProcess.
        return server.NOT_DONE_YET
apache-2.0
sonchang/python-agent
cattle/plugins/host-api/host_api.py
4
1095
import os

from cattle.plugins.docker import DockerConfig
from cattle import Config
from cattle.process_manager import background


class HostApi(object):
    """Agent plugin that launches the host-api companion process."""

    def on_startup(self):
        """Spawn host-api in the background, passing Cattle credentials
        through the child environment and all settings as CLI flags."""
        child_env = dict(os.environ)
        child_env.update({
            'HOST_API_CATTLE_ACCESS_KEY': Config.access_key(),
            'HOST_API_CATTLE_SECRET_KEY': Config.secret_key(),
        })

        cadvisor_url = 'http://{0}:{1}'.format(Config.cadvisor_ip(),
                                               Config.cadvisor_port())

        # Assemble the argv pair by pair; order matches what host-api expects.
        command = ['host-api']
        command += ['-cadvisor-url', cadvisor_url]
        command += ['-logtostderr=true']
        command += ['-ip', Config.host_api_ip()]
        command += ['-port', str(Config.host_api_port())]
        command += ['-auth=true']
        command += ['-host-uuid', DockerConfig.docker_uuid()]
        command += ['-public-key', Config.jwt_public_key_file()]
        command += ['-cattle-url', Config.api_url()]
        command += ['-cattle-state-dir', Config.container_state_dir()]

        background(command, env=child_env)


def host_api_config():
    """Return the absolute path of the host-api.conf bundled next to
    this module."""
    plugin_dir = os.path.dirname(__file__)
    return os.path.join(plugin_dir, 'host-api.conf')
apache-2.0
yugui/grpc
examples/python/multiplex/helloworld_pb2_grpc.py
85
1378
# Generated by the gRPC Python protocol compiler plugin — DO NOT EDIT by hand;
# regenerate from helloworld.proto instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities

import helloworld_pb2 as helloworld__pb2


class GreeterStub(object):
  """The greeting service definition.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Each attribute is a callable multi-callable bound to one RPC method.
    self.SayHello = channel.unary_unary(
        '/helloworld.Greeter/SayHello',
        request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
        response_deserializer=helloworld__pb2.HelloReply.FromString,
        )


class GreeterServicer(object):
  """The greeting service definition.
  """

  def SayHello(self, request, context):
    """Sends a greeting
    """
    # Default generated stub: subclasses override this with a real handler.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')


def add_GreeterServicer_to_server(servicer, server):
  # Register the servicer's handlers on the given grpc.Server under the
  # fully qualified service name 'helloworld.Greeter'.
  rpc_method_handlers = {
      'SayHello': grpc.unary_unary_rpc_method_handler(
          servicer.SayHello,
          request_deserializer=helloworld__pb2.HelloRequest.FromString,
          response_serializer=helloworld__pb2.HelloReply.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'helloworld.Greeter', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
bsd-3-clause
zhengwsh/InplusTrader_Linux
InplusTrader/backtestEngine/mod/analyser/mod.py
1
10204
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pickle
from collections import defaultdict

import six
from enum import Enum
import numpy as np
import pandas as pd

from rqalpha.interface import AbstractMod
from rqalpha.events import EVENT
from rqalpha.const import ACCOUNT_TYPE, EXIT_CODE
from rqalpha.utils.risk import Risk
from rqalpha.utils.repr import properties
from rqalpha.execution_context import ExecutionContext


class AnalyserMod(AbstractMod):
    """Backtest-analysis mod: records orders, trades, and daily portfolio
    snapshots via event-bus listeners, then assembles summary statistics,
    DataFrames, plots, and reports in tear_down()."""

    def __init__(self):
        self._env = None
        self._mod_config = None
        self._enabled = False
        # Final result dict assembled in tear_down().
        self._result = None

        # Per-trading-date accumulators, filled by the event listeners below.
        self._orders = defaultdict(list)
        self._trades = []
        self._total_portfolios = []
        self._sub_portfolios = defaultdict(list)
        self._positions = defaultdict(list)

        self._benchmark_daily_returns = []
        self._portfolio_daily_returns = []
        self._latest_portfolio = None
        self._latest_benchmark_portfolio = None

    def start_up(self, env, mod_config):
        """Register event listeners when any output option is enabled."""
        self._env = env
        self._mod_config = mod_config
        # Recording is only worthwhile if at least one output sink is on.
        self._enabled = (self._mod_config.record or self._mod_config.plot or
                         self._mod_config.output_file or
                         self._mod_config.plot_save_file or
                         self._mod_config.report_save_path)

        if self._enabled:
            env.event_bus.add_listener(EVENT.POST_SETTLEMENT, self._collect_daily)
            env.event_bus.add_listener(EVENT.TRADE, self._collect_trade)
            env.event_bus.add_listener(EVENT.ORDER_CREATION_PASS, self._collect_order)

    def _collect_trade(self, account, trade):
        # EVENT.TRADE handler: store a flattened dict record of the trade.
        self._trades.append(self._to_trade_record(trade))

    def _collect_order(self, account, order):
        # EVENT.ORDER_CREATION_PASS handler: group orders by trading date.
        self._orders[order.trading_datetime.date()].append(order)

    def _collect_daily(self):
        """EVENT.POST_SETTLEMENT handler: snapshot the total portfolio,
        benchmark returns, and every sub-account's portfolio/positions."""
        date = self._env.calendar_dt.date()
        portfolio = self._env.account.get_portfolio(date)

        self._latest_portfolio = portfolio
        self._portfolio_daily_returns.append(portfolio.daily_returns)
        self._total_portfolios.append(self._to_portfolio_record(date, portfolio))

        if ACCOUNT_TYPE.BENCHMARK in self._env.accounts:
            self._latest_benchmark_portfolio = self._env.accounts[ACCOUNT_TYPE.BENCHMARK].portfolio
            self._benchmark_daily_returns.append(self._latest_benchmark_portfolio.daily_returns)
        else:
            # No benchmark configured: keep the series aligned with zeros.
            self._benchmark_daily_returns.append(0)

        for account_type, account in six.iteritems(self._env.accounts):
            portfolio = account.get_portfolio(date)
            self._sub_portfolios[account_type].append(self._to_portfolio_record2(date, portfolio))
            for order_book_id, position in six.iteritems(portfolio.positions):
                self._positions[account_type].append(self._to_position_record(date, order_book_id, position))

    def _symbol(self, order_book_id):
        # Human-readable instrument symbol for an order_book_id.
        return self._env.data_proxy.instruments(order_book_id).symbol

    @staticmethod
    def _safe_convert(value, ndigits=3):
        # Make a value serialization-friendly: Enum -> name, floats rounded
        # to ndigits, everything else passed through unchanged.
        if isinstance(value, Enum):
            return value.name

        if isinstance(value, (float, np.float64, np.float32, np.float16, np.float)):
            return round(value, ndigits)

        return value

    def _to_portfolio_record(self, date, portfolio):
        """Flatten a portfolio's public properties into a dict record,
        dropping private names and bulky/redundant fields."""
        data = {
            k: self._safe_convert(v, 3) for k, v in six.iteritems(properties(portfolio))
            if not k.startswith('_') and not k.endswith('_') and k not in {
                "positions", "start_date", "starting_cash"
            }
        }
        data['date'] = date
        return data

    def _to_portfolio_record2(self, date, portfolio):
        # Same as _to_portfolio_record but reads __dict__ directly — used for
        # sub-account portfolios whose values live in plain attributes.
        data = {
            k: self._safe_convert(v, 3) for k, v in six.iteritems(portfolio.__dict__)
            if not k.startswith('_') and not k.endswith('_') and k not in {
                "positions", "start_date", "starting_cash"
            }
        }
        data['date'] = date
        return data

    def _to_position_record(self, date, order_book_id, position):
        # Flatten one position's attributes plus identifying columns.
        data = {
            k: self._safe_convert(v, 3) for k, v in six.iteritems(position.__dict__)
            if not k.startswith('_') and not k.endswith('_')
        }
        data['order_book_id'] = order_book_id
        data['symbol'] = self._symbol(order_book_id)
        data['date'] = date
        return data

    def _to_trade_record(self, trade):
        """Flatten a trade (and the relevant bits of its order) into a dict
        with string-formatted datetimes."""
        data = {
            k: self._safe_convert(v) for k, v in six.iteritems(properties(trade))
            if not k.startswith('_') and not k.endswith('_') and k != 'order'
        }
        data['order_book_id'] = trade.order.order_book_id
        data['symbol'] = self._symbol(trade.order.order_book_id)
        data['side'] = self._safe_convert(trade.order.side)
        data['position_effect'] = self._safe_convert(trade.order.position_effect)
        data['datetime'] = data['datetime'].strftime("%Y-%m-%d %H:%M:%S")
        data['trading_datetime'] = data['trading_datetime'].strftime("%Y-%m-%d %H:%M:%S")
        return data

    def tear_down(self, code, exception=None):
        """Assemble the result dict (summary + DataFrames) and emit it to
        every configured sink: pickle file, plot window/file, report path.

        Only runs on a successful exit with recording enabled.
        """
        if code != EXIT_CODE.EXIT_SUCCESS or not self._enabled:
            return

        strategy_name = os.path.basename(self._env.config.base.strategy_file).split(".")[0]
        data_proxy = self._env.data_proxy

        summary = {
            'strategy_name': strategy_name,
        }
        # Copy base config into the summary, skipping non-serializable or
        # irrelevant entries.
        for k, v in six.iteritems(self._env.config.base.__dict__):
            if k in ["trading_calendar", "account_list", "timezone",
                     "persist_mode", "resume_mode", "data_bundle_path",
                     "handle_split", "persist"]:
                continue
            summary[k] = self._safe_convert(v, 2)

        # Risk metrics computed from the daily return series against the
        # benchmark and the period's risk-free rate.
        risk = Risk(np.array(self._portfolio_daily_returns),
                    np.array(self._benchmark_daily_returns),
                    data_proxy.get_risk_free_rate(self._env.config.base.start_date,
                                                  self._env.config.base.end_date),
                    (self._env.config.base.end_date - self._env.config.base.start_date).days + 1)
        summary.update({
            'alpha': self._safe_convert(risk.alpha, 3),
            'beta': self._safe_convert(risk.beta, 3),
            'sharpe': self._safe_convert(risk.sharpe, 3),
            'information_ratio': self._safe_convert(risk.information_ratio, 3),
            'downside_risk': self._safe_convert(risk.annual_downside_risk, 3),
            'tracking_error': self._safe_convert(risk.annual_tracking_error, 3),
            'sortino': self._safe_convert(risk.sortino, 3),
            'volatility': self._safe_convert(risk.annual_volatility, 3),
            'max_drawdown': self._safe_convert(risk.max_drawdown, 3),
        })

        summary.update({
            k: self._safe_convert(v, 3) for k, v in six.iteritems(properties(self._latest_portfolio))
            if k not in ["positions", "daily_returns", "daily_pnl"]
        })

        if self._latest_benchmark_portfolio:
            summary['benchmark_total_returns'] = self._latest_benchmark_portfolio.total_returns
            summary['benchmark_annualized_returns'] = self._latest_benchmark_portfolio.annualized_returns

        trades = pd.DataFrame(self._trades)
        if 'datetime' in trades.columns:
            trades = trades.set_index('datetime')

        df = pd.DataFrame(self._total_portfolios)
        df['date'] = pd.to_datetime(df['date'])
        total_portfolios = df.set_index('date').sort_index()

        result_dict = {
            'summary': summary,
            'trades': trades,
            'total_portfolios': total_portfolios,
        }

        if ExecutionContext.plots is not None:
            # Pivot user plots ({series: {date: value}}) into one DataFrame
            # with a row per date and a column per series.
            plots = ExecutionContext.plots.get_plots()
            plots_items = defaultdict(dict)
            for series_name, value_dict in six.iteritems(plots):
                for date, value in six.iteritems(value_dict):
                    plots_items[date][series_name] = value
                    plots_items[date]["date"] = date

            df = pd.DataFrame([dict_data for date, dict_data in six.iteritems(plots_items)])
            df["date"] = pd.to_datetime(df["date"])
            df = df.set_index("date").sort_index()
            result_dict["plots"] = df

        # One portfolios/positions DataFrame pair per sub-account.
        for account_type, account in six.iteritems(self._env.accounts):
            account_name = account_type.name.lower()
            portfolios_list = self._sub_portfolios[account_type]
            df = pd.DataFrame(portfolios_list)
            df["date"] = pd.to_datetime(df["date"])
            portfolios_df = df.set_index("date").sort_index()
            result_dict["{}_portfolios".format(account_name)] = portfolios_df

            positions_list = self._positions[account_type]
            positions_df = pd.DataFrame(positions_list)
            # An account may have had no positions at all; guard the reindex.
            if "date" in positions_df.columns:
                positions_df["date"] = pd.to_datetime(positions_df["date"])
                positions_df = positions_df.set_index("date").sort_index()
            result_dict["{}_positions".format(account_name)] = positions_df

        self._result = result_dict

        if self._mod_config.output_file:
            with open(self._mod_config.output_file, 'wb') as f:
                pickle.dump(result_dict, f)

        if self._mod_config.plot:
            # Imported lazily: plotting pulls in matplotlib.
            from rqalpha.plot import plot_result
            plot_result(result_dict)

        if self._mod_config.plot_save_file:
            from rqalpha.plot import plot_result
            plot_result(result_dict, False, self._mod_config.plot_save_file)

        if self._mod_config.report_save_path:
            from rqalpha.utils.report import generate_report
            generate_report(result_dict, self._mod_config.report_save_path)
mit
hrayr-artunyan/shuup
shuup_tests/core/test_order_source_weights.py
2
1682
# This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. import decimal import pytest from shuup.core.models import OrderLineType from shuup.testing.factories import create_product, get_default_supplier from .test_order_creator import seed_source @pytest.mark.django_db def test_order_source_total_gross_weight(rf, admin_user): source = seed_source(admin_user) supplier = get_default_supplier() products_data = [ { "sku": "sku1234", "net_weight": decimal.Decimal("1"), "gross_weight": decimal.Decimal("43.34257") }, { "sku": "sku4321", "net_weight": decimal.Decimal("11.342569"), "gross_weight": decimal.Decimal("11.34257") }, { "sku": "sku1111", "net_weight": decimal.Decimal("0.00"), "gross_weight": decimal.Decimal("0.00") } ] for product_data in products_data: product = create_product( sku=product_data.pop("sku"), shop=source.shop, supplier=supplier, default_price=3.33, **product_data ) source.add_line( type=OrderLineType.PRODUCT, product=product, supplier=supplier, quantity=1, base_unit_price=source.create_price(1), ) assert len(source.get_lines()) == len(products_data) assert source.total_gross_weight == sum([data.get("gross_weight") for data in products_data])
agpl-3.0
BeDjango/intef-openedx
lms/djangoapps/instructor/enrollment.py
10
17026
""" Enrollment operations for use by instructor APIs. Does not include any access control, be sure to check access before calling. """ import json import logging from django.contrib.auth.models import User from django.conf import settings from django.core.urlresolvers import reverse from django.core.mail import send_mail from django.utils.translation import override as override_language from course_modes.models import CourseMode from student.models import CourseEnrollment, CourseEnrollmentAllowed from courseware.models import StudentModule from edxmako.shortcuts import render_to_string from lang_pref import LANGUAGE_KEY from submissions import api as sub_api # installed from the edx-submissions repository from student.models import anonymous_id_for_user from openedx.core.djangoapps.user_api.models import UserPreference from microsite_configuration import microsite from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError log = logging.getLogger(__name__) class EmailEnrollmentState(object): """ Store the complete enrollment state of an email in a class """ def __init__(self, course_id, email): exists_user = User.objects.filter(email=email).exists() if exists_user: user = User.objects.get(email=email) mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_id) # is_active is `None` if the user is not enrolled in the course exists_ce = is_active is not None and is_active full_name = user.profile.name else: mode = None exists_ce = False full_name = None ceas = CourseEnrollmentAllowed.objects.filter(course_id=course_id, email=email).all() exists_allowed = ceas.exists() state_auto_enroll = exists_allowed and ceas[0].auto_enroll self.user = exists_user self.enrollment = exists_ce self.allowed = exists_allowed self.auto_enroll = bool(state_auto_enroll) self.full_name = full_name self.mode = mode def __repr__(self): return "{}(user={}, enrollment={}, allowed={}, auto_enroll={})".format( 
self.__class__.__name__, self.user, self.enrollment, self.allowed, self.auto_enroll, ) def to_dict(self): """ example: { 'user': False, 'enrollment': False, 'allowed': True, 'auto_enroll': True, } """ return { 'user': self.user, 'enrollment': self.enrollment, 'allowed': self.allowed, 'auto_enroll': self.auto_enroll, } def get_user_email_language(user): """ Return the language most appropriate for writing emails to user. Returns None if the preference has not been set, or if the user does not exist. """ # Calling UserPreference directly instead of get_user_preference because the user requesting the # information is not "user" and also may not have is_staff access. return UserPreference.get_value(user, LANGUAGE_KEY) def enroll_email(course_id, student_email, auto_enroll=False, email_students=False, email_params=None, language=None): """ Enroll a student by email. `student_email` is student's emails e.g. "foo@bar.com" `auto_enroll` determines what is put in CourseEnrollmentAllowed.auto_enroll if auto_enroll is set, then when the email registers, they will be enrolled in the course automatically. `email_students` determines if student should be notified of action by email. `email_params` parameters used while parsing email templates (a `dict`). `language` is the language used to render the email. returns two EmailEnrollmentState's representing state before and after the action. """ previous_state = EmailEnrollmentState(course_id, student_email) enrollment_obj = None if previous_state.user: # if the student is currently unenrolled, don't enroll them in their # previous mode # for now, White Labels use 'shoppingcart' which is based on the # "honor" course_mode. Given the change to use "audit" as the default # course_mode in Open edX, we need to be backwards compatible with # how White Labels approach enrollment modes. 
if CourseMode.is_white_label(course_id): course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG else: course_mode = None if previous_state.enrollment: course_mode = previous_state.mode enrollment_obj = CourseEnrollment.enroll_by_email(student_email, course_id, course_mode) if email_students: email_params['message'] = 'enrolled_enroll' email_params['email_address'] = student_email email_params['full_name'] = previous_state.full_name send_mail_to_student(student_email, email_params, language=language) else: cea, _ = CourseEnrollmentAllowed.objects.get_or_create(course_id=course_id, email=student_email) cea.auto_enroll = auto_enroll cea.save() if email_students: email_params['message'] = 'allowed_enroll' email_params['email_address'] = student_email send_mail_to_student(student_email, email_params, language=language) after_state = EmailEnrollmentState(course_id, student_email) return previous_state, after_state, enrollment_obj def unenroll_email(course_id, student_email, email_students=False, email_params=None, language=None): """ Unenroll a student by email. `student_email` is student's emails e.g. "foo@bar.com" `email_students` determines if student should be notified of action by email. `email_params` parameters used while parsing email templates (a `dict`). `language` is the language used to render the email. returns two EmailEnrollmentState's representing state before and after the action. 
""" previous_state = EmailEnrollmentState(course_id, student_email) if previous_state.enrollment: CourseEnrollment.unenroll_by_email(student_email, course_id) if email_students: email_params['message'] = 'enrolled_unenroll' email_params['email_address'] = student_email email_params['full_name'] = previous_state.full_name send_mail_to_student(student_email, email_params, language=language) if previous_state.allowed: CourseEnrollmentAllowed.objects.get(course_id=course_id, email=student_email).delete() if email_students: email_params['message'] = 'allowed_unenroll' email_params['email_address'] = student_email # Since no User object exists for this student there is no "full_name" available. send_mail_to_student(student_email, email_params, language=language) after_state = EmailEnrollmentState(course_id, student_email) return previous_state, after_state def send_beta_role_email(action, user, email_params): """ Send an email to a user added or removed as a beta tester. `action` is one of 'add' or 'remove' `user` is the User affected `email_params` parameters used while parsing email templates (a `dict`). """ if action == 'add': email_params['message'] = 'add_beta_tester' email_params['email_address'] = user.email email_params['full_name'] = user.profile.name elif action == 'remove': email_params['message'] = 'remove_beta_tester' email_params['email_address'] = user.email email_params['full_name'] = user.profile.name else: raise ValueError("Unexpected action received '{}' - expected 'add' or 'remove'".format(action)) send_mail_to_student(user.email, email_params, language=get_user_email_language(user)) def reset_student_attempts(course_id, student, module_state_key, delete_module=False): """ Reset student attempts for a problem. Optionally deletes all student state for the specified problem. In the previous instructor dashboard it was possible to modify/delete modules that were not problems. That has been disabled for safety. 
`student` is a User `problem_to_reset` is the name of a problem e.g. 'L2Node1'. To build the module_state_key 'problem/' and course information will be appended to `problem_to_reset`. Raises: ValueError: `problem_state` is invalid JSON. StudentModule.DoesNotExist: could not load the student module. submissions.SubmissionError: unexpected error occurred while resetting the score in the submissions API. """ try: # A block may have children. Clear state on children first. block = modulestore().get_item(module_state_key) if block.has_children: for child in block.children: try: reset_student_attempts(course_id, student, child, delete_module=delete_module) except StudentModule.DoesNotExist: # If a particular child doesn't have any state, no big deal, as long as the parent does. pass except ItemNotFoundError: log.warning("Could not find %s in modulestore when attempting to reset attempts.", module_state_key) # Reset the student's score in the submissions API # Currently this is used only by open assessment (ORA 2) # We need to do this *before* retrieving the `StudentModule` model, # because it's possible for a score to exist even if no student module exists. if delete_module: sub_api.reset_score( anonymous_id_for_user(student, course_id), course_id.to_deprecated_string(), module_state_key.to_deprecated_string(), ) module_to_reset = StudentModule.objects.get( student_id=student.id, course_id=course_id, module_state_key=module_state_key ) if delete_module: module_to_reset.delete() else: _reset_module_attempts(module_to_reset) def _reset_module_attempts(studentmodule): """ Reset the number of attempts on a studentmodule. Throws ValueError if `problem_state` is invalid JSON. 
""" # load the state json problem_state = json.loads(studentmodule.state) # old_number_of_attempts = problem_state["attempts"] problem_state["attempts"] = 0 # save studentmodule.state = json.dumps(problem_state) studentmodule.save() def get_email_params(course, auto_enroll, secure=True, course_key=None, display_name=None): """ Generate parameters used when parsing email templates. `auto_enroll` is a flag for auto enrolling non-registered students: (a `boolean`) Returns a dict of parameters """ protocol = 'https' if secure else 'http' course_key = course_key or course.id.to_deprecated_string() display_name = display_name or course.display_name_with_default stripped_site_name = microsite.get_value( 'SITE_NAME', settings.SITE_NAME ) # TODO: Use request.build_absolute_uri rather than '{proto}://{site}{path}'.format # and check with the Services team that this works well with microsites registration_url = u'{proto}://{site}{path}'.format( proto=protocol, site=stripped_site_name, path=reverse('register_user') ) course_url = u'{proto}://{site}{path}'.format( proto=protocol, site=stripped_site_name, path=reverse('course_root', kwargs={'course_id': course_key}) ) # We can't get the url to the course's About page if the marketing site is enabled. course_about_url = None if not settings.FEATURES.get('ENABLE_MKTG_SITE', False): course_about_url = u'{proto}://{site}{path}'.format( proto=protocol, site=stripped_site_name, path=reverse('about_course', kwargs={'course_id': course_key}) ) is_shib_course = uses_shib(course) # Composition of email email_params = { 'site_name': stripped_site_name, 'registration_url': registration_url, 'course': course, 'display_name': display_name, 'auto_enroll': auto_enroll, 'course_url': course_url, 'course_about_url': course_about_url, 'is_shib_course': is_shib_course, } return email_params def send_mail_to_student(student, param_dict, language=None): """ Construct the email using templates and then send it. 
`student` is the student's email address (a `str`), `param_dict` is a `dict` with keys [ `site_name`: name given to edX instance (a `str`) `registration_url`: url for registration (a `str`) `display_name` : display name of a course (a `str`) `course_id`: id of course (a `str`) `auto_enroll`: user input option (a `str`) `course_url`: url of course (a `str`) `email_address`: email of student (a `str`) `full_name`: student full name (a `str`) `message`: type of email to send and template to use (a `str`) `is_shib_course`: (a `boolean`) ] `language` is the language used to render the email. If None the language of the currently-logged in user (that is, the user sending the email) will be used. Returns a boolean indicating whether the email was sent successfully. """ # add some helpers and microconfig subsitutions if 'display_name' in param_dict: param_dict['course_name'] = param_dict['display_name'] param_dict['site_name'] = microsite.get_value( 'SITE_NAME', param_dict['site_name'] ) subject = None message = None # see if we are running in a microsite and that there is an # activation email template definition available as configuration, if so, then render that message_type = param_dict['message'] email_template_dict = { 'allowed_enroll': ( 'emails/enroll_email_allowedsubject.txt', 'emails/enroll_email_allowedmessage.txt' ), 'enrolled_enroll': ( 'emails/enroll_email_enrolledsubject.txt', 'emails/enroll_email_enrolledmessage.txt' ), 'allowed_unenroll': ( 'emails/unenroll_email_subject.txt', 'emails/unenroll_email_allowedmessage.txt' ), 'enrolled_unenroll': ( 'emails/unenroll_email_subject.txt', 'emails/unenroll_email_enrolledmessage.txt' ), 'add_beta_tester': ( 'emails/add_beta_tester_email_subject.txt', 'emails/add_beta_tester_email_message.txt' ), 'remove_beta_tester': ( 'emails/remove_beta_tester_email_subject.txt', 'emails/remove_beta_tester_email_message.txt' ), 'account_creation_and_enrollment': ( 'emails/enroll_email_enrolledsubject.txt', 
'emails/account_creation_and_enroll_emailMessage.txt' ), } subject_template, message_template = email_template_dict.get(message_type, (None, None)) if subject_template is not None and message_template is not None: subject, message = render_message_to_string( subject_template, message_template, param_dict, language=language ) if subject and message: # Remove leading and trailing whitespace from body message = message.strip() # Email subject *must not* contain newlines subject = ''.join(subject.splitlines()) from_address = microsite.get_value( 'email_from_address', settings.DEFAULT_FROM_EMAIL ) send_mail(subject, message, from_address, [student], fail_silently=False) def render_message_to_string(subject_template, message_template, param_dict, language=None): """ Render a mail subject and message templates using the parameters from param_dict and the given language. If language is None, the platform default language is used. Returns two strings that correspond to the rendered, translated email subject and message. """ with override_language(language): return get_subject_and_message(subject_template, message_template, param_dict) def get_subject_and_message(subject_template, message_template, param_dict): """ Return the rendered subject and message with the appropriate parameters. """ subject = render_to_string(subject_template, param_dict) message = render_to_string(message_template, param_dict) return subject, message def uses_shib(course): """ Used to return whether course has Shibboleth as the enrollment domain Returns a boolean indicating if Shibboleth authentication is set for this course. """ return course.enrollment_domain and course.enrollment_domain.startswith(settings.SHIBBOLETH_DOMAIN_PREFIX)
agpl-3.0
callakrsos/Gargoyle
gargoyle-jython/script/Test.py
1
2542
import java.lang.String from org.python.core import PyString x = 100 if x > 0: print('wow this is elegant') else: print('Oranization is the key.') print('hello ' + ' ' + 'world') def my_function(a='zz'): print(a) my_function() class my_class: def __init__(self, x, y ): self.x = x self.y = y print( java.lang.String('초기화함수' , 'utf-8')) def mul(self): print(self.x * self.y) def add(self): print(self.x + self.y) ob1 = my_class(7,8) ob1.mul() ob1.add() decimalValue = 10 stringValue = "10" floatValue = 10.0 print ('String of text goes here %d %s %f' % (decimalValue, stringValue, floatValue)) x = 3 y = 0 try: print('로켓의 궤도 : %f' % (x/y)) except Exception as ex: print(ex.__str__() ) print(range(1,5)) items = [1,2,3,4,5, 10] for item in items: print(item) items.append(11) print(items) items = [ y**2 for y in range(1,10)] print(items) pairs = [] A = ['blue', 'yellow', 'red'] B = ['red', 'green', 'blue'] for a in A: for b in B: if a != b: pairs.append((a,b)) print (pairs) pairs = [] pairs = [(a,b) for a in A for b in B if a!=b] print(pairs) a = {x for x in 'abracatabra' if x not in 'abc'} print(a) print("###############################################") alpa = 2 for a in range(1,10): print("%d x %d = %d" % (alpa , a , (alpa*a)) ) print('###############################################') print('abcdefg'.upper()) print("%s %s %s" % (ord('a'), ord('b'), ord('c'))) print("########################################") str = "abcde" newStr = "" for a in range(len(str) - 1, -1 , -1): newStr += str[a] print("print newStr : %s " % (newStr)) books = [ { "제목":"개발자의 코드", "출판연도": "2013.02.28", "출판사":"A", "쪽수":200, "추천유무":False }, { "제목":"클린코드", "출판연도": "2010.03.04", "출판사":"B", "쪽수":584, "추천유무":True }, { "제목":"빅데이터 마케팅", "출판연도": "2014.07.10", "출판사":"A", "쪽수":296, "추천유무":True }, ] print("############################################") many_page = [] my_recommand = [] for book in books: if book["쪽수"] > 250: many_page.append(book) if book["추천유무"]: my_recommand.append(book) print(many_page) 
print(my_recommand)
gpl-2.0
c960657/dd-agent
tests/checks/mock/test_java_jmx.py
48
4565
# stdlib import os import tempfile import threading import time from types import ListType import unittest # 3p from mock import patch from nose.plugins.attrib import attr import yaml # project from aggregator import MetricsAggregator from dogstatsd import Server from jmxfetch import JMXFetch from tests.checks.common import AgentCheckTest STATSD_PORT = 8129 class DummyReporter(threading.Thread): def __init__(self, metrics_aggregator): threading.Thread.__init__(self) self.finished = threading.Event() self.metrics_aggregator = metrics_aggregator self.interval = 10 self.metrics = None self.finished = False self.start() def run(self): while not self.finished: time.sleep(self.interval) self.flush() def flush(self): metrics = self.metrics_aggregator.flush() if metrics: self.metrics = metrics @attr('local') class JMXInitTest(AgentCheckTest): CHECK_NAME = "java_jmx" @patch("subprocess.Popen") def _get_jmxfetch_subprocess_args(self, yaml_jmx_conf, mock_subprocess_call): # Helper function # Returns the Java JMX subprocess_args called from a YAML configuration tmp_dir = tempfile.mkdtemp() filename = "jmx.yaml" with open(os.path.join(tmp_dir, filename), 'wb') as temp_file: temp_file.write(yaml.dump(yaml_jmx_conf)) jmx = JMXFetch(tmp_dir, {}) jmx.run(reporter="console") return mock_subprocess_call.call_args[0][0] def _get_jmx_conf(self, java_options): return { 'instances': [{ 'host': "localhost", 'port': 7199, 'java_options': java_options }] } def assertJavaRunsWith(self, yaml_conf, include=[], exclude=[]): # pylint doesn't get that the arg is the mock subprocess_args = self._get_jmxfetch_subprocess_args(yaml_conf) # pylint: disable=E1120 for i in include: self.assertIn(i, subprocess_args) for e in exclude: self.assertNotIn(e, subprocess_args) def test_jmx_start(self): # Empty java_options jmx_conf = self._get_jmx_conf("") self.assertJavaRunsWith(jmx_conf, ['-Xms50m', '-Xmx200m']) # Specified initial memory allocation pool for the JVM jmx_conf = self._get_jmx_conf("-Xms10m") 
self.assertJavaRunsWith(jmx_conf, ['-Xms10m', '-Xmx200m'], ['-Xms50m']) jmx_conf = self._get_jmx_conf("-XX:InitialHeapSize=128m") self.assertJavaRunsWith(jmx_conf, ['-XX:InitialHeapSize=128m', '-Xmx200m'], ['-Xms50m']) # Specified maximum memory allocation pool for the JVM jmx_conf = self._get_jmx_conf("-Xmx500m") self.assertJavaRunsWith(jmx_conf, ['-Xms50m', '-Xmx500m'], ['-Xmx200m']) jmx_conf = self._get_jmx_conf("-XX:MaxHeapSize=500m") self.assertJavaRunsWith(jmx_conf, ['-Xms50m', '-XX:MaxHeapSize=500m'], ['-Xmx200m']) @attr(requires='tomcat') class JMXTestCase(unittest.TestCase): def setUp(self): aggregator = MetricsAggregator("test_host") self.server = Server(aggregator, "localhost", STATSD_PORT) self.reporter = DummyReporter(aggregator) self.t1 = threading.Thread(target=self.server.start) self.t1.start() confd_path = os.path.join(os.environ['VOLATILE_DIR'], 'jmx_yaml') self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT}) self.t2 = threading.Thread(target=self.jmx_daemon.run) self.t2.start() def tearDown(self): self.server.stop() self.reporter.finished = True self.jmx_daemon.terminate() def testCustomJMXMetric(self): count = 0 while self.reporter.metrics is None: time.sleep(1) count += 1 if count > 20: raise Exception("No metrics were received in 20 seconds") metrics = self.reporter.metrics self.assertTrue(isinstance(metrics, ListType)) self.assertTrue(len(metrics) > 0) self.assertEquals(len([t for t in metrics if t['metric'] == "my.metric.buf" and "instance:jmx_instance1" in t['tags']]), 2, metrics) self.assertTrue(len([t for t in metrics if 'type:ThreadPool' in t['tags'] and "instance:jmx_instance1" in t['tags'] and "jmx.catalina" in t['metric']]) > 8, metrics) self.assertTrue(len([t for t in metrics if "jvm." in t['metric'] and "instance:jmx_instance1" in t['tags']]) == 13, metrics)
bsd-3-clause
plankton12345/litecoin
contrib/spendfrom/spendfrom.py
792
10053
#!/usr/bin/env python # # Use the raw transactions API to spend bitcoins received on particular addresses, # and send any change back to that same address. # # Example usage: # spendfrom.py # Lists available funds # spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00 # # Assumes it will talk to a bitcoind or Bitcoin-Qt running # on localhost. # # Depends on jsonrpc # from decimal import * import getpass import math import os import os.path import platform import sys import time from jsonrpc import ServiceProxy, json BASE_FEE=Decimal("0.001") def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def determine_db_dir(): """Return the default location of the bitcoin data directory""" if platform.system() == "Darwin": return os.path.expanduser("~/Library/Application Support/Bitcoin/") elif platform.system() == "Windows": return os.path.join(os.environ['APPDATA'], "Bitcoin") return os.path.expanduser("~/.bitcoin") def read_bitcoin_config(dbdir): """Read the bitcoin.conf file from dbdir, returns dictionary of settings""" from ConfigParser import SafeConfigParser class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[all]\n' def readline(self): if self.sechead: try: return self.sechead finally: self.sechead = None else: s = self.fp.readline() if s.find('#') != -1: s = s[0:s.find('#')].strip() +"\n" return s config_parser = SafeConfigParser() config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf")))) return dict(config_parser.items("all")) def connect_JSON(config): """Connect to a bitcoin JSON-RPC server""" testnet = config.get('testnet', '0') testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False if not 'rpcport' in config: config['rpcport'] = 19332 if testnet else 9332 
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport']) try: result = ServiceProxy(connect) # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors, # but also make sure the bitcoind we're talking to is/isn't testnet: if result.getmininginfo()['testnet'] != testnet: sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n") sys.exit(1) return result except: sys.stderr.write("Error connecting to RPC server at "+connect+"\n") sys.exit(1) def unlock_wallet(bitcoind): info = bitcoind.getinfo() if 'unlocked_until' not in info: return True # wallet is not encrypted t = int(info['unlocked_until']) if t <= time.time(): try: passphrase = getpass.getpass("Wallet is locked; enter passphrase: ") bitcoind.walletpassphrase(passphrase, 5) except: sys.stderr.write("Wrong passphrase\n") info = bitcoind.getinfo() return int(info['unlocked_until']) > time.time() def list_available(bitcoind): address_summary = dict() address_to_account = dict() for info in bitcoind.listreceivedbyaddress(0): address_to_account[info["address"]] = info["account"] unspent = bitcoind.listunspent(0) for output in unspent: # listunspent doesn't give addresses, so: rawtx = bitcoind.getrawtransaction(output['txid'], 1) vout = rawtx["vout"][output['vout']] pk = vout["scriptPubKey"] # This code only deals with ordinary pay-to-bitcoin-address # or pay-to-script-hash outputs right now; anything exotic is ignored. 
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. 
# outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get bitcoins from") parser.add_option("--to", dest="to", default=None, help="address to 
get send bitcoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of bitcoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main()
mit
darkryder/django
tests/schema/tests.py
3
99065
import datetime import itertools import unittest from copy import copy from django.db import ( DatabaseError, IntegrityError, OperationalError, connection, ) from django.db.models import Model from django.db.models.deletion import CASCADE, PROTECT from django.db.models.fields import ( AutoField, BigIntegerField, BinaryField, BooleanField, CharField, DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField, TextField, TimeField, ) from django.db.models.fields.related import ( ForeignKey, ForeignObject, ManyToManyField, OneToOneField, ) from django.db.models.indexes import Index from django.db.transaction import atomic from django.test import ( TransactionTestCase, mock, skipIfDBFeature, skipUnlessDBFeature, ) from django.utils.timezone import UTC from .fields import ( CustomManyToManyField, InheritedManyToManyField, MediumBlobField, ) from .models import ( Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests that the schema-alteration code works correctly. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. 
self.local_models = [] def tearDown(self): # Delete any tables made for our models self.delete_tables() new_apps.clear_cache() for model in new_apps.get_models(): model._meta._expire_cache() if 'schema' in new_apps.all_models: for model in self.local_models: for many_to_many in model._meta.many_to_many: through = many_to_many.remote_field.through if through and through._meta.auto_created: del new_apps.all_models['schema'][through._meta.model_name] del new_apps.all_models['schema'][model._meta.model_name] def delete_tables(self): "Deletes all model tables for our models for a clean test environment" converter = connection.introspection.table_name_converter with atomic(): connection.disable_constraint_checking() table_names = connection.introspection.table_names() for model in itertools.chain(SchemaTests.models, self.local_models): tbl = converter(model._meta.db_table) if tbl in table_names: with connection.schema_editor() as editor: editor.delete_model(model) table_names.remove(tbl) connection.enable_constraint_checking() def column_classes(self, model): with connection.cursor() as cursor: columns = { d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description( cursor, model._meta.db_table, ) } # SQLite has a different format for field_type for name, (type, desc) in columns.items(): if isinstance(type, tuple): columns[name] = (type[0], desc) # SQLite also doesn't error properly if not columns: raise DatabaseError("Table does not exist (empty pragma)") return columns def get_indexes(self, table): """ Get the indexes on the table using a new cursor. """ with connection.cursor() as cursor: return connection.introspection.get_indexes(cursor, table) def get_constraints(self, table): """ Get the constraints on a table using a new cursor. 
""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def get_constraints_for_column(self, model, column_name): constraints = self.get_constraints(model._meta.db_table) constraints_for_column = [] for name, details in constraints.items(): if details['columns'] == [column_name]: constraints_for_column.append(name) return sorted(constraints_for_column) def check_added_field_default(self, schema_editor, model, field, field_name, expected_default, cast_function=None): with connection.cursor() as cursor: schema_editor.add_field(model, field) cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table)) database_default = cursor.fetchall()[0][0] if cast_function and not type(database_default) == type(expected_default): database_default = cast_function(database_default) self.assertEqual(database_default, expected_default) def get_constraints_count(self, table, column, fk_to): """ Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the number of foreign keys, unique constraints, and indexes on `table`.`column`. The `fk_to` argument is a 2-tuple specifying the expected foreign key relationship's (table, column). """ with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, table) counts = {'fks': 0, 'uniques': 0, 'indexes': 0} for c in constraints.values(): if c['columns'] == [column]: if c['foreign_key'] == fk_to: counts['fks'] += 1 if c['unique']: counts['uniques'] += 1 elif c['index']: counts['indexes'] += 1 return counts def assertIndexOrder(self, table, index, order): constraints = self.get_constraints(table) self.assertIn(index, constraints) index_orders = constraints[index]['orders'] self.assertTrue(all([(val == expected) for val, expected in zip(index_orders, order)])) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check that it's there list(Author.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Author) # Check that it's gone with self.assertRaises(DatabaseError): list(Author.objects.all()) @skipUnlessDBFeature('supports_foreign_keys') def test_fk(self): "Tests that creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Check that initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Make sure the new FK constraint is present constraints = self.get_constraints(Book._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_tag', 'id')) break else: self.fail("No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_fk_to_proxy(self): "Tests that creating a FK to a proxy model creates database constraints." 
class AuthorProxy(Author): class Meta: app_label = 'schema' apps = new_apps proxy = True class AuthorRef(Model): author = ForeignKey(AuthorProxy, on_delete=CASCADE) class Meta: app_label = 'schema' apps = new_apps self.local_models = [AuthorProxy, AuthorRef] # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorRef) constraints = self.get_constraints(AuthorRef._meta.db_table) for details in constraints.values(): if details['columns'] == ['author_id'] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_author', 'id')) break else: self.fail('No FK constraint for author_id found') @skipUnlessDBFeature('supports_foreign_keys') def test_fk_db_constraint(self): "Tests that the db_constraint parameter is respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Check that initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) # Check that BookWeak doesn't have an FK constraint constraints = self.get_constraints(BookWeak._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and details['foreign_key']: self.fail("FK constraint for author_id found") # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Make sure no FK constraint is present constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.fail("FK constraint for tag_id found") # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, 
new_field2, strict=True) # Make sure the new FK constraint is present constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_tag', 'id')) break else: self.fail("No FK constraint for tag_id found") # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) # Make sure no FK constraint is present constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.fail("FK constraint for tag_id found") def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Check that initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Make sure no FK constraint is present constraints = self.get_constraints(new_field.remote_field.through._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.fail("FK constraint for tag_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def 
test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['age'][0], "IntegerField") self.assertEqual(columns['age'][1][6], True) def test_add_field_temp_default(self): """ Tests adding fields to models with a temporary default """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = CharField(max_length=30, default="Godwin") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['surname'][0], "CharField") self.assertEqual(columns['surname'][1][6], connection.features.interprets_empty_strings_as_nulls) def test_add_field_temp_default_boolean(self): """ Tests adding fields to models with a temporary default where the default is False. 
(#21783) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = BooleanField(default=False) new_field.set_attributes_from_name("awesome") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # BooleanField are stored as TINYINT(1) on MySQL. field_type = columns['awesome'][0] self.assertEqual( field_type, connection.features.introspected_boolean_field_type(new_field, created_separately=True) ) def test_add_field_default_transform(self): """ Tests adding fields to models with a default that is not directly valid in the database (#22581) """ class TestTransformField(IntegerField): # Weird field that saves the count of items in its value def get_default(self): return self.default def get_prep_value(self, value): if value is None: return 0 return len(value) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add the field with a default it needs to cast (to string in this case) new_field = TestTransformField(default={1: 2}) new_field.set_attributes_from_name("thing") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is there columns = self.column_classes(Author) field_type, field_info = columns['thing'] self.assertEqual(field_type, 'IntegerField') # Make sure the values were transformed correctly self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2) def test_add_field_binary(self): """ Tests binary fields get a sane default (#22851) """ # Create the table with 
connection.schema_editor() as editor: editor.create_model(Author) # Add the new field new_field = BinaryField(blank=True) new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # MySQL annoyingly uses the same backend, so it'll come back as one of # these two types. self.assertIn(columns['bits'][0], ("BinaryField", "TextField")) @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b'123') new_field.set_attributes_from_name('bits') with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns['bits'][0], "TextField") def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(columns['name'][1][6], True) # Change nullability again new_field2 = TextField(null=False) 
new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) def test_alter_text_field(self): # Regression for "BLOB/TEXT column 'info' can't have a default value") # on MySQL. # Create the table with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("info") new_field = TextField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) def test_alter_text_field_to_date_field(self): """ #25002 - Test conversion of text field to date field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05') old_field = Note._meta.get_field('info') new_field = DateField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_datetime_field(self): """ #25002 - Test conversion of text field to datetime field. 
""" with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05 3:16:17.4567') old_field = Note._meta.get_field('info') new_field = DateTimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_time_field(self): """ #25002 - Test conversion of text field to time field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='3:16:17.4567') old_field = Note._meta.get_field('info') new_field = TimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_alter_textual_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. 
""" with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='aaa') old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='bbb') def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns['height'][1][6]) # Create some test data Author.objects.create(name='Not null author', height=12) Author.objects.create(name='Null author') # Verify null value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertIsNone(Author.objects.get(name='Null author').height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertFalse(columns['height'][1][6]) # Verify default value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertEqual(Author.objects.get(name='Null author').height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a CharField to null. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field('name') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) def test_alter_textfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a TextField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Note) # Change the TextField to null old_field = Note._meta.get_field('info') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) @skipUnlessDBFeature('supports_combined_alters') def test_alter_null_to_not_null_keeping_default(self): """ #23738 - Can change a nullable field with default to non-nullable with the same default. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) # Ensure the field is right to begin with columns = self.column_classes(AuthorWithDefaultHeight) self.assertTrue(columns['height'][1][6]) # Alter the height field to NOT NULL keeping the previous default old_field = AuthorWithDefaultHeight._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(AuthorWithDefaultHeight, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(AuthorWithDefaultHeight) self.assertFalse(columns['height'][1][6]) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk(self): """ Tests altering of FKs """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Make sure the FK constraint is present constraints = self.get_constraints(Book._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_author', 'id')) break else: self.fail("No FK constraint for author_id found") # Alter the FK old_field = Book._meta.get_field("author") new_field = ForeignKey(Author, CASCADE, editable=False) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Make sure the FK constraint is present constraints = self.get_constraints(Book._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and 
details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_author', 'id')) break else: self.fail("No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_alter_to_fk(self): """ #24447 - Tests adding a FK constraint for an existing column """ class LocalBook(Model): author = IntegerField() title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBook] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBook) # Ensure no FK constraint exists constraints = self.get_constraints(LocalBook._meta.db_table) for name, details in constraints.items(): if details['foreign_key']: self.fail('Found an unexpected FK constraint to %s' % details['columns']) old_field = LocalBook._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(LocalBook, old_field, new_field, strict=True) constraints = self.get_constraints(LocalBook._meta.db_table) # Ensure FK constraint exists for name, details in constraints.items(): if details['foreign_key'] and details['columns'] == ["author_id"]: self.assertEqual(details['foreign_key'], ('schema_author', 'id')) break else: self.fail("No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_alter_o2o_to_fk(self): """ #24163 - Tests altering of OneToOneField to ForeignKey """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) # Ensure the field is right to begin with columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique author = Author.objects.create(name="Joe") BookWithO2O.objects.create(author=author, title="Django 1", 
pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) BookWithO2O.objects.all().delete() # Make sure the FK constraint is present constraints = self.get_constraints(BookWithO2O._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, "No FK constraint for author_id found") # Alter the OneToOneField to ForeignKey old_field = BookWithO2O._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is not unique anymore Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) # Make sure the FK constraint is still present constraints = self.get_constraints(Book._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, "No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk_to_o2o(self): """ #24163 - Tests altering of ForeignKey to OneToOneField """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the 
field is not unique author = Author.objects.create(name="Joe") Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) Book.objects.all().delete() # Make sure the FK constraint is present constraints = self.get_constraints(Book._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, "No FK constraint for author_id found") # Alter the ForeignKey to OneToOneField old_field = Book._meta.get_field("author") new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique now BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) # Make sure the FK constraint is present constraints = self.get_constraints(BookWithO2O._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, "No FK constraint for author_id found") def test_alter_field_fk_to_o2o(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the index is right to begin with. 
counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) old_field = Book._meta.get_field('author') new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index on ForeignKey is replaced with a unique constraint for OneToOneField. self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0}) def test_alter_field_fk_keeps_index(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = 1 if connection.features.supports_foreign_keys else 0 # Check the index is right to begin with. counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1}) old_field = Book._meta.get_field('author') # on_delete changed from CASCADE. new_field = ForeignKey(Author, PROTECT) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field('author').column, (Author._meta.db_table, Author._meta.pk.column), ) # The index remains. 
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})

    def test_alter_field_o2o_to_fk(self):
        """
        Altering a OneToOneField to a ForeignKey replaces the unique
        constraint with a plain index (verified via constraint counts).
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Backends without real FK support report zero FK constraints.
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
        old_field = BookWithO2O._meta.get_field('author')
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint on OneToOneField is replaced with an index for ForeignKey.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 0, 'indexes': 1})

    def test_alter_field_o2o_keeps_unique(self):
        """
        Altering only a OneToOneField's on_delete must leave the unique
        constraint in place.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Backends without real FK support report zero FK constraints.
        expected_fks = 1 if connection.features.supports_foreign_keys else 0
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})
        old_field = BookWithO2O._meta.get_field('author')
        # on_delete changed from CASCADE.
        new_field = OneToOneField(Author, PROTECT)
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field('author').column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint remains.
        self.assertEqual(counts, {'fks': expected_fks, 'uniques': 1, 'indexes': 0})

    def test_alter_db_table_case(self):
        """Renaming a table to the same name with different case must work."""
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Alter the case of the table
        old_table_name = Author._meta.db_table
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, old_table_name, old_table_name.upper())

    def test_alter_implicit_id_to_explicit(self):
        """
        Should be able to convert an implicit "id" field to an explicit "id"
        primary key field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field("id")
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # This will fail if DROP DEFAULT is inadvertently executed on this
        # field which drops the id sequence, at least on PostgreSQL.
        Author.objects.create(name='Foo')
        Author.objects.create(name='Bar')

    def test_alter_int_pk_to_autofield_pk(self):
        """
        Should be able to rename an IntegerField(primary_key=True) to
        AutoField(primary_key=True).
        """
        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)
        old_field = IntegerPK._meta.get_field('i')
        new_field = AutoField(primary_key=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('i')
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)

    def test_alter_int_pk_to_int_unique(self):
        """
        Should be able to rename an IntegerField(primary_key=True) to
        IntegerField(unique=True).
        """
        class IntegerUnique(Model):
            # Mirrors the post-alteration schema of IntegerPK; used only to
            # exercise the resulting constraints through the ORM.
            i = IntegerField(unique=True)
            j = IntegerField(primary_key=True)

            class Meta:
                app_label = 'schema'
                apps = new_apps
                db_table = 'INTEGERPK'

        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)
        # model requires a new PK
        old_field = IntegerPK._meta.get_field('j')
        new_field = IntegerField(primary_key=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('j')
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
        old_field = IntegerPK._meta.get_field('i')
        new_field = IntegerField(unique=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('i')
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
        # Ensure unique constraint works.
        IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)

    def test_rename(self):
        """
        Tests simple altering of fields
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertNotIn("display_name", columns)
        # Alter the name field's name
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("display_name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['display_name'][0], "CharField")
        self.assertNotIn("name", columns)

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_rename_keep_null_status(self):
        """
        Renaming a field shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
        old_field = Note._meta.get_field("info")
        new_field = TextField()
        new_field.set_attributes_from_name("detail_info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        columns = self.column_classes(Note)
        self.assertEqual(columns['detail_info'][0], "TextField")
        self.assertNotIn("info", columns)
        # NOT NULL must survive the rename: inserting NULL still fails.
        with self.assertRaises(IntegrityError):
            NoteRename.objects.create(detail_info=None)

    def _test_m2m_create(self, M2MFieldClass):
        """
        Tests M2M fields on models during creation
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")

            class Meta:
                app_label = 'schema'
                apps = new_apps
        self.local_models = [LocalBookWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(TagM2MTest)
            editor.create_model(LocalBookWithM2M)
        # Ensure there is now an m2m table there
        columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
        self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")

    def test_m2m_create(self):
        self._test_m2m_create(ManyToManyField)

    def test_m2m_create_custom(self):
        self._test_m2m_create(CustomManyToManyField)

    def test_m2m_create_inherited(self):
        self._test_m2m_create(InheritedManyToManyField)

    def _test_m2m_create_through(self, M2MFieldClass):
        """
        Tests M2M fields on models during creation with through models
        """
        class LocalTagThrough(Model):
            book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
            tag = ForeignKey("schema.TagM2MTest", CASCADE)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        class LocalBookWithM2MThrough(Model):
            tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)

            class Meta:
                app_label = 'schema'
                apps = new_apps
        self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalTagThrough)
            editor.create_model(TagM2MTest)
            editor.create_model(LocalBookWithM2MThrough)
        # Ensure there is now an m2m table there
        columns = self.column_classes(LocalTagThrough)
        self.assertEqual(columns['book_id'][0], "IntegerField")
        self.assertEqual(columns['tag_id'][0], "IntegerField")

    def test_m2m_create_through(self):
        self._test_m2m_create_through(ManyToManyField)

    def test_m2m_create_through_custom(self):
        self._test_m2m_create_through(CustomManyToManyField)

    def test_m2m_create_through_inherited(self):
        self._test_m2m_create_through(InheritedManyToManyField)

    def _test_m2m(self, M2MFieldClass):
        """
        Tests adding/removing M2M fields on models
        """
        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)

            class Meta:
                app_label = 'schema'
                apps = new_apps
        self.local_models = [LocalAuthorWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorWithM2M)
            editor.create_model(TagM2MTest)
        # Create an M2M field
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)
        # Add the field
        with connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # Ensure there is now an m2m table there
        columns = self.column_classes(new_field.remote_field.through)
        self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")

        # "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True) # Remove the M2M table again with connection.schema_editor() as editor: editor.remove_field(LocalAuthorWithM2M, new_field) # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Make sure the model state is coherent with the table one now that # we've removed the tags field. opts = LocalAuthorWithM2M._meta opts.local_many_to_many.remove(new_field) del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name] opts._expire_cache() def test_m2m(self): self._test_m2m(ManyToManyField) def test_m2m_custom(self): self._test_m2m(CustomManyToManyField) def test_m2m_inherited(self): self._test_m2m(InheritedManyToManyField) def _test_m2m_through_alter(self, M2MFieldClass): """ Tests altering M2Ms with explicit through models (should no-op) """ class LocalAuthorTag(Model): author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalAuthorWithM2MThrough(Model): name = CharField(max_length=255) tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorTag) editor.create_model(LocalAuthorWithM2MThrough) editor.create_model(TagM2MTest) # Ensure the m2m table is there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) # "Alter" the field's blankness. This should not actually do anything. 
        old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
        new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field, strict=True)
        # Ensure the m2m table is still there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)

    def test_m2m_through_alter(self):
        self._test_m2m_through_alter(ManyToManyField)

    def test_m2m_through_alter_custom(self):
        self._test_m2m_through_alter(CustomManyToManyField)

    def test_m2m_through_alter_inherited(self):
        self._test_m2m_through_alter(InheritedManyToManyField)

    def _test_m2m_repoint(self, M2MFieldClass):
        """
        Tests repointing M2M fields
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")

            class Meta:
                app_label = 'schema'
                apps = new_apps
        self.local_models = [LocalBookWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBookWithM2M)
            editor.create_model(TagM2MTest)
            editor.create_model(UniqueTest)
        # Ensure the M2M exists and points to TagM2MTest
        constraints = self.get_constraints(
            LocalBookWithM2M._meta.get_field("tags").remote_field.through._meta.db_table
        )
        if connection.features.supports_foreign_keys:
            for name, details in constraints.items():
                if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
                    self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
                    break
            else:
                self.fail("No FK constraint for tagm2mtest_id found")
        # Repoint the M2M
        old_field = LocalBookWithM2M._meta.get_field("tags")
        new_field = M2MFieldClass(UniqueTest)
        new_field.contribute_to_class(LocalBookWithM2M, "uniques")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
        # Ensure old M2M is gone
        with self.assertRaises(DatabaseError):
            self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)

        # This model looks like the new model and is used for teardown.
        opts = LocalBookWithM2M._meta
        opts.local_many_to_many.remove(old_field)
        # Ensure the new M2M exists and points to UniqueTest
        constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
        if connection.features.supports_foreign_keys:
            for name, details in constraints.items():
                if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
                    self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
                    break
            else:
                self.fail("No FK constraint for uniquetest_id found")

    def test_m2m_repoint(self):
        self._test_m2m_repoint(ManyToManyField)

    def test_m2m_repoint_custom(self):
        self._test_m2m_repoint(CustomManyToManyField)

    def test_m2m_repoint_inherited(self):
        self._test_m2m_repoint(InheritedManyToManyField)

    @skipUnlessDBFeature('supports_column_check_constraints')
    def test_check_constraints(self):
        """
        Tests creating/deleting CHECK constraints
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the constraint exists
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                break
        else:
            self.fail("No check constraint for height found")
        # Alter the column to remove it
        old_field = Author._meta.get_field("height")
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                self.fail("Check constraint for height found")
        # Alter the column to re-add it
        new_field2 = Author._meta.get_field("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                break
        else:
            self.fail("No check constraint for height found")

    def test_unique(self):
        """
        Tests removing and adding unique constraints to a single column.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the field is unique to begin with
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be non-unique
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(unique=False)
        new_field.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        # Ensure the field is no longer unique
        Tag.objects.create(title="foo", slug="foo")
        Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be unique
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        # Ensure the field is unique again
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Rename the field
        new_field3 = SlugField(unique=True)
        new_field3.set_attributes_from_name("slug2")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field2, new_field3, strict=True)
        # Ensure the field is still unique
        TagUniqueRename.objects.create(title="foo", slug2="foo")
        with self.assertRaises(IntegrityError):
            TagUniqueRename.objects.create(title="bar", slug2="foo")
        Tag.objects.all().delete()

    def test_unique_together(self):
        """
        Tests removing and adding unique_together constraints on a model.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        # Ensure the fields are unique to begin with
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2011, slug="foo")
        UniqueTest.objects.create(year=2011, slug="bar")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter the model to its non-unique-together companion
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
        # Ensure the fields are no longer unique
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter it back
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
        # Ensure the fields are unique again
        UniqueTest.objects.create(year=2012, slug="foo")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()

    def test_unique_together_with_fk(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the fields are unique to begin with
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])

    def test_unique_together_with_fk_with_existing_index(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key, where the foreign key is added after the model is
        created.
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithoutAuthor)
            new_field = ForeignKey(Author, CASCADE)
            new_field.set_attributes_from_name('author')
            editor.add_field(BookWithoutAuthor, new_field)
        # Ensure the fields aren't unique to begin with
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])

    def test_index_together(self):
        """
        Tests removing and adding index_together constraints on a model.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure there's no index on the year/slug columns first
        self.assertEqual(
            False,
            any(
                c["index"]
                for c in self.get_constraints("schema_tag").values()
                if c['columns'] == ["slug", "title"]
            ),
        )
        # Alter the model to add an index
        with connection.schema_editor() as editor:
            editor.alter_index_together(Tag, [], [("slug", "title")])
        # Ensure there is now an index
        self.assertEqual(
            True,
            any(
                c["index"]
                for c in self.get_constraints("schema_tag").values()
                if c['columns'] == ["slug", "title"]
            ),
        )
        # Alter it back
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_index_together(Tag, [("slug", "title")], [])
        # Ensure there's no index
        self.assertEqual(
            False,
            any(
                c["index"]
                for c in self.get_constraints("schema_tag").values()
                if c['columns'] == ["slug", "title"]
            ),
        )

    def test_index_together_with_fk(self):
        """
        Tests removing and adding index_together constraints that include
        a foreign key.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the fields are unique to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_index_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_index_together(Book, [['author', 'title']], [])

    def test_create_index_together(self):
        """
        Tests creating models with index_together already defined
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(TagIndexed)
        # Ensure there is an index
        self.assertEqual(
            True,
            any(
                c["index"]
                for c in self.get_constraints("schema_tagindexed").values()
                if c['columns'] == ["slug", "title"]
            ),
        )

    def test_db_table(self):
        """
        Tests renaming of the table
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the table is there to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        # Alter the table
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
        # Ensure the table is there afterwards
        Author._meta.db_table = "schema_otherauthor"
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        # Alter the table again
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
        # Ensure the table is still there
        Author._meta.db_table = "schema_author"
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")

    def test_add_remove_index(self):
        """
        Tests index addition and removal
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the table is there and has no index
        # NOTE(review): checks 'title' while the index below is on 'name' —
        # looks like a leftover from a copied test; confirm intent.
        self.assertNotIn('title', self.get_indexes(Author._meta.db_table))
        # Add the index
        index = Index(fields=['name'], name='author_title_idx')
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
        self.assertIn('name', self.get_indexes(Author._meta.db_table))
        # Drop the index
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn('name', self.get_indexes(Author._meta.db_table))

    def test_order_index(self):
        """
        Indexes defined with ordering (ASC/DESC) defined on column
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # The table doesn't have an index
        self.assertNotIn('title', self.get_indexes(Author._meta.db_table))
        index_name = 'author_name_idx'
        # Add the index
        index = Index(fields=['name', '-weight'], name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
        if connection.features.supports_index_column_ordering:
            if connection.features.uppercases_column_names:
                index_name = index_name.upper()
            self.assertIndexOrder(Author._meta.db_table, index_name, ['ASC', 'DESC'])

    def test_indexes(self):
        """
        Tests creation/altering of indexes
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the table is there and has the right index
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to remove the index
        old_field = Book._meta.get_field("title")
        new_field = CharField(max_length=100, db_index=False)
        new_field.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the table is there and has no index
        self.assertNotIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to re-add the index
        new_field2 = Book._meta.get_field("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, new_field, new_field2, strict=True)
        # Ensure the table is there and has the index again
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Add a unique column, verify that creates an implicit index
        new_field3 = BookWithSlug._meta.get_field("slug")
        with connection.schema_editor() as editor:
            editor.add_field(Book, new_field3)
        self.assertIn(
            "slug",
            self.get_indexes(Book._meta.db_table),
        )
        # Remove the unique, check the index goes with it
        new_field4 = CharField(max_length=20, unique=False)
        new_field4.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
        self.assertNotIn(
            "slug",
            self.get_indexes(Book._meta.db_table),
        )

    def test_primary_key(self):
        """
        Tests altering of the primary key
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the table is there and has the right PK
        self.assertTrue(
            self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
        )
        # Alter to change the PK
        id_field = Tag._meta.get_field("id")
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(primary_key=True)
        new_field.set_attributes_from_name("slug")
        new_field.model = Tag
        with connection.schema_editor() as editor:
            editor.remove_field(Tag, id_field)
            editor.alter_field(Tag, old_field, new_field)
        # Ensure the PK changed
        self.assertNotIn(
            'id',
            self.get_indexes(Tag._meta.db_table),
        )
        self.assertTrue(
            self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
        )

    def test_context_manager_exit(self):
        """
        Ensures transaction is correctly closed when an error occurs
        inside a SchemaEditor context.
        """
        class SomeError(Exception):
            pass
        try:
            with connection.schema_editor():
                raise SomeError
        except SomeError:
            self.assertFalse(connection.in_atomic_block)

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_foreign_key_index_long_names_regression(self):
        """
        Regression test for #21497.
        Only affects databases that supports foreign keys.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Find the properly shortened column name
        column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
        column_name = column_name[1:-1].lower()  # unquote, and, for Oracle, un-upcase
        # Ensure the table is there and has an index on the column
        self.assertIn(
            column_name,
            self.get_indexes(BookWithLongName._meta.db_table),
        )

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_add_foreign_key_long_names(self):
        """
        Regression test for #23009.
        Only affects databases that supports foreign keys.
        """
        # Create the initial tables
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Add a second FK, this would fail due to long ref name before the fix
        new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something")
        new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
        with connection.schema_editor() as editor:
            editor.add_field(BookWithLongName, new_field)

    def test_add_foreign_object(self):
        """Adding a ForeignObject field must not crash the schema editor."""
        with connection.schema_editor() as editor:
            editor.create_model(BookForeignObj)
        new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id'])
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.add_field(BookForeignObj, new_field)

    def test_creation_deletion_reserved_names(self):
        """
        Tries creating a model's table, and then deleting it when it has a
        SQL reserved name.
        """
        # Create the table
        with connection.schema_editor() as editor:
            try:
                editor.create_model(Thing)
            except OperationalError as e:
                self.fail("Errors when applying initial migration for a model "
                          "with a table named after an SQL reserved word: %s" % e)
        # Check that it's there
        list(Thing.objects.all())
        # Clean up that table
        with connection.schema_editor() as editor:
            editor.delete_model(Thing)
        # Check that it's gone
        with self.assertRaises(DatabaseError):
            list(Thing.objects.all())

    def test_remove_constraints_capital_letters(self):
        """
        #23065 - Constraint names must be quoted if they contain capital letters.
        """
        def get_field(*args, **kwargs):
            # Build a field mapped to the "CamelCase" column; field_class
            # defaults to IntegerField.
            kwargs['db_column'] = "CamelCase"
            field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
            field.set_attributes_from_name("CamelCase")
            return field

        model = Author
        field = get_field()
        table = model._meta.db_table
        column = field.column

        with connection.schema_editor() as editor:
            editor.create_model(model)
            editor.add_field(model, field)

            constraint_name = "CamelCaseIndex"
            editor.execute(
                editor.sql_create_index % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name(constraint_name),
                    "using": "",
                    "columns": editor.quote_name(column),
                    "extra": "",
                }
            )
            if connection.features.uppercases_column_names:
                constraint_name = constraint_name.upper()
            self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
            editor.alter_field(model, get_field(db_index=True), field, strict=True)
            self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))

            constraint_name = "CamelCaseUniqConstraint"
            editor.execute(
                editor.sql_create_unique % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name(constraint_name),
                    "columns": editor.quote_name(field.column),
                }
            )
            if connection.features.uppercases_column_names:
                constraint_name = constraint_name.upper()
            self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
            editor.alter_field(model, get_field(unique=True), field, strict=True)
            self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))

            if connection.features.supports_foreign_keys:
                constraint_name = "CamelCaseFKConstraint"
                editor.execute(
                    editor.sql_create_fk % {
                        "table": editor.quote_name(table),
                        "name": editor.quote_name(constraint_name),
                        "column": editor.quote_name(column),
                        "to_table": editor.quote_name(table),
                        "to_column": editor.quote_name(model._meta.auto_field.column),
                        "deferrable": connection.ops.deferrable_sql(),
                    }
                )
                if connection.features.uppercases_column_names:
                    constraint_name = constraint_name.upper()
                self.assertIn(constraint_name, self.get_constraints(model._meta.db_table))
                editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True)
                self.assertNotIn(constraint_name, self.get_constraints(model._meta.db_table))

    def test_add_field_use_effective_default(self):
        """
        #23987 - effective_default() should be used as the field default when
        adding a new field.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row
        Author.objects.create(name='Anonymous1')
        # Add new CharField to ensure default will be used from effective_default
        new_field = CharField(max_length=15, blank=True)
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure field was added with the right default
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')

    def test_add_field_default_dropped(self):
        """Adding a field with a default must not leave the default in the DB."""
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row
Author.objects.create(name='Anonymous1') # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default='surname default') new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], 'surname default') # And that the default is no longer set in the database. field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') self.assertIsNone(Author.objects.get().height) old_field = Author._meta.get_field('height') # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. 
with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_add_textfield_unhashable_default(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') # Create a field that has an unhashable default new_field = TextField(default={}) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.add_field(Author, new_field) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_indexed_charfield(self): field = CharField(max_length=255, db_index=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851', 'schema_author_nom_de_plume_7570a851_like'], ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_add_unique_charfield(self): field = CharField(max_length=255, unique=True) field.set_attributes_from_name('nom_de_plume') with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, 'nom_de_plume'), ['schema_author_nom_de_plume_7570a851_like', 'schema_author_nom_de_plume_key'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_charfield(self): # Create the table and verify no initial indexes. 
with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add db_index=True and create 2 indexes. old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, db_index=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617', 'schema_author_name_1fbc5617_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) # Alter to add unique=True and create 2 indexes. old_field = Author._meta.get_field('name') new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name('name') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, 'name'), ['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq'] ) # Remove unique=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'name'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_index_to_textfield(self): # Create the table and verify no initial indexes. 
with connection.schema_editor() as editor: editor.create_model(Note) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) # Alter to add db_index=True and create 2 indexes. old_field = Note._meta.get_field('info') new_field = TextField(db_index=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Note, 'info'), ['schema_note_info_4b0ea695', 'schema_note_info_4b0ea695_like'] ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Note, 'info'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_unique_to_charfield_with_db_index(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove unique=True (should drop unique index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( 
self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_remove_unique_and_db_index_from_charfield(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove both unique=True and db_index=True (should drop all indexes) new_field2 = CharField(max_length=100) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual(self.get_constraints_for_column(BookWithoutAuthor, 'title'), []) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_swap_unique_and_db_index_with_charfield(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) # Alter to set unique=True and remove db_index=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to set db_index=True and remove unique=True (should restore index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_title_2dfb2dff', 'schema_book_title_2dfb2dff_like'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_db_index_to_charfield_with_unique(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(Tag) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to add db_index=True old_field = Tag._meta.get_field('slug') new_field = SlugField(db_index=True, unique=True) new_field.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to remove db_index=True new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) def test_alter_field_add_index_to_integerfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), []) # Alter to add db_index=True and create index. old_field = Author._meta.get_field('weight') new_field = IntegerField(null=True, db_index=True) new_field.set_attributes_from_name('weight') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) expected = 'schema_author_weight_587740f9' if connection.features.uppercases_column_names: expected = expected.upper() self.assertEqual(self.get_constraints_for_column(Author, 'weight'), [expected]) # Remove db_index=True to drop index. 
with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, 'weight'), []) def test_alter_pk_with_self_referential_field(self): """ Changing the primary key field name of a model with a self-referential foreign key (#26384). """ if connection.vendor == 'mysql' and connection.mysql_version < (5, 6, 6): self.skipTest('Skip known bug renaming primary keys on older MySQL versions (#24995).') old_field = Node._meta.get_field('node_id') new_field = AutoField(primary_key=True) new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Node, old_field, new_field, strict=True) @mock.patch('django.db.backends.base.schema.datetime') @mock.patch('django.db.backends.base.schema.timezone') def test_add_datefield_and_datetimefield_use_effective_default(self, mocked_datetime, mocked_tz): """ effective_default() should be used for DateField, DateTimeField, and TimeField if auto_now or auto_add_now is set (#25005). 
""" now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1) now_tz = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1, tzinfo=UTC()) mocked_datetime.now = mock.MagicMock(return_value=now) mocked_tz.now = mock.MagicMock(return_value=now_tz) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check auto_now/auto_now_add attributes are not defined columns = self.column_classes(Author) self.assertNotIn("dob_auto_now", columns) self.assertNotIn("dob_auto_now_add", columns) self.assertNotIn("dtob_auto_now", columns) self.assertNotIn("dtob_auto_now_add", columns) self.assertNotIn("tob_auto_now", columns) self.assertNotIn("tob_auto_now_add", columns) # Create a row Author.objects.create(name='Anonymous1') # Ensure fields were added with the correct defaults dob_auto_now = DateField(auto_now=True) dob_auto_now.set_attributes_from_name('dob_auto_now') self.check_added_field_default( editor, Author, dob_auto_now, 'dob_auto_now', now.date(), cast_function=lambda x: x.date(), ) dob_auto_now_add = DateField(auto_now_add=True) dob_auto_now_add.set_attributes_from_name('dob_auto_now_add') self.check_added_field_default( editor, Author, dob_auto_now_add, 'dob_auto_now_add', now.date(), cast_function=lambda x: x.date(), ) dtob_auto_now = DateTimeField(auto_now=True) dtob_auto_now.set_attributes_from_name('dtob_auto_now') self.check_added_field_default( editor, Author, dtob_auto_now, 'dtob_auto_now', now, ) dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True) dt_tm_of_birth_auto_now_add.set_attributes_from_name('dtob_auto_now_add') self.check_added_field_default( editor, Author, dt_tm_of_birth_auto_now_add, 'dtob_auto_now_add', now, ) tob_auto_now = TimeField(auto_now=True) tob_auto_now.set_attributes_from_name('tob_auto_now') self.check_added_field_default( editor, Author, tob_auto_now, 'tob_auto_now', now.time(), cast_function=lambda x: x.time(), ) tob_auto_now_add = TimeField(auto_now_add=True) 
tob_auto_now_add.set_attributes_from_name('tob_auto_now_add') self.check_added_field_default( editor, Author, tob_auto_now_add, 'tob_auto_now_add', now.time(), cast_function=lambda x: x.time(), )
bsd-3-clause
cyberintruder/django-cms
cms/test_utils/project/placeholderapp/views.py
37
3569
from django.http import HttpResponse from django.shortcuts import render from django.template import RequestContext from django.template.base import Template from django.views.generic import DetailView from cms.test_utils.project.placeholderapp.models import ( Example1, MultilingualExample1, CharPksExample) from cms.utils import get_language_from_request def example_view(request): context = {} context['examples'] = Example1.objects.all() return render(request, 'placeholderapp.html', context) def _base_detail(request, instance, template_name='detail.html', item_name="char_1", template_string='',): context = {} context['instance'] = instance context['instance_class'] = instance.__class__() context['item_name'] = item_name if hasattr(request, 'toolbar'): request.toolbar.set_object(instance) if template_string: template = Template(template_string) return HttpResponse(template.render(RequestContext(request=request, dict_=context))) else: return render(request, template_name, context) def list_view_multi(request): context = {} context['examples'] = MultilingualExample1.objects.language( get_language_from_request(request)).all() context['instance_class'] = MultilingualExample1 return render(request, 'list.html', context) def detail_view_multi(request, pk, template_name='detail_multi.html', item_name="char_1", template_string='',): instance = MultilingualExample1.objects.language( get_language_from_request(request)).get(pk=pk) return _base_detail(request, instance, template_name, item_name, template_string) def detail_view_multi_unfiltered(request, pk, template_name='detail_multi.html', item_name="char_1", template_string='',): instance = MultilingualExample1.objects.get(pk=pk) return _base_detail(request, instance, template_name, item_name, template_string) def list_view(request): context = {} context['examples'] = Example1.objects.all() context['instance_class'] = Example1 return render(request, 'list.html', context) def detail_view(request, pk, 
template_name='detail.html', item_name="char_1", template_string='',): if request.user.is_staff and request.toolbar: instance = Example1.objects.get(pk=pk) else: instance = Example1.objects.get(pk=pk, publish=True) return _base_detail(request, instance, template_name, item_name, template_string) def detail_view_char(request, pk, template_name='detail.html', item_name="char_1", template_string='',): instance = CharPksExample.objects.get(pk=pk) return _base_detail(request, instance, template_name, item_name, template_string) class ClassDetail(DetailView): model = Example1 template_name = "detail.html" template_string = '' def render_to_response(self, context, **response_kwargs): if self.template_string: context = RequestContext(self.request, context) template = Template(self.template_string) return HttpResponse(template.render(context)) else: return super(ClassDetail, self).render_to_response(context, **response_kwargs) def get_context_data(self, **kwargs): context = super(ClassDetail, self).get_context_data(**kwargs) context['instance_class'] = self.model return context
bsd-3-clause
keulraesik/pyelftools
examples/dwarf_location_lists.py
7
4363
#------------------------------------------------------------------------------- # elftools example: dwarf_location_lists.py # # Examine DIE entries which have location list values, and decode these # location lists. # # Eli Bendersky (eliben@gmail.com) # This code is in the public domain #------------------------------------------------------------------------------- from __future__ import print_function import sys # If pyelftools is not installed, the example can also run from the root or # examples/ dir of the source distribution. sys.path[0:0] = ['.', '..'] from elftools.common.py3compat import itervalues from elftools.elf.elffile import ELFFile from elftools.dwarf.descriptions import ( describe_DWARF_expr, set_global_machine_arch) from elftools.dwarf.locationlists import LocationEntry def process_file(filename): print('Processing file:', filename) with open(filename, 'rb') as f: elffile = ELFFile(f) if not elffile.has_dwarf_info(): print(' file has no DWARF info') return # get_dwarf_info returns a DWARFInfo context object, which is the # starting point for all DWARF-based processing in pyelftools. dwarfinfo = elffile.get_dwarf_info() # The location lists are extracted by DWARFInfo from the .debug_loc # section, and returned here as a LocationLists object. location_lists = dwarfinfo.location_lists() # This is required for the descriptions module to correctly decode # register names contained in DWARF expressions. set_global_machine_arch(elffile.get_machine_arch()) for CU in dwarfinfo.iter_CUs(): # DWARFInfo allows to iterate over the compile units contained in # the .debug_info section. CU is a CompileUnit object, with some # computed attributes (such as its offset in the section) and # a header which conforms to the DWARF standard. The access to # header elements is, as usual, via item-lookup. print(' Found a compile unit at offset %s, length %s' % ( CU.cu_offset, CU['unit_length'])) # A CU provides a simple API to iterate over all the DIEs in it. 
for DIE in CU.iter_DIEs(): # Go over all attributes of the DIE. Each attribute is an # AttributeValue object (from elftools.dwarf.die), which we # can examine. for attr in itervalues(DIE.attributes): if attribute_has_location_list(attr): # This is a location list. Its value is an offset into # the .debug_loc section, so we can use the location # lists object to decode it. loclist = location_lists.get_location_list_at_offset( attr.value) print(' DIE %s. attr %s.\n%s' % ( DIE.tag, attr.name, show_loclist(loclist, dwarfinfo, indent=' '))) def show_loclist(loclist, dwarfinfo, indent): """ Display a location list nicely, decoding the DWARF expressions contained within. """ d = [] for loc_entity in loclist: if isinstance(loc_entity, LocationEntry): d.append('%s <<%s>>' % ( loc_entity, describe_DWARF_expr(loc_entity.loc_expr, dwarfinfo.structs))) else: d.append(str(loc_entity)) return '\n'.join(indent + s for s in d) def attribute_has_location_list(attr): """ Only some attributes can have location list values, if they have the required DW_FORM (loclistptr "class" in DWARF spec v3) """ if (attr.name in ( 'DW_AT_location', 'DW_AT_string_length', 'DW_AT_const_value', 'DW_AT_return_addr', 'DW_AT_data_member_location', 'DW_AT_frame_base', 'DW_AT_segment', 'DW_AT_static_link', 'DW_AT_use_location', 'DW_AT_vtable_elem_location')): if attr.form in ('DW_FORM_data4', 'DW_FORM_data8'): return True return False if __name__ == '__main__': if sys.argv[1] == '--test': for filename in sys.argv[2:]: process_file(filename)
unlicense
JoeWoo/grpc
src/python/grpcio_test/grpc_test/framework/core/_base_interface_test.py
14
3751
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Tests the RPC Framework Core's implementation of the Base interface.""" import logging import random import time import unittest from grpc.framework.core import implementations from grpc.framework.interfaces.base import utilities from grpc_test.framework.common import test_constants from grpc_test.framework.interfaces.base import test_cases from grpc_test.framework.interfaces.base import test_interfaces class _Implementation(test_interfaces.Implementation): def __init__(self): self._invocation_initial_metadata = object() self._service_initial_metadata = object() self._invocation_terminal_metadata = object() self._service_terminal_metadata = object() def instantiate(self, serializations, servicer): invocation = implementations.invocation_end_link() service = implementations.service_end_link( servicer, test_constants.DEFAULT_TIMEOUT, test_constants.MAXIMUM_TIMEOUT) invocation.join_link(service) service.join_link(invocation) return invocation, service, None def destantiate(self, memo): pass def invocation_initial_metadata(self): return self._invocation_initial_metadata def service_initial_metadata(self): return self._service_initial_metadata def invocation_completion(self): return utilities.completion(self._invocation_terminal_metadata, None, None) def service_completion(self): return utilities.completion(self._service_terminal_metadata, None, None) def metadata_transmitted(self, original_metadata, transmitted_metadata): return transmitted_metadata is original_metadata def completion_transmitted(self, original_completion, transmitted_completion): return ( (original_completion.terminal_metadata is transmitted_completion.terminal_metadata) and original_completion.code is transmitted_completion.code and original_completion.message is transmitted_completion.message ) def load_tests(loader, tests, pattern): return unittest.TestSuite( tests=tuple( loader.loadTestsFromTestCase(test_case_class) for test_case_class in test_cases.test_cases(_Implementation()))) if __name__ 
== '__main__': unittest.main(verbosity=2)
bsd-3-clause
nesdis/djongo
tests/django_tests/tests/v21/tests/template_tests/filter_tests/test_default.py
521
2159
from django.template.defaultfilters import default from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class DefaultTests(SimpleTestCase): """ Literal string arguments to the default filter are always treated as safe strings, regardless of the auto-escaping state. Note: we have to use {"a": ""} here, otherwise the invalid template variable string interferes with the test result. """ @setup({'default01': '{{ a|default:"x<" }}'}) def test_default01(self): output = self.engine.render_to_string('default01', {"a": ""}) self.assertEqual(output, "x<") @setup({'default02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) def test_default02(self): output = self.engine.render_to_string('default02', {"a": ""}) self.assertEqual(output, "x<") @setup({'default03': '{{ a|default:"x<" }}'}) def test_default03(self): output = self.engine.render_to_string('default03', {"a": mark_safe("x>")}) self.assertEqual(output, "x>") @setup({'default04': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) def test_default04(self): output = self.engine.render_to_string('default04', {"a": mark_safe("x>")}) self.assertEqual(output, "x>") class DefaultIfNoneTests(SimpleTestCase): @setup({'default_if_none01': '{{ a|default:"x<" }}'}) def test_default_if_none01(self): output = self.engine.render_to_string('default_if_none01', {"a": None}) self.assertEqual(output, "x<") @setup({'default_if_none02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) def test_default_if_none02(self): output = self.engine.render_to_string('default_if_none02', {"a": None}) self.assertEqual(output, "x<") class FunctionTests(SimpleTestCase): def test_value(self): self.assertEqual(default('val', 'default'), 'val') def test_none(self): self.assertEqual(default(None, 'default'), 'default') def test_empty_string(self): self.assertEqual(default('', 'default'), 'default')
agpl-3.0
pburkholder/chef
lib/chef/provider/package/yum-dump.py
63
9728
# # Author:: Matthew Kent (<mkent@magoazul.com>) # Copyright:: Copyright (c) 2009, 2011 Matthew Kent # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # yum-dump.py # Inspired by yumhelper.py by David Lutterkort # # Produce a list of installed, available and re-installable packages using yum # and dump the results to stdout. # # yum-dump invokes yum similarly to the command line interface which makes it # subject to most of the configuration parameters in yum.conf. yum-dump will # also load yum plugins in the same manor as yum - these can affect the output. # # Can be run as non root, but that won't update the cache. 
# # Intended to support yum 2.x and 3.x import os import sys import time import yum import re import errno from yum import Errors from optparse import OptionParser from distutils import version YUM_PID_FILE='/var/run/yum.pid' YUM_VER = version.StrictVersion(yum.__version__) YUM_MAJOR = YUM_VER.version[0] if YUM_MAJOR > 3 or YUM_MAJOR < 2: print >> sys.stderr, "yum-dump Error: Can't match supported yum version" \ " (%s)" % yum.__version__ sys.exit(1) # Required for Provides output if YUM_MAJOR == 2: import rpm import rpmUtils.miscutils def setup(yb, options): # Only want our output # if YUM_MAJOR == 3: try: if YUM_VER >= version.StrictVersion("3.2.22"): yb.preconf.errorlevel=0 yb.preconf.debuglevel=0 # initialize the config yb.conf else: yb.doConfigSetup(errorlevel=0, debuglevel=0) except yum.Errors.ConfigError, e: # suppresses an ignored exception at exit yb.preconf = None print >> sys.stderr, "yum-dump Config Error: %s" % e return 1 except ValueError, e: yb.preconf = None print >> sys.stderr, "yum-dump Options Error: %s" % e return 1 elif YUM_MAJOR == 2: yb.doConfigSetup() def __log(a,b): pass yb.log = __log yb.errorlog = __log # Give Chef every possible package version, it can decide what to do with them if YUM_MAJOR == 3: yb.conf.showdupesfromrepos = True elif YUM_MAJOR == 2: yb.conf.setConfigOption('showdupesfromrepos', True) # Optionally run only on cached repositories, but non root must use the cache if os.geteuid() != 0: if YUM_MAJOR == 3: yb.conf.cache = True elif YUM_MAJOR == 2: yb.conf.setConfigOption('cache', True) else: if YUM_MAJOR == 3: yb.conf.cache = options.cache elif YUM_MAJOR == 2: yb.conf.setConfigOption('cache', options.cache) # Handle repo toggle via id or glob exactly like yum for opt, repos in options.repo_control: for repo in repos: if opt == '--enablerepo': yb.repos.enableRepo(repo) elif opt == '--disablerepo': yb.repos.disableRepo(repo) return 0 def dump_packages(yb, list, output_provides): packages = {} if YUM_MAJOR == 2: yb.doTsSetup() 
yb.doRepoSetup() yb.doSackSetup() db = yb.doPackageLists(list) for pkg in db.installed: pkg.type = 'i' packages[str(pkg)] = pkg if YUM_VER >= version.StrictVersion("3.2.21"): for pkg in db.available: pkg.type = 'a' packages[str(pkg)] = pkg # These are both installed and available for pkg in db.reinstall_available: pkg.type = 'r' packages[str(pkg)] = pkg else: # Old style method - no reinstall list for pkg in yb.pkgSack.returnPackages(): if str(pkg) in packages: if packages[str(pkg)].type == "i": packages[str(pkg)].type = 'r' continue pkg.type = 'a' packages[str(pkg)] = pkg unique_packages = packages.values() unique_packages.sort(lambda x, y: cmp(x.name, y.name)) for pkg in unique_packages: if output_provides == "all" or \ (output_provides == "installed" and (pkg.type == "i" or pkg.type == "r")): # yum 2 doesn't have provides_print, implement it ourselves using methods # based on requires gathering in packages.py if YUM_MAJOR == 2: provlist = [] # Installed and available are gathered in different ways if pkg.type == 'i' or pkg.type == 'r': names = pkg.hdr[rpm.RPMTAG_PROVIDENAME] flags = pkg.hdr[rpm.RPMTAG_PROVIDEFLAGS] ver = pkg.hdr[rpm.RPMTAG_PROVIDEVERSION] if names is not None: tmplst = zip(names, flags, ver) for (n, f, v) in tmplst: prov = rpmUtils.miscutils.formatRequire(n, v, f) provlist.append(prov) # This is slow :( elif pkg.type == 'a': for prcoTuple in pkg.returnPrco('provides'): prcostr = pkg.prcoPrintable(prcoTuple) provlist.append(prcostr) provides = provlist else: provides = pkg.provides_print else: provides = "[]" print '%s %s %s %s %s %s %s %s' % ( pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch, provides, pkg.type, pkg.repoid ) return 0 def yum_dump(options): lock_obtained = False yb = yum.YumBase() status = setup(yb, options) if status != 0: return status if options.output_options: print "[option installonlypkgs] %s" % " ".join(yb.conf.installonlypkgs) # Non root can't handle locking on rhel/centos 4 if os.geteuid() != 0: return 
dump_packages(yb, options.package_list, options.output_provides) # Wrap the collection and output of packages in yum's global lock to prevent # any inconsistencies. try: # Spin up to --yum-lock-timeout option countdown = options.yum_lock_timeout while True: try: yb.doLock(YUM_PID_FILE) lock_obtained = True except Errors.LockError, e: time.sleep(1) countdown -= 1 if countdown == 0: print >> sys.stderr, "yum-dump Locking Error! Couldn't obtain an " \ "exclusive yum lock in %d seconds. Giving up." % options.yum_lock_timeout return 200 else: break return dump_packages(yb, options.package_list, options.output_provides) # Ensure we clear the lock and cleanup any resources finally: try: yb.closeRpmDB() if lock_obtained == True: yb.doUnlock(YUM_PID_FILE) except Errors.LockError, e: print >> sys.stderr, "yum-dump Unlock Error: %s" % e return 200 # Preserve order of enable/disable repo args like yum does def gather_repo_opts(option, opt, value, parser): if getattr(parser.values, option.dest, None) is None: setattr(parser.values, option.dest, []) getattr(parser.values, option.dest).append((opt, value.split(','))) def main(): usage = "Usage: %prog [options]\n" + \ "Output a list of installed, available and re-installable packages via yum" parser = OptionParser(usage=usage) parser.add_option("-C", "--cache", action="store_true", dest="cache", default=False, help="run entirely from cache, don't update cache") parser.add_option("-o", "--options", action="store_true", dest="output_options", default=False, help="output select yum options useful to Chef") parser.add_option("-p", "--installed-provides", action="store_const", const="installed", dest="output_provides", default="none", help="output Provides for installed packages, big/wide output") parser.add_option("-P", "--all-provides", action="store_const", const="all", dest="output_provides", default="none", help="output Provides for all package, slow, big/wide output") parser.add_option("-i", "--installed", action="store_const", 
const="installed", dest="package_list", default="all", help="output only installed packages") parser.add_option("-a", "--available", action="store_const", const="available", dest="package_list", default="all", help="output only available and re-installable packages") parser.add_option("--enablerepo", action="callback", callback=gather_repo_opts, type="string", dest="repo_control", default=[], help="enable disabled repositories by id or glob") parser.add_option("--disablerepo", action="callback", callback=gather_repo_opts, type="string", dest="repo_control", default=[], help="disable repositories by id or glob") parser.add_option("--yum-lock-timeout", action="store", type="int", dest="yum_lock_timeout", default=30, help="Time in seconds to wait for yum process lock") (options, args) = parser.parse_args() try: return yum_dump(options) except yum.Errors.RepoError, e: print >> sys.stderr, "yum-dump Repository Error: %s" % e return 1 except yum.Errors.YumBaseError, e: print >> sys.stderr, "yum-dump General Error: %s" % e return 1 try: status = main() # Suppress a nasty broken pipe error when output is piped to utilities like 'head' except IOError, e: if e.errno == errno.EPIPE: sys.exit(1) else: raise sys.exit(status)
apache-2.0
jagguli/intellij-community
python/testData/inspections/PyRedeclarationInspection/test.py
54
1786
def test_class(): class X: pass class <warning descr="Redeclared 'X' defined above without usage">X</warning>: pass def test_function(): def foo(): pass def <warning descr="Redeclared 'foo' defined above without usage">foo</warning>(): pass # Top-level variable test def TopLevelBoo(): pass <warning descr="Redeclared 'TopLevelBoo' defined above without usage">TopLevelBoo</warning> = 1 <warning descr="Redeclared 'TopLevelBoo' defined above without usage">TopLevelBoo</warning> = 2 class <warning descr="Redeclared 'TopLevelBoo' defined above without usage">TopLevelBoo</warning>: pass def test_decorated_function(decorator): def foo(): pass @decorator def foo(): pass def <warning descr="Redeclared 'foo' defined above without usage">foo</warning>(): pass def test_local_variable(): x = 1 x = 2 def test_conditional(c): def foo(): pass if c: def foo(): pass try: def foo(): pass except: pass def test_while_loop(c): def foo(): pass while c: def <warning descr="Redeclared 'foo' defined above without usage">foo</warning>(): pass class TestForLoopNoRedeclaraion: for foo in [1, 2, 3]: x = 1 class TestForLoopTarget: def foo(): pass for <warning descr="Redeclared 'foo' defined above without usage">foo</warning> in [1, 2, 3]: x = 1 class TestForLoopBody: def foo(): pass for _ in [1, 2, 3]: def <warning descr="Redeclared 'foo' defined above without usage">foo</warning>(): pass # PY-10839 class TestNestedComprehension: x = [[n for _ in []] for n in []] n = 2
apache-2.0
javaes/sailfish-msg-importer
tests/test_SMSImporter.py
1
1478
import unittest from sail_sms import SMSImporter, SMSParser class SMSImporterTest(unittest.TestCase): def setUp(self): parser = SMSParser("assets/samples.xml") self.sms_list = parser.get_all_sms_in_sf_format() self.importer = SMSImporter("assets/test_commhistory.db") def tearDown(self): self.importer.remove_all_groups_and_msgs() def test_import_received_sms(self): self.importer.import_sms(self.sms_list[1]) self.assertEqual(self.importer.get_msg_count(), 1) self.assertEqual(self.importer.get_group_count(), 1) def test_import_sent_sms(self): self.importer.import_sms(self.sms_list[0]) self.assertEqual(self.importer.get_msg_count(), 1) self.assertEqual(self.importer.get_group_count(), 1) def test_import_sms_from_two_different_remote_uids(self): for sms in self.sms_list: self.importer.import_sms(sms) self.assertEqual(self.importer.get_msg_count(), len(self.sms_list)) self.assertEqual(self.importer.get_group_count(), 2) def test_remove_all_groups_and_msgs(self): for sms in self.sms_list: self.importer.import_sms(sms) self.importer.remove_all_groups_and_msgs() self.assertEqual(self.importer.get_msg_count(), 0) self.assertEqual(self.importer.get_group_count(), 0) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
apache-2.0
vgan/soiqbot
tweepy/models.py
56
14021
# Tweepy # Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. from __future__ import absolute_import, print_function from tweepy.utils import parse_datetime, parse_html_value, parse_a_href class ResultSet(list): """A list like object that holds results from a Twitter API query.""" def __init__(self, max_id=None, since_id=None): super(ResultSet, self).__init__() self._max_id = max_id self._since_id = since_id @property def max_id(self): if self._max_id: return self._max_id ids = self.ids() # Max_id is always set to the *smallest* id, minus one, in the set return (min(ids) - 1) if ids else None @property def since_id(self): if self._since_id: return self._since_id ids = self.ids() # Since_id is always set to the *greatest* id in the set return max(ids) if ids else None def ids(self): return [item.id for item in self if hasattr(item, 'id')] class Model(object): def __init__(self, api=None): self._api = api def __getstate__(self): # pickle pickle = dict(self.__dict__) try: del pickle['_api'] # do not pickle the API reference except KeyError: pass return pickle @classmethod def parse(cls, api, json): """Parse a JSON object into a model instance.""" raise NotImplementedError @classmethod def parse_list(cls, api, json_list): """ Parse a list of JSON objects into a result set of model instances. 
""" results = ResultSet() for obj in json_list: if obj: results.append(cls.parse(api, obj)) return results def __repr__(self): state = ['%s=%s' % (k, repr(v)) for (k, v) in vars(self).items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(state)) class Status(Model): @classmethod def parse(cls, api, json): status = cls(api) setattr(status, '_json', json) for k, v in json.items(): if k == 'user': user_model = getattr(api.parser.model_factory, 'user') if api else User user = user_model.parse(api, v) setattr(status, 'author', user) setattr(status, 'user', user) # DEPRECIATED elif k == 'created_at': setattr(status, k, parse_datetime(v)) elif k == 'source': if '<' in v: setattr(status, k, parse_html_value(v)) setattr(status, 'source_url', parse_a_href(v)) else: setattr(status, k, v) setattr(status, 'source_url', None) elif k == 'retweeted_status': setattr(status, k, Status.parse(api, v)) elif k == 'place': if v is not None: setattr(status, k, Place.parse(api, v)) else: setattr(status, k, None) else: setattr(status, k, v) return status def destroy(self): return self._api.destroy_status(self.id) def retweet(self): return self._api.retweet(self.id) def retweets(self): return self._api.retweets(self.id) def favorite(self): return self._api.create_favorite(self.id) def __eq__(self, other): if isinstance(other, Status): return self.id == other.id return NotImplemented def __ne__(self, other): result = self == other if result is NotImplemented: return result return not result class User(Model): @classmethod def parse(cls, api, json): user = cls(api) setattr(user, '_json', json) for k, v in json.items(): if k == 'created_at': setattr(user, k, parse_datetime(v)) elif k == 'status': setattr(user, k, Status.parse(api, v)) elif k == 'following': # twitter sets this to null if it is false if v is True: setattr(user, k, True) else: setattr(user, k, False) else: setattr(user, k, v) return user @classmethod def parse_list(cls, api, json_list): if isinstance(json_list, list): 
item_list = json_list else: item_list = json_list['users'] results = ResultSet() for obj in item_list: results.append(cls.parse(api, obj)) return results def timeline(self, **kargs): return self._api.user_timeline(user_id=self.id, **kargs) def friends(self, **kargs): return self._api.friends(user_id=self.id, **kargs) def followers(self, **kargs): return self._api.followers(user_id=self.id, **kargs) def follow(self): self._api.create_friendship(user_id=self.id) self.following = True def unfollow(self): self._api.destroy_friendship(user_id=self.id) self.following = False def lists_memberships(self, *args, **kargs): return self._api.lists_memberships(user=self.screen_name, *args, **kargs) def lists_subscriptions(self, *args, **kargs): return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs) def lists(self, *args, **kargs): return self._api.lists_all(user=self.screen_name, *args, **kargs) def followers_ids(self, *args, **kargs): return self._api.followers_ids(user_id=self.id, *args, **kargs) class DirectMessage(Model): @classmethod def parse(cls, api, json): dm = cls(api) for k, v in json.items(): if k == 'sender' or k == 'recipient': setattr(dm, k, User.parse(api, v)) elif k == 'created_at': setattr(dm, k, parse_datetime(v)) else: setattr(dm, k, v) return dm def destroy(self): return self._api.destroy_direct_message(self.id) class Friendship(Model): @classmethod def parse(cls, api, json): relationship = json['relationship'] # parse source source = cls(api) for k, v in relationship['source'].items(): setattr(source, k, v) # parse target target = cls(api) for k, v in relationship['target'].items(): setattr(target, k, v) return source, target class Category(Model): @classmethod def parse(cls, api, json): category = cls(api) for k, v in json.items(): setattr(category, k, v) return category class SavedSearch(Model): @classmethod def parse(cls, api, json): ss = cls(api) for k, v in json.items(): if k == 'created_at': setattr(ss, k, parse_datetime(v)) 
else: setattr(ss, k, v) return ss def destroy(self): return self._api.destroy_saved_search(self.id) class SearchResults(ResultSet): @classmethod def parse(cls, api, json): metadata = json['search_metadata'] results = SearchResults() results.refresh_url = metadata.get('refresh_url') results.completed_in = metadata.get('completed_in') results.query = metadata.get('query') results.count = metadata.get('count') results.next_results = metadata.get('next_results') status_model = getattr(api.parser.model_factory, 'status') if api else Status for status in json['statuses']: results.append(status_model.parse(api, status)) return results class List(Model): @classmethod def parse(cls, api, json): lst = List(api) for k, v in json.items(): if k == 'user': setattr(lst, k, User.parse(api, v)) elif k == 'created_at': setattr(lst, k, parse_datetime(v)) else: setattr(lst, k, v) return lst @classmethod def parse_list(cls, api, json_list, result_set=None): results = ResultSet() if isinstance(json_list, dict): json_list = json_list['lists'] for obj in json_list: results.append(cls.parse(api, obj)) return results def update(self, **kargs): return self._api.update_list(self.slug, **kargs) def destroy(self): return self._api.destroy_list(self.slug) def timeline(self, **kargs): return self._api.list_timeline(self.user.screen_name, self.slug, **kargs) def add_member(self, id): return self._api.add_list_member(self.slug, id) def remove_member(self, id): return self._api.remove_list_member(self.slug, id) def members(self, **kargs): return self._api.list_members(self.user.screen_name, self.slug, **kargs) def is_member(self, id): return self._api.is_list_member(self.user.screen_name, self.slug, id) def subscribe(self): return self._api.subscribe_list(self.user.screen_name, self.slug) def unsubscribe(self): return self._api.unsubscribe_list(self.user.screen_name, self.slug) def subscribers(self, **kargs): return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs) def 
is_subscribed(self, id): return self._api.is_subscribed_list(self.user.screen_name, self.slug, id) class Relation(Model): @classmethod def parse(cls, api, json): result = cls(api) for k, v in json.items(): if k == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']: setattr(result, k, Status.parse(api, v)) elif k == 'results': setattr(result, k, Relation.parse_list(api, v)) else: setattr(result, k, v) return result class Relationship(Model): @classmethod def parse(cls, api, json): result = cls(api) for k, v in json.items(): if k == 'connections': setattr(result, 'is_following', 'following' in v) setattr(result, 'is_followed_by', 'followed_by' in v) else: setattr(result, k, v) return result class JSONModel(Model): @classmethod def parse(cls, api, json): return json class IDModel(Model): @classmethod def parse(cls, api, json): if isinstance(json, list): return json else: return json['ids'] class BoundingBox(Model): @classmethod def parse(cls, api, json): result = cls(api) if json is not None: for k, v in json.items(): setattr(result, k, v) return result def origin(self): """ Return longitude, latitude of southwest (bottom, left) corner of bounding box, as a tuple. This assumes that bounding box is always a rectangle, which appears to be the case at present. """ return tuple(self.coordinates[0][0]) def corner(self): """ Return longitude, latitude of northeast (top, right) corner of bounding box, as a tuple. This assumes that bounding box is always a rectangle, which appears to be the case at present. """ return tuple(self.coordinates[0][2]) class Place(Model): @classmethod def parse(cls, api, json): place = cls(api) for k, v in json.items(): if k == 'bounding_box': # bounding_box value may be null (None.) # Example: "United States" (id=96683cc9126741d1) if v is not None: t = BoundingBox.parse(api, v) else: t = v setattr(place, k, t) elif k == 'contained_within': # contained_within is a list of Places. 
setattr(place, k, Place.parse_list(api, v)) else: setattr(place, k, v) return place @classmethod def parse_list(cls, api, json_list): if isinstance(json_list, list): item_list = json_list else: item_list = json_list['result']['places'] results = ResultSet() for obj in item_list: results.append(cls.parse(api, obj)) return results class Media(Model): @classmethod def parse(cls, api, json): media = cls(api) for k, v in json.items(): setattr(media, k, v) return media class ModelFactory(object): """ Used by parsers for creating instances of models. You may subclass this factory to add your own extended models. """ status = Status user = User direct_message = DirectMessage friendship = Friendship saved_search = SavedSearch search_results = SearchResults category = Category list = List relation = Relation relationship = Relationship media = Media json = JSONModel ids = IDModel place = Place bounding_box = BoundingBox
mit
ecsnavarretemit/sarai-interactive-maps-backend
app/views/chirps.py
1
9046
# chirps.py # # Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph> # Licensed under MIT # Version 1.0.0-alpha6 import ee import csv import StringIO from datetime import datetime from flask import Blueprint, jsonify, abort, request, make_response from flask_cors import cross_origin from app import EE_CREDENTIALS, cache, app from app.gzipped import gzipped mod = Blueprint('chirps', __name__, url_prefix='/chirps') def accumulate(image, ee_list): previous = ee.Image(ee.List(ee_list).get(-1)) added = image.add(previous).set('system:time_start', image.get('system:time_start')) return ee.List(ee_list).add(added) def cumulative_mapper(item): timestamp = item[3] / 1000 rainfall_0p = item[4] rainfall = item[5] # round to 2 decimal places if it has value if rainfall is not None: rainfall = round(rainfall, 2) return { 'time': datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d'), 'rainfall_0p': rainfall_0p, 'rainfall': rainfall } def rainfall_mapper(item): timestamp = item[3] / 1000 rainfall = item[4] # round to 2 decimal places if it has value if rainfall is not None: rainfall = round(rainfall, 2) return { 'time': datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d'), 'rainfall': rainfall } def rainfall_clipper(image): ft = "ft:%s" % app.config['PROVINCES_FT']['LOCATION_METADATA_FUSION_TABLE'] province = ee.FeatureCollection(ft) place = request.args.get('place') return image.clip( province.filter(ee.Filter.eq(app.config['PROVINCES_FT']['LOCATION_FUSION_TABLE_NAME_COLUMN'], place)) .geometry() ) def rainfall_cache_key(*args, **kwargs): path = request.path args = str(hash(frozenset(request.args.items()))) return (path + args).encode('utf-8') def query_daily_rainfall_data(lat, lng, start_date, end_date): cache_key = 'rainfall_daily_rain_%s_%s_%s_%s' % (lat, lng, start_date, end_date) final_result = cache.get(cache_key) if final_result is None: ee.Initialize(EE_CREDENTIALS) # create a geometry point instance for cropping data later point = 
ee.Geometry.Point(float(lng), float(lat)) image_collection = ee.ImageCollection('UCSB-CHG/CHIRPS/PENTAD') filtering_result = image_collection.filterDate(start_date, end_date) # check if there are features retrieved if len(filtering_result.getInfo()['features']) == 0: return None # precipitation should be casted to float or else # it will throw error about incompatible types result = filtering_result.cast({'precipitation': 'float'}, ['precipitation']).getRegion(point, 500).getInfo() # remove the headers from the result.pop(0) # transform the data final_result = map(rainfall_mapper, result) # cache it for 12 hours cache.set(cache_key, final_result, timeout=43200) return final_result def query_cumulative_rainfall_data(lat, lng, start_date, end_date): cache_key = 'rainfall_cum_rain_%s_%s_%s_%s' % (lat, lng, start_date, end_date) final_result = cache.get(cache_key) if final_result is None: ee.Initialize(EE_CREDENTIALS) # create a geometry point instance for cropping data later point = ee.Geometry.Point(float(lng), float(lat)) image_collection = ee.ImageCollection('UCSB-CHG/CHIRPS/PENTAD') filtering_result = image_collection.filterDate(start_date, end_date) # check if there are features retrieved if len(filtering_result.getInfo()['features']) == 0: return None time0 = filtering_result.first().get('system:time_start') first = ee.List([ ee.Image(0).set('system:time_start', time0).select([0], ['precip']) ]) cumulative = ee.ImageCollection(ee.List(filtering_result.iterate(accumulate, first))) # precipitation should be casted to float or else # it will throw error about incompatible types result = cumulative.cast({'precipitation': 'float', 'precip': 'float'}, ['precip', 'precipitation']).getRegion(point, 500).getInfo() # remove the headers from the result.pop(0) # transform the data final_result = map(cumulative_mapper, result) # delete the first item if the rainfall_0p is not none if final_result[0]['rainfall_0p'] is not None: final_result.pop(0) # remove the rainfall_0p for 
item in final_result: item.pop('rainfall_0p', None) # cache it for 12 hours cache.set(cache_key, final_result, timeout=43200) return final_result # cache the result of this endpoint for 12 hours @mod.route('/<start_date>/<end_date>', methods=['GET']) @cross_origin() @gzipped @cache.cached(timeout=43200, key_prefix=rainfall_cache_key) def index(start_date, end_date): ee.Initialize(EE_CREDENTIALS) geometry = ee.Geometry.Polygon( ee.List([ [127.94248139921513, 5.33459854167601], [126.74931782819613, 11.825234466620996], [124.51107186428203, 17.961503806746318], [121.42999903167879, 19.993626604011016], [118.25656974884657, 18.2117821750514], [116.27168958893185, 6.817365082528201], [122.50121143769957, 3.79887124351577], [127.94248139921513, 5.33459854167601] ]), 'EPSG:4326', True ) image_collection = ee.ImageCollection('UCSB-CHG/CHIRPS/PENTAD') image = image_collection.filterDate(start_date, end_date) if request.args.get('place') is not None: image = image.map(rainfall_clipper) new_image = image.median().clip(geometry) try: rainfall = new_image.select('precipitation') visualization_styles = { 'min': 0, 'max': 100, 'opacity': 0.4, 'palette': 'ff0000, ff6900, ffff00, 62ff00, 00ff00' } map_object = rainfall.getMapId(visualization_styles) map_id = map_object['mapid'] map_token = map_object['token'] # assemble the resulting response result = { 'success': True, 'mapId': map_id, 'mapToken': map_token } except ee.ee_exception.EEException: abort(404, 'Rainfall data not found.') return jsonify(**result) # cache the result of this endpoint for 12 hours @mod.route('/daily-rainfall/<lat>/<lng>/<start_date>/<end_date>', methods=['GET']) @cross_origin() @gzipped def daily_rainfall(lat, lng, start_date, end_date): query_result = query_daily_rainfall_data(lat, lng, start_date, end_date) output_format = 'json' available_formats = ['json', 'csv'] requested_format = request.args.get('fmt') if requested_format is not None: # abort the request and throw HTTP 400 since the format # is not 
on the list of available formats if not requested_format in available_formats: abort(400, 'Unsupported format') # override the default output format output_format = requested_format # abort the request if the query_result contains None value if query_result is None: abort(404, 'Rainfall data not found') response = None if output_format == 'json': json_result = { 'success': True, 'result': query_result } response = jsonify(**json_result) else: si = StringIO.StringIO() cw = csv.writer(si) cw.writerow(['Date', 'Precipitation']) for value in query_result: cw.writerow([ value['time'], value['rainfall'] ]) filename = 'dailt-rainfall-%s-%s-%s-%s' % (lat, lng, start_date, end_date) response = make_response(si.getvalue()) response.headers['Content-Disposition'] = 'attachment; filename=%s.csv' % filename response.headers['Content-type'] = 'text/csv' return response # cache the result of this endpoint for 12 hours @mod.route('/cumulative-rainfall/<lat>/<lng>/<start_date>/<end_date>', methods=['GET']) @cross_origin() @gzipped def cumulative_rainfall(lat, lng, start_date, end_date): query_result = query_cumulative_rainfall_data(lat, lng, start_date, end_date) output_format = 'json' available_formats = ['json', 'csv'] requested_format = request.args.get('fmt') if requested_format is not None: # abort the request and throw HTTP 400 since the format # is not on the list of available formats if not requested_format in available_formats: abort(400, 'Unsupported format') # override the default output format output_format = requested_format # abort the request if the query_result contains None value if query_result is None: abort(404, 'Rainfall data not found') response = None if output_format == 'json': json_result = { 'success': True, 'result': query_result } response = jsonify(**json_result) else: si = StringIO.StringIO() cw = csv.writer(si) cw.writerow(['Date', 'Precipitation']) for value in query_result: cw.writerow([ value['time'], value['rainfall'] ]) filename = 
'cumulative-rainfall-%s-%s-%s-%s' % (lat, lng, start_date, end_date) response = make_response(si.getvalue()) response.headers['Content-Disposition'] = 'attachment; filename=%s.csv' % filename response.headers['Content-type'] = 'text/csv' return response
mit
yarikoptic/scrapy
tests/test_downloadermiddleware_robotstxt.py
58
6754
from __future__ import absolute_import import re from twisted.internet import reactor, error from twisted.internet.defer import Deferred, DeferredList, maybeDeferred from twisted.python import failure from twisted.trial import unittest from scrapy.downloadermiddlewares.robotstxt import (RobotsTxtMiddleware, logger as mw_module_logger) from scrapy.exceptions import IgnoreRequest, NotConfigured from scrapy.http import Request, Response, TextResponse from scrapy.settings import Settings from tests import mock class RobotsTxtMiddlewareTest(unittest.TestCase): def setUp(self): self.crawler = mock.MagicMock() self.crawler.settings = Settings() self.crawler.engine.download = mock.MagicMock() def tearDown(self): del self.crawler def test_robotstxt_settings(self): self.crawler.settings = Settings() self.crawler.settings.set('USER_AGENT', 'CustomAgent') self.assertRaises(NotConfigured, RobotsTxtMiddleware, self.crawler) def _get_successful_crawler(self): crawler = self.crawler crawler.settings.set('ROBOTSTXT_OBEY', True) ROBOTS = re.sub(b'^\s+(?m)', b'', b''' User-Agent: * Disallow: /admin/ Disallow: /static/ ''') response = TextResponse('http://site.local/robots.txt', body=ROBOTS) def return_response(request, spider): deferred = Deferred() reactor.callFromThread(deferred.callback, response) return deferred crawler.engine.download.side_effect = return_response return crawler def test_robotstxt(self): middleware = RobotsTxtMiddleware(self._get_successful_crawler()) return DeferredList([ self.assertNotIgnored(Request('http://site.local/allowed'), middleware), self.assertIgnored(Request('http://site.local/admin/main'), middleware), self.assertIgnored(Request('http://site.local/static/'), middleware) ], fireOnOneErrback=True) def test_robotstxt_ready_parser(self): middleware = RobotsTxtMiddleware(self._get_successful_crawler()) d = self.assertNotIgnored(Request('http://site.local/allowed'), middleware) d.addCallback(lambda _: 
self.assertNotIgnored(Request('http://site.local/allowed'), middleware)) return d def test_robotstxt_meta(self): middleware = RobotsTxtMiddleware(self._get_successful_crawler()) meta = {'dont_obey_robotstxt': True} return DeferredList([ self.assertNotIgnored(Request('http://site.local/allowed', meta=meta), middleware), self.assertNotIgnored(Request('http://site.local/admin/main', meta=meta), middleware), self.assertNotIgnored(Request('http://site.local/static/', meta=meta), middleware) ], fireOnOneErrback=True) def _get_garbage_crawler(self): crawler = self.crawler crawler.settings.set('ROBOTSTXT_OBEY', True) response = Response('http://site.local/robots.txt', body=b'GIF89a\xd3\x00\xfe\x00\xa2') def return_response(request, spider): deferred = Deferred() reactor.callFromThread(deferred.callback, response) return deferred crawler.engine.download.side_effect = return_response return crawler def test_robotstxt_garbage(self): # garbage response should be discarded, equal 'allow all' middleware = RobotsTxtMiddleware(self._get_garbage_crawler()) deferred = DeferredList([ self.assertNotIgnored(Request('http://site.local'), middleware), self.assertNotIgnored(Request('http://site.local/allowed'), middleware), self.assertNotIgnored(Request('http://site.local/admin/main'), middleware), self.assertNotIgnored(Request('http://site.local/static/'), middleware) ], fireOnOneErrback=True) return deferred def _get_emptybody_crawler(self): crawler = self.crawler crawler.settings.set('ROBOTSTXT_OBEY', True) response = Response('http://site.local/robots.txt') def return_response(request, spider): deferred = Deferred() reactor.callFromThread(deferred.callback, response) return deferred crawler.engine.download.side_effect = return_response return crawler def test_robotstxt_empty_response(self): # empty response should equal 'allow all' middleware = RobotsTxtMiddleware(self._get_emptybody_crawler()) return DeferredList([ self.assertNotIgnored(Request('http://site.local/allowed'), 
middleware), self.assertNotIgnored(Request('http://site.local/admin/main'), middleware), self.assertNotIgnored(Request('http://site.local/static/'), middleware) ], fireOnOneErrback=True) def test_robotstxt_error(self): self.crawler.settings.set('ROBOTSTXT_OBEY', True) err = error.DNSLookupError('Robotstxt address not found') def return_failure(request, spider): deferred = Deferred() reactor.callFromThread(deferred.errback, failure.Failure(err)) return deferred self.crawler.engine.download.side_effect = return_failure middleware = RobotsTxtMiddleware(self.crawler) middleware._logerror = mock.MagicMock(side_effect=middleware._logerror) deferred = middleware.process_request(Request('http://site.local'), None) deferred.addCallback(lambda _: self.assertTrue(middleware._logerror.called)) return deferred def test_ignore_robotstxt_request(self): self.crawler.settings.set('ROBOTSTXT_OBEY', True) def ignore_request(request, spider): deferred = Deferred() reactor.callFromThread(deferred.errback, failure.Failure(IgnoreRequest())) return deferred self.crawler.engine.download.side_effect = ignore_request middleware = RobotsTxtMiddleware(self.crawler) mw_module_logger.error = mock.MagicMock() d = self.assertNotIgnored(Request('http://site.local/allowed'), middleware) d.addCallback(lambda _: self.assertFalse(mw_module_logger.error.called)) return d def assertNotIgnored(self, request, middleware): spider = None # not actually used dfd = maybeDeferred(middleware.process_request, request, spider) dfd.addCallback(self.assertIsNone) return dfd def assertIgnored(self, request, middleware): spider = None # not actually used return self.assertFailure(maybeDeferred(middleware.process_request, request, spider), IgnoreRequest)
bsd-3-clause
repuestos-365/server
client/node_modules/node-gyp/gyp/pylib/gyp/input.py
713
115880
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from compiler.ast import Const from compiler.ast import Dict from compiler.ast import Discard from compiler.ast import List from compiler.ast import Module from compiler.ast import Node from compiler.ast import Stmt import compiler import gyp.common import gyp.simple_copy import multiprocessing import optparse import os.path import re import shlex import signal import subprocess import sys import threading import time import traceback from gyp.common import GypError from gyp.common import OrderedSet # A list of types that are treated as linkable. linkable_types = [ 'executable', 'shared_library', 'loadable_module', 'mac_kernel_extension', ] # A list of sections that contain links to other targets. dependency_sections = ['dependencies', 'export_dependent_settings'] # base_path_sections is a list of sections defined by GYP that contain # pathnames. The generators can provide more keys, the two lists are merged # into path_sections, but you should call IsPathSection instead of using either # list directly. base_path_sections = [ 'destination', 'files', 'include_dirs', 'inputs', 'libraries', 'outputs', 'sources', ] path_sections = set() # These per-process dictionaries are used to cache build file data when loading # in parallel mode. per_process_data = {} per_process_aux_data = {} def IsPathSection(section): # If section ends in one of the '=+?!' characters, it's applied to a section # without the trailing characters. '/' is notably absent from this list, # because there's no way for a regular expression to be treated as a path. while section and section[-1:] in '=+?!': section = section[:-1] if section in path_sections: return True # Sections mathing the regexp '_(dir|file|path)s?$' are also # considered PathSections. 
Using manual string matching since that # is much faster than the regexp and this can be called hundreds of # thousands of times so micro performance matters. if "_" in section: tail = section[-6:] if tail[-1] == 's': tail = tail[:-1] if tail[-5:] in ('_file', '_path'): return True return tail[-4:] == '_dir' return False # base_non_configuration_keys is a list of key names that belong in the target # itself and should not be propagated into its configurations. It is merged # with a list that can come from the generator to # create non_configuration_keys. base_non_configuration_keys = [ # Sections that must exist inside targets and not configurations. 'actions', 'configurations', 'copies', 'default_configuration', 'dependencies', 'dependencies_original', 'libraries', 'postbuilds', 'product_dir', 'product_extension', 'product_name', 'product_prefix', 'rules', 'run_as', 'sources', 'standalone_static_library', 'suppress_wildcard', 'target_name', 'toolset', 'toolsets', 'type', # Sections that can be found inside targets or configurations, but that # should not be propagated from targets into their configurations. 'variables', ] non_configuration_keys = [] # Keys that do not belong inside a configuration dictionary. invalid_configuration_keys = [ 'actions', 'all_dependent_settings', 'configurations', 'dependencies', 'direct_dependent_settings', 'libraries', 'link_settings', 'sources', 'standalone_static_library', 'target_name', 'type', ] # Controls whether or not the generator supports multiple toolsets. multiple_toolsets = False # Paths for converting filelist paths to output paths: { # toplevel, # qualified_output_dir, # } generator_filelist_paths = None def GetIncludedBuildFiles(build_file_path, aux_data, included=None): """Return a list of all build files included into build_file_path. The returned list will contain build_file_path as well as all other files that it included, either directly or indirectly. 
Note that the list may contain files that were included into a conditional section that evaluated to false and was not merged into build_file_path's dict. aux_data is a dict containing a key for each build file or included build file. Those keys provide access to dicts whose "included" keys contain lists of all other files included by the build file. included should be left at its default None value by external callers. It is used for recursion. The returned list will not contain any duplicate entries. Each build file in the list will be relative to the current directory. """ if included == None: included = [] if build_file_path in included: return included included.append(build_file_path) for included_build_file in aux_data[build_file_path].get('included', []): GetIncludedBuildFiles(included_build_file, aux_data, included) return included def CheckedEval(file_contents): """Return the eval of a gyp file. The gyp file is restricted to dictionaries and lists only, and repeated keys are not allowed. Note that this is slower than eval() is. """ ast = compiler.parse(file_contents) assert isinstance(ast, Module) c1 = ast.getChildren() assert c1[0] is None assert isinstance(c1[1], Stmt) c2 = c1[1].getChildren() assert isinstance(c2[0], Discard) c3 = c2[0].getChildren() assert len(c3) == 1 return CheckNode(c3[0], []) def CheckNode(node, keypath): if isinstance(node, Dict): c = node.getChildren() dict = {} for n in range(0, len(c), 2): assert isinstance(c[n], Const) key = c[n].getChildren()[0] if key in dict: raise GypError("Key '" + key + "' repeated at level " + repr(len(keypath) + 1) + " with key path '" + '.'.join(keypath) + "'") kp = list(keypath) # Make a copy of the list for descending this node. kp.append(key) dict[key] = CheckNode(c[n + 1], kp) return dict elif isinstance(node, List): c = node.getChildren() children = [] for index, child in enumerate(c): kp = list(keypath) # Copy list. 
kp.append(repr(index)) children.append(CheckNode(child, kp)) return children elif isinstance(node, Const): return node.getChildren()[0] else: raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) + "': " + repr(node)) def LoadOneBuildFile(build_file_path, data, aux_data, includes, is_target, check): if build_file_path in data: return data[build_file_path] if os.path.exists(build_file_path): build_file_contents = open(build_file_path).read() else: raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd())) build_file_data = None try: if check: build_file_data = CheckedEval(build_file_contents) else: build_file_data = eval(build_file_contents, {'__builtins__': None}, None) except SyntaxError, e: e.filename = build_file_path raise except Exception, e: gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path) raise if type(build_file_data) is not dict: raise GypError("%s does not evaluate to a dictionary." % build_file_path) data[build_file_path] = build_file_data aux_data[build_file_path] = {} # Scan for includes and merge them in. if ('skip_includes' not in build_file_data or not build_file_data['skip_includes']): try: if is_target: LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, aux_data, includes, check) else: LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, aux_data, None, check) except Exception, e: gyp.common.ExceptionAppend(e, 'while reading includes of ' + build_file_path) raise return build_file_data def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data, includes, check): includes_list = [] if includes != None: includes_list.extend(includes) if 'includes' in subdict: for include in subdict['includes']: # "include" is specified relative to subdict_path, so compute the real # path to include by appending the provided "include" to the directory # in which subdict_path resides. 
relative_include = \ os.path.normpath(os.path.join(os.path.dirname(subdict_path), include)) includes_list.append(relative_include) # Unhook the includes list, it's no longer needed. del subdict['includes'] # Merge in the included files. for include in includes_list: if not 'included' in aux_data[subdict_path]: aux_data[subdict_path]['included'] = [] aux_data[subdict_path]['included'].append(include) gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include) MergeDicts(subdict, LoadOneBuildFile(include, data, aux_data, None, False, check), subdict_path, include) # Recurse into subdictionaries. for k, v in subdict.iteritems(): if type(v) is dict: LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, None, check) elif type(v) is list: LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, check) # This recurses into lists so that it can look for dicts. def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check): for item in sublist: if type(item) is dict: LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data, None, check) elif type(item) is list: LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check) # Processes toolsets in all the targets. This recurses into condition entries # since they can contain toolsets as well. def ProcessToolsetsInDict(data): if 'targets' in data: target_list = data['targets'] new_target_list = [] for target in target_list: # If this target already has an explicit 'toolset', and no 'toolsets' # list, don't modify it further. if 'toolset' in target and 'toolsets' not in target: new_target_list.append(target) continue if multiple_toolsets: toolsets = target.get('toolsets', ['target']) else: toolsets = ['target'] # Make sure this 'toolsets' definition is only processed once. if 'toolsets' in target: del target['toolsets'] if len(toolsets) > 0: # Optimization: only do copies if more than one toolset is specified. 
for build in toolsets[1:]: new_target = gyp.simple_copy.deepcopy(target) new_target['toolset'] = build new_target_list.append(new_target) target['toolset'] = toolsets[0] new_target_list.append(target) data['targets'] = new_target_list if 'conditions' in data: for condition in data['conditions']: if type(condition) is list: for condition_dict in condition[1:]: if type(condition_dict) is dict: ProcessToolsetsInDict(condition_dict) # TODO(mark): I don't love this name. It just means that it's going to load # a build file that contains targets and is expected to provide a targets dict # that contains the targets... def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes, depth, check, load_dependencies): # If depth is set, predefine the DEPTH variable to be a relative path from # this build file's directory to the directory identified by depth. if depth: # TODO(dglazkov) The backslash/forward-slash replacement at the end is a # temporary measure. This should really be addressed by keeping all paths # in POSIX until actual project generation. d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path)) if d == '': variables['DEPTH'] = '.' else: variables['DEPTH'] = d.replace('\\', '/') # The 'target_build_files' key is only set when loading target build files in # the non-parallel code path, where LoadTargetBuildFile is called # recursively. In the parallel code path, we don't need to check whether the # |build_file_path| has already been loaded, because the 'scheduled' set in # ParallelState guarantees that we never load the same |build_file_path| # twice. if 'target_build_files' in data: if build_file_path in data['target_build_files']: # Already loaded. return False data['target_build_files'].add(build_file_path) gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Target Build File '%s'", build_file_path) build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, includes, True, check) # Store DEPTH for later use in generators. 
build_file_data['_DEPTH'] = depth # Set up the included_files key indicating which .gyp files contributed to # this target dict. if 'included_files' in build_file_data: raise GypError(build_file_path + ' must not contain included_files key') included = GetIncludedBuildFiles(build_file_path, aux_data) build_file_data['included_files'] = [] for included_file in included: # included_file is relative to the current directory, but it needs to # be made relative to build_file_path's directory. included_relative = \ gyp.common.RelativePath(included_file, os.path.dirname(build_file_path)) build_file_data['included_files'].append(included_relative) # Do a first round of toolsets expansion so that conditions can be defined # per toolset. ProcessToolsetsInDict(build_file_data) # Apply "pre"/"early" variable expansions and condition evaluations. ProcessVariablesAndConditionsInDict( build_file_data, PHASE_EARLY, variables, build_file_path) # Since some toolsets might have been defined conditionally, perform # a second round of toolsets expansion now. ProcessToolsetsInDict(build_file_data) # Look at each project's target_defaults dict, and merge settings into # targets. if 'target_defaults' in build_file_data: if 'targets' not in build_file_data: raise GypError("Unable to find targets in build file %s" % build_file_path) index = 0 while index < len(build_file_data['targets']): # This procedure needs to give the impression that target_defaults is # used as defaults, and the individual targets inherit from that. # The individual targets need to be merged into the defaults. Make # a deep copy of the defaults for each target, merge the target dict # as found in the input file into that copy, and then hook up the # copy with the target-specific data merged into it as the replacement # target dict. 
old_target_dict = build_file_data['targets'][index] new_target_dict = gyp.simple_copy.deepcopy( build_file_data['target_defaults']) MergeDicts(new_target_dict, old_target_dict, build_file_path, build_file_path) build_file_data['targets'][index] = new_target_dict index += 1 # No longer needed. del build_file_data['target_defaults'] # Look for dependencies. This means that dependency resolution occurs # after "pre" conditionals and variable expansion, but before "post" - # in other words, you can't put a "dependencies" section inside a "post" # conditional within a target. dependencies = [] if 'targets' in build_file_data: for target_dict in build_file_data['targets']: if 'dependencies' not in target_dict: continue for dependency in target_dict['dependencies']: dependencies.append( gyp.common.ResolveTarget(build_file_path, dependency, None)[0]) if load_dependencies: for dependency in dependencies: try: LoadTargetBuildFile(dependency, data, aux_data, variables, includes, depth, check, load_dependencies) except Exception, e: gyp.common.ExceptionAppend( e, 'while loading dependencies of %s' % build_file_path) raise else: return (build_file_path, dependencies) def CallLoadTargetBuildFile(global_flags, build_file_path, variables, includes, depth, check, generator_input_info): """Wrapper around LoadTargetBuildFile for parallel processing. This wrapper is used when LoadTargetBuildFile is executed in a worker process. """ try: signal.signal(signal.SIGINT, signal.SIG_IGN) # Apply globals so that the worker process behaves the same. 
for key, value in global_flags.iteritems(): globals()[key] = value SetGeneratorGlobals(generator_input_info) result = LoadTargetBuildFile(build_file_path, per_process_data, per_process_aux_data, variables, includes, depth, check, False) if not result: return result (build_file_path, dependencies) = result # We can safely pop the build_file_data from per_process_data because it # will never be referenced by this process again, so we don't need to keep # it in the cache. build_file_data = per_process_data.pop(build_file_path) # This gets serialized and sent back to the main process via a pipe. # It's handled in LoadTargetBuildFileCallback. return (build_file_path, build_file_data, dependencies) except GypError, e: sys.stderr.write("gyp: %s\n" % e) return None except Exception, e: print >>sys.stderr, 'Exception:', e print >>sys.stderr, traceback.format_exc() return None class ParallelProcessingError(Exception): pass class ParallelState(object): """Class to keep track of state when processing input files in parallel. If build files are loaded in parallel, use this to keep track of state during farming out and processing parallel jobs. It's stored in a global so that the callback function can have access to it. """ def __init__(self): # The multiprocessing pool. self.pool = None # The condition variable used to protect this object and notify # the main loop when there might be more data to process. self.condition = None # The "data" dict that was passed to LoadTargetBuildFileParallel self.data = None # The number of parallel calls outstanding; decremented when a response # was received. self.pending = 0 # The set of all build files that have been scheduled, so we don't # schedule the same one twice. self.scheduled = set() # A list of dependency build file paths that haven't been scheduled yet. self.dependencies = [] # Flag to indicate if there was an error in a child process. 
self.error = False def LoadTargetBuildFileCallback(self, result): """Handle the results of running LoadTargetBuildFile in another process. """ self.condition.acquire() if not result: self.error = True self.condition.notify() self.condition.release() return (build_file_path0, build_file_data0, dependencies0) = result self.data[build_file_path0] = build_file_data0 self.data['target_build_files'].add(build_file_path0) for new_dependency in dependencies0: if new_dependency not in self.scheduled: self.scheduled.add(new_dependency) self.dependencies.append(new_dependency) self.pending -= 1 self.condition.notify() self.condition.release() def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth, check, generator_input_info): parallel_state = ParallelState() parallel_state.condition = threading.Condition() # Make copies of the build_files argument that we can modify while working. parallel_state.dependencies = list(build_files) parallel_state.scheduled = set(build_files) parallel_state.pending = 0 parallel_state.data = data try: parallel_state.condition.acquire() while parallel_state.dependencies or parallel_state.pending: if parallel_state.error: break if not parallel_state.dependencies: parallel_state.condition.wait() continue dependency = parallel_state.dependencies.pop() parallel_state.pending += 1 global_flags = { 'path_sections': globals()['path_sections'], 'non_configuration_keys': globals()['non_configuration_keys'], 'multiple_toolsets': globals()['multiple_toolsets']} if not parallel_state.pool: parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count()) parallel_state.pool.apply_async( CallLoadTargetBuildFile, args = (global_flags, dependency, variables, includes, depth, check, generator_input_info), callback = parallel_state.LoadTargetBuildFileCallback) except KeyboardInterrupt, e: parallel_state.pool.terminate() raise e parallel_state.condition.release() parallel_state.pool.close() parallel_state.pool.join() parallel_state.pool 
= None if parallel_state.error: sys.exit(1) # Look for the bracket that matches the first bracket seen in a # string, and return the start and end as a tuple. For example, if # the input is something like "<(foo <(bar)) blah", then it would # return (1, 13), indicating the entire string except for the leading # "<" and trailing " blah". LBRACKETS= set('{[(') BRACKETS = {'}': '{', ']': '[', ')': '('} def FindEnclosingBracketGroup(input_str): stack = [] start = -1 for index, char in enumerate(input_str): if char in LBRACKETS: stack.append(char) if start == -1: start = index elif char in BRACKETS: if not stack: return (-1, -1) if stack.pop() != BRACKETS[char]: return (-1, -1) if not stack: return (start, index + 1) return (-1, -1) def IsStrCanonicalInt(string): """Returns True if |string| is in its canonical integer form. The canonical form is such that str(int(string)) == string. """ if type(string) is str: # This function is called a lot so for maximum performance, avoid # involving regexps which would otherwise make the code much # shorter. Regexps would need twice the time of this function. if string: if string == "0": return True if string[0] == "-": string = string[1:] if not string: return False if '1' <= string[0] <= '9': return string.isdigit() return False # This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)", # "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())". # In the last case, the inner "<()" is captured in match['content']. early_variable_re = re.compile( r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)' r'(?P<command_string>[-a-zA-Z0-9_.]+)?' r'\((?P<is_array>\s*\[?)' r'(?P<content>.*?)(\]?)\))') # This matches the same as early_variable_re, but with '>' instead of '<'. late_variable_re = re.compile( r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)' r'(?P<command_string>[-a-zA-Z0-9_.]+)?' r'\((?P<is_array>\s*\[?)' r'(?P<content>.*?)(\]?)\))') # This matches the same as early_variable_re, but with '^' instead of '<'. 
latelate_variable_re = re.compile( r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)' r'(?P<command_string>[-a-zA-Z0-9_.]+)?' r'\((?P<is_array>\s*\[?)' r'(?P<content>.*?)(\]?)\))') # Global cache of results from running commands so they don't have to be run # more then once. cached_command_results = {} def FixupPlatformCommand(cmd): if sys.platform == 'win32': if type(cmd) is list: cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:] else: cmd = re.sub('^cat ', 'type ', cmd) return cmd PHASE_EARLY = 0 PHASE_LATE = 1 PHASE_LATELATE = 2 def ExpandVariables(input, phase, variables, build_file): # Look for the pattern that gets expanded into variables if phase == PHASE_EARLY: variable_re = early_variable_re expansion_symbol = '<' elif phase == PHASE_LATE: variable_re = late_variable_re expansion_symbol = '>' elif phase == PHASE_LATELATE: variable_re = latelate_variable_re expansion_symbol = '^' else: assert False input_str = str(input) if IsStrCanonicalInt(input_str): return int(input_str) # Do a quick scan to determine if an expensive regex search is warranted. if expansion_symbol not in input_str: return input_str # Get the entire list of matches as a list of MatchObject instances. # (using findall here would return strings instead of MatchObjects). matches = list(variable_re.finditer(input_str)) if not matches: return input_str output = input_str # Reverse the list of matches so that replacements are done right-to-left. # That ensures that earlier replacements won't mess up the string in a # way that causes later calls to find the earlier substituted text instead # of what's intended for replacement. matches.reverse() for match_group in matches: match = match_group.groupdict() gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match) # match['replace'] is the substring to look for, match['type'] # is the character code for the replacement type (< > <! >! 
<| >| <@ # >@ <!@ >!@), match['is_array'] contains a '[' for command # arrays, and match['content'] is the name of the variable (< >) # or command to run (<! >!). match['command_string'] is an optional # command string. Currently, only 'pymod_do_main' is supported. # run_command is true if a ! variant is used. run_command = '!' in match['type'] command_string = match['command_string'] # file_list is true if a | variant is used. file_list = '|' in match['type'] # Capture these now so we can adjust them later. replace_start = match_group.start('replace') replace_end = match_group.end('replace') # Find the ending paren, and re-evaluate the contained string. (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:]) # Adjust the replacement range to match the entire command # found by FindEnclosingBracketGroup (since the variable_re # probably doesn't match the entire command if it contained # nested variables). replace_end = replace_start + c_end # Find the "real" replacement, matching the appropriate closing # paren, and adjust the replacement start and end. replacement = input_str[replace_start:replace_end] # Figure out what the contents of the variable parens are. contents_start = replace_start + c_start + 1 contents_end = replace_end - 1 contents = input_str[contents_start:contents_end] # Do filter substitution now for <|(). # Admittedly, this is different than the evaluation order in other # contexts. However, since filtration has no chance to run on <|(), # this seems like the only obvious way to give them access to filters. 
if file_list: processed_variables = gyp.simple_copy.deepcopy(variables) ProcessListFiltersInDict(contents, processed_variables) # Recurse to expand variables in the contents contents = ExpandVariables(contents, phase, processed_variables, build_file) else: # Recurse to expand variables in the contents contents = ExpandVariables(contents, phase, variables, build_file) # Strip off leading/trailing whitespace so that variable matches are # simpler below (and because they are rarely needed). contents = contents.strip() # expand_to_list is true if an @ variant is used. In that case, # the expansion should result in a list. Note that the caller # is to be expecting a list in return, and not all callers do # because not all are working in list context. Also, for list # expansions, there can be no other text besides the variable # expansion in the input string. expand_to_list = '@' in match['type'] and input_str == replacement if run_command or file_list: # Find the build file's directory, so commands can be run or file lists # generated relative to it. build_file_dir = os.path.dirname(build_file) if build_file_dir == '' and not file_list: # If build_file is just a leaf filename indicating a file in the # current directory, build_file_dir might be an empty string. Set # it to None to signal to subprocess.Popen that it should run the # command in the current directory. build_file_dir = None # Support <|(listfile.txt ...) which generates a file # containing items from a gyp list, generated at gyp time. # This works around actions/rules which have more inputs than will # fit on the command line. 
if file_list: if type(contents) is list: contents_list = contents else: contents_list = contents.split(' ') replacement = contents_list[0] if os.path.isabs(replacement): raise GypError('| cannot handle absolute paths, got "%s"' % replacement) if not generator_filelist_paths: path = os.path.join(build_file_dir, replacement) else: if os.path.isabs(build_file_dir): toplevel = generator_filelist_paths['toplevel'] rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel) else: rel_build_file_dir = build_file_dir qualified_out_dir = generator_filelist_paths['qualified_out_dir'] path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement) gyp.common.EnsureDirExists(path) replacement = gyp.common.RelativePath(path, build_file_dir) f = gyp.common.WriteOnDiff(path) for i in contents_list[1:]: f.write('%s\n' % i) f.close() elif run_command: use_shell = True if match['is_array']: contents = eval(contents) use_shell = False # Check for a cached value to avoid executing commands, or generating # file lists more than once. The cache key contains the command to be # run as well as the directory to run it from, to account for commands # that depend on their current directory. # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory, # someone could author a set of GYP files where each time the command # is invoked it produces different output by design. When the need # arises, the syntax should be extended to support no caching off a # command's output so it is run every time. cache_key = (str(contents), build_file_dir) cached_value = cached_command_results.get(cache_key, None) if cached_value is None: gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Executing command '%s' in directory '%s'", contents, build_file_dir) replacement = '' if command_string == 'pymod_do_main': # <!pymod_do_main(modulename param eters) loads |modulename| as a # python module and then calls that module's DoMain() function, # passing ["param", "eters"] as a single list argument. 
For modules # that don't load quickly, this can be faster than # <!(python modulename param eters). Do this in |build_file_dir|. oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir. if build_file_dir: # build_file_dir may be None (see above). os.chdir(build_file_dir) try: parsed_contents = shlex.split(contents) try: py_module = __import__(parsed_contents[0]) except ImportError as e: raise GypError("Error importing pymod_do_main" "module (%s): %s" % (parsed_contents[0], e)) replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip() finally: os.chdir(oldwd) assert replacement != None elif command_string: raise GypError("Unknown command string '%s' in '%s'." % (command_string, contents)) else: # Fix up command with platform specific workarounds. contents = FixupPlatformCommand(contents) try: p = subprocess.Popen(contents, shell=use_shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=build_file_dir) except Exception, e: raise GypError("%s while executing command '%s' in %s" % (e, contents, build_file)) p_stdout, p_stderr = p.communicate('') if p.wait() != 0 or p_stderr: sys.stderr.write(p_stderr) # Simulate check_call behavior, since check_call only exists # in python 2.5 and later. raise GypError("Call to '%s' returned exit status %d while in %s." % (contents, p.returncode, build_file)) replacement = p_stdout.rstrip() cached_command_results[cache_key] = replacement else: gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Had cache value for command '%s' in directory '%s'", contents,build_file_dir) replacement = cached_value else: if not contents in variables: if contents[-1] in ['!', '/']: # In order to allow cross-compiles (nacl) to happen more naturally, # we will allow references to >(sources/) etc. to resolve to # and empty list if undefined. 
This allows actions to: # 'action!': [ # '>@(_sources!)', # ], # 'action/': [ # '>@(_sources/)', # ], replacement = [] else: raise GypError('Undefined variable ' + contents + ' in ' + build_file) else: replacement = variables[contents] if type(replacement) is list: for item in replacement: if not contents[-1] == '/' and type(item) not in (str, int): raise GypError('Variable ' + contents + ' must expand to a string or list of strings; ' + 'list contains a ' + item.__class__.__name__) # Run through the list and handle variable expansions in it. Since # the list is guaranteed not to contain dicts, this won't do anything # with conditions sections. ProcessVariablesAndConditionsInList(replacement, phase, variables, build_file) elif type(replacement) not in (str, int): raise GypError('Variable ' + contents + ' must expand to a string or list of strings; ' + 'found a ' + replacement.__class__.__name__) if expand_to_list: # Expanding in list context. It's guaranteed that there's only one # replacement to do in |input_str| and that it's this replacement. See # above. if type(replacement) is list: # If it's already a list, make a copy. output = replacement[:] else: # Split it the same way sh would split arguments. output = shlex.split(str(replacement)) else: # Expanding in string context. encoded_replacement = '' if type(replacement) is list: # When expanding a list into string context, turn the list items # into a string in a way that will work with a subprocess call. # # TODO(mark): This isn't completely correct. This should # call a generator-provided function that observes the # proper list-to-argument quoting rules on a specific # platform instead of just calling the POSIX encoding # routine. encoded_replacement = gyp.common.EncodePOSIXShellList(replacement) else: encoded_replacement = replacement output = output[:replace_start] + str(encoded_replacement) + \ output[replace_end:] # Prepare for the next match iteration. 
input_str = output if output == input: gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found only identity matches on %r, avoiding infinite " "recursion.", output) else: # Look for more matches now that we've replaced some, to deal with # expanding local variables (variables defined in the same # variables block as this one). gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output) if type(output) is list: if output and type(output[0]) is list: # Leave output alone if it's a list of lists. # We don't want such lists to be stringified. pass else: new_output = [] for item in output: new_output.append( ExpandVariables(item, phase, variables, build_file)) output = new_output else: output = ExpandVariables(output, phase, variables, build_file) # Convert all strings that are canonically-represented integers into integers. if type(output) is list: for index in xrange(0, len(output)): if IsStrCanonicalInt(output[index]): output[index] = int(output[index]) elif IsStrCanonicalInt(output): output = int(output) return output # The same condition is often evaluated over and over again so it # makes sense to cache as much as possible between evaluations. cached_conditions_asts = {} def EvalCondition(condition, conditions_key, phase, variables, build_file): """Returns the dict that should be used or None if the result was that nothing should be used.""" if type(condition) is not list: raise GypError(conditions_key + ' must be a list') if len(condition) < 2: # It's possible that condition[0] won't work in which case this # attempt will raise its own IndexError. That's probably fine. 
raise GypError(conditions_key + ' ' + condition[0] + ' must be at least length 2, not ' + str(len(condition))) i = 0 result = None while i < len(condition): cond_expr = condition[i] true_dict = condition[i + 1] if type(true_dict) is not dict: raise GypError('{} {} must be followed by a dictionary, not {}'.format( conditions_key, cond_expr, type(true_dict))) if len(condition) > i + 2 and type(condition[i + 2]) is dict: false_dict = condition[i + 2] i = i + 3 if i != len(condition): raise GypError('{} {} has {} unexpected trailing items'.format( conditions_key, cond_expr, len(condition) - i)) else: false_dict = None i = i + 2 if result == None: result = EvalSingleCondition( cond_expr, true_dict, false_dict, phase, variables, build_file) return result def EvalSingleCondition( cond_expr, true_dict, false_dict, phase, variables, build_file): """Returns true_dict if cond_expr evaluates to true, and false_dict otherwise.""" # Do expansions on the condition itself. Since the conditon can naturally # contain variable references without needing to resort to GYP expansion # syntax, this is of dubious value for variables, but someone might want to # use a command expansion directly inside a condition. cond_expr_expanded = ExpandVariables(cond_expr, phase, variables, build_file) if type(cond_expr_expanded) not in (str, int): raise ValueError( 'Variable expansion in this context permits str and int ' + \ 'only, found ' + cond_expr_expanded.__class__.__name__) try: if cond_expr_expanded in cached_conditions_asts: ast_code = cached_conditions_asts[cond_expr_expanded] else: ast_code = compile(cond_expr_expanded, '<string>', 'eval') cached_conditions_asts[cond_expr_expanded] = ast_code if eval(ast_code, {'__builtins__': None}, variables): return true_dict return false_dict except SyntaxError, e: syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s ' 'at character %d.' 
% (str(e.args[0]), e.text, build_file, e.offset), e.filename, e.lineno, e.offset, e.text) raise syntax_error except NameError, e: gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' % (cond_expr_expanded, build_file)) raise GypError(e) def ProcessConditionsInDict(the_dict, phase, variables, build_file): # Process a 'conditions' or 'target_conditions' section in the_dict, # depending on phase. # early -> conditions # late -> target_conditions # latelate -> no conditions # # Each item in a conditions list consists of cond_expr, a string expression # evaluated as the condition, and true_dict, a dict that will be merged into # the_dict if cond_expr evaluates to true. Optionally, a third item, # false_dict, may be present. false_dict is merged into the_dict if # cond_expr evaluates to false. # # Any dict merged into the_dict will be recursively processed for nested # conditionals and other expansions, also according to phase, immediately # prior to being merged. if phase == PHASE_EARLY: conditions_key = 'conditions' elif phase == PHASE_LATE: conditions_key = 'target_conditions' elif phase == PHASE_LATELATE: return else: assert False if not conditions_key in the_dict: return conditions_list = the_dict[conditions_key] # Unhook the conditions list, it's no longer needed. del the_dict[conditions_key] for condition in conditions_list: merge_dict = EvalCondition(condition, conditions_key, phase, variables, build_file) if merge_dict != None: # Expand variables and nested conditinals in the merge_dict before # merging it. ProcessVariablesAndConditionsInDict(merge_dict, phase, variables, build_file) MergeDicts(the_dict, merge_dict, build_file, build_file) def LoadAutomaticVariablesFromDict(variables, the_dict): # Any keys with plain string values in the_dict become automatic variables. # The variable name is the key name with a "_" character prepended. 
for key, value in the_dict.iteritems(): if type(value) in (str, int, list): variables['_' + key] = value def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key): # Any keys in the_dict's "variables" dict, if it has one, becomes a # variable. The variable name is the key name in the "variables" dict. # Variables that end with the % character are set only if they are unset in # the variables dict. the_dict_key is the name of the key that accesses # the_dict in the_dict's parent dict. If the_dict's parent is not a dict # (it could be a list or it could be parentless because it is a root dict), # the_dict_key will be None. for key, value in the_dict.get('variables', {}).iteritems(): if type(value) not in (str, int, list): continue if key.endswith('%'): variable_name = key[:-1] if variable_name in variables: # If the variable is already set, don't set it. continue if the_dict_key is 'variables' and variable_name in the_dict: # If the variable is set without a % in the_dict, and the_dict is a # variables dict (making |variables| a varaibles sub-dict of a # variables dict), use the_dict's definition. value = the_dict[variable_name] else: variable_name = key variables[variable_name] = value def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in, build_file, the_dict_key=None): """Handle all variable and command expansion and conditional evaluation. This function is the public entry point for all variable expansions and conditional evaluations. The variables_in dictionary will not be modified by this function. """ # Make a copy of the variables_in dict that can be modified during the # loading of automatics and the loading of the variables dict. variables = variables_in.copy() LoadAutomaticVariablesFromDict(variables, the_dict) if 'variables' in the_dict: # Make sure all the local variables are added to the variables # list before we process them so that you can reference one # variable from another. 
They will be fully expanded by recursion # in ExpandVariables. for key, value in the_dict['variables'].iteritems(): variables[key] = value # Handle the associated variables dict first, so that any variable # references within can be resolved prior to using them as variables. # Pass a copy of the variables dict to avoid having it be tainted. # Otherwise, it would have extra automatics added for everything that # should just be an ordinary variable in this scope. ProcessVariablesAndConditionsInDict(the_dict['variables'], phase, variables, build_file, 'variables') LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key) for key, value in the_dict.iteritems(): # Skip "variables", which was already processed if present. if key != 'variables' and type(value) is str: expanded = ExpandVariables(value, phase, variables, build_file) if type(expanded) not in (str, int): raise ValueError( 'Variable expansion in this context permits str and int ' + \ 'only, found ' + expanded.__class__.__name__ + ' for ' + key) the_dict[key] = expanded # Variable expansion may have resulted in changes to automatics. Reload. # TODO(mark): Optimization: only reload if no changes were made. variables = variables_in.copy() LoadAutomaticVariablesFromDict(variables, the_dict) LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key) # Process conditions in this dict. This is done after variable expansion # so that conditions may take advantage of expanded variables. For example, # if the_dict contains: # {'type': '<(library_type)', # 'conditions': [['_type=="static_library"', { ... }]]}, # _type, as used in the condition, will only be set to the value of # library_type if variable expansion is performed before condition # processing. However, condition processing should occur prior to recursion # so that variables (both automatic and "variables" dict type) may be # adjusted by conditions sections, merged into the_dict, and have the # intended impact on contained dicts. 
# # This arrangement means that a "conditions" section containing a "variables" # section will only have those variables effective in subdicts, not in # the_dict. The workaround is to put a "conditions" section within a # "variables" section. For example: # {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]], # 'defines': ['<(define)'], # 'my_subdict': {'defines': ['<(define)']}}, # will not result in "IS_MAC" being appended to the "defines" list in the # current scope but would result in it being appended to the "defines" list # within "my_subdict". By comparison: # {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]}, # 'defines': ['<(define)'], # 'my_subdict': {'defines': ['<(define)']}}, # will append "IS_MAC" to both "defines" lists. # Evaluate conditions sections, allowing variable expansions within them # as well as nested conditionals. This will process a 'conditions' or # 'target_conditions' section, perform appropriate merging and recursive # conditional and variable processing, and then remove the conditions section # from the_dict if it is present. ProcessConditionsInDict(the_dict, phase, variables, build_file) # Conditional processing may have resulted in changes to automatics or the # variables dict. Reload. variables = variables_in.copy() LoadAutomaticVariablesFromDict(variables, the_dict) LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key) # Recurse into child dicts, or process child lists which may result in # further recursion into descendant dicts. for key, value in the_dict.iteritems(): # Skip "variables" and string values, which were already processed if # present. if key == 'variables' or type(value) is str: continue if type(value) is dict: # Pass a copy of the variables dict so that subdicts can't influence # parents. 
ProcessVariablesAndConditionsInDict(value, phase, variables, build_file, key) elif type(value) is list: # The list itself can't influence the variables dict, and # ProcessVariablesAndConditionsInList will make copies of the variables # dict if it needs to pass it to something that can influence it. No # copy is necessary here. ProcessVariablesAndConditionsInList(value, phase, variables, build_file) elif type(value) is not int: raise TypeError('Unknown type ' + value.__class__.__name__ + \ ' for ' + key) def ProcessVariablesAndConditionsInList(the_list, phase, variables, build_file): # Iterate using an index so that new values can be assigned into the_list. index = 0 while index < len(the_list): item = the_list[index] if type(item) is dict: # Make a copy of the variables dict so that it won't influence anything # outside of its own scope. ProcessVariablesAndConditionsInDict(item, phase, variables, build_file) elif type(item) is list: ProcessVariablesAndConditionsInList(item, phase, variables, build_file) elif type(item) is str: expanded = ExpandVariables(item, phase, variables, build_file) if type(expanded) in (str, int): the_list[index] = expanded elif type(expanded) is list: the_list[index:index+1] = expanded index += len(expanded) # index now identifies the next item to examine. Continue right now # without falling into the index increment below. continue else: raise ValueError( 'Variable expansion in this context permits strings and ' + \ 'lists only, found ' + expanded.__class__.__name__ + ' at ' + \ index) elif type(item) is not int: raise TypeError('Unknown type ' + item.__class__.__name__ + \ ' at index ' + index) index = index + 1 def BuildTargetsDict(data): """Builds a dict mapping fully-qualified target names to their target dicts. |data| is a dict mapping loaded build files by pathname relative to the current directory. Values in |data| are build file contents. 
  For each |data| value with a "targets" key, the value of the "targets" key
  is taken as a list containing target dicts.  Each target's fully-qualified
  name is constructed from the pathname of the build file (|data| key) and
  its "target_name" property.  These fully-qualified names are used as the
  keys in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """

  targets = {}
  for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
      target_name = gyp.common.QualifiedTarget(build_file,
                                               target['target_name'],
                                               target['toolset'])
      if target_name in targets:
        raise GypError('Duplicate target definitions for ' + target_name)
      targets[target_name] = target

  return targets


def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """

  # Cross all dependency section names with the '', '!' (exclusion) and '/'
  # (regex) list-merge operator suffixes, so decorated keys are covered too.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is
          # specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        # Rewrite the dependency in place with its fully-qualified form.
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')


def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in
  each dependency list, must be qualified when this function is called.
  """

  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1


def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  # dict.setdefault both records first-seen elements and returns the
  # canonical (first) instance of each.
  seen = {}
  return [seen.setdefault(e, e) for e in l if e not in seen]


def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's
  dependency lists."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        target_dict[dependency_key] = Unify(dependencies)


def Filter(l, item):
  """Removes item from l."""
  res = {}
  return [res.setdefault(e, e) for e in l if e != item]


def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the
  prune_self_dependency variable set."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
if dependencies: for t in dependencies: if t == target_name: if targets[t].get('variables', {}).get('prune_self_dependency', 0): target_dict[dependency_key] = Filter(dependencies, target_name) def RemoveLinkDependenciesFromNoneTargets(targets): """Remove dependencies having the 'link_dependency' attribute from the 'none' targets.""" for target_name, target_dict in targets.iteritems(): for dependency_key in dependency_sections: dependencies = target_dict.get(dependency_key, []) if dependencies: for t in dependencies: if target_dict.get('type', None) == 'none': if targets[t].get('variables', {}).get('link_dependency', 0): target_dict[dependency_key] = \ Filter(target_dict[dependency_key], t) class DependencyGraphNode(object): """ Attributes: ref: A reference to an object that this DependencyGraphNode represents. dependencies: List of DependencyGraphNodes on which this one depends. dependents: List of DependencyGraphNodes that depend on this one. """ class CircularException(GypError): pass def __init__(self, ref): self.ref = ref self.dependencies = [] self.dependents = [] def __repr__(self): return '<DependencyGraphNode: %r>' % self.ref def FlattenToList(self): # flat_list is the sorted list of dependencies - actually, the list items # are the "ref" attributes of DependencyGraphNodes. Every target will # appear in flat_list after all of its dependencies, and before all of its # dependents. flat_list = OrderedSet() # in_degree_zeros is the list of DependencyGraphNodes that have no # dependencies not in flat_list. Initially, it is a copy of the children # of this node, because when the graph was built, nodes with no # dependencies were made implicit dependents of the root node. in_degree_zeros = set(self.dependents[:]) while in_degree_zeros: # Nodes in in_degree_zeros have no dependencies not in flat_list, so they # can be appended to flat_list. 
Take these nodes out of in_degree_zeros # as work progresses, so that the next node to process from the list can # always be accessed at a consistent position. node = in_degree_zeros.pop() flat_list.add(node.ref) # Look at dependents of the node just added to flat_list. Some of them # may now belong in in_degree_zeros. for node_dependent in node.dependents: is_in_degree_zero = True # TODO: We want to check through the # node_dependent.dependencies list but if it's long and we # always start at the beginning, then we get O(n^2) behaviour. for node_dependent_dependency in node_dependent.dependencies: if not node_dependent_dependency.ref in flat_list: # The dependent one or more dependencies not in flat_list. There # will be more chances to add it to flat_list when examining # it again as a dependent of those other dependencies, provided # that there are no cycles. is_in_degree_zero = False break if is_in_degree_zero: # All of the dependent's dependencies are already in flat_list. Add # it to in_degree_zeros where it will be processed in a future # iteration of the outer loop. in_degree_zeros.add(node_dependent) return list(flat_list) def FindCycles(self): """ Returns a list of cycles in the graph, where each cycle is its own list. """ results = [] visited = set() def Visit(node, path): for child in node.dependents: if child in path: results.append([child] + path[:path.index(child) + 1]) elif not child in visited: visited.add(child) Visit(child, [child] + path) visited.add(self) Visit(self, [self]) return results def DirectDependencies(self, dependencies=None): """Returns a list of just direct dependencies.""" if dependencies == None: dependencies = [] for dependency in self.dependencies: # Check for None, corresponding to the root node. 
if dependency.ref != None and dependency.ref not in dependencies: dependencies.append(dependency.ref) return dependencies def _AddImportedDependencies(self, targets, dependencies=None): """Given a list of direct dependencies, adds indirect dependencies that other dependencies have declared to export their settings. This method does not operate on self. Rather, it operates on the list of dependencies in the |dependencies| argument. For each dependency in that list, if any declares that it exports the settings of one of its own dependencies, those dependencies whose settings are "passed through" are added to the list. As new items are added to the list, they too will be processed, so it is possible to import settings through multiple levels of dependencies. This method is not terribly useful on its own, it depends on being "primed" with a list of direct dependencies such as one provided by DirectDependencies. DirectAndImportedDependencies is intended to be the public entry point. """ if dependencies == None: dependencies = [] index = 0 while index < len(dependencies): dependency = dependencies[index] dependency_dict = targets[dependency] # Add any dependencies whose settings should be imported to the list # if not already present. Newly-added items will be checked for # their own imports when the list iteration reaches them. # Rather than simply appending new items, insert them after the # dependency that exported them. This is done to more closely match # the depth-first method used by DeepDependencies. 
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be
    exported through the dependency for.
    """

    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns an OrderedSet of all of a target's dependencies,
    recursively."""
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is None:
        continue
      if dependency.ref not in dependencies:
        # Recurse before recording this ref, so deeper dependencies are
        # collected first.
        dependency.DeepDependencies(dependencies)
        dependencies.add(dependency.ref)

    return dependencies

  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being
    built.

    If |include_shared_libraries| is False, the resulting dependencies will
    not include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so
    # that this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies

    # Executables, mac kernel extensions and loadable modules are already
    # fully and finally linked.  Nothing else can be a link dependency of
    # them, there can only be dependencies in the sense that a dependent
    # target might run an executable or load the loadable_module.

    if not initial and target_type in ('executable', 'loadable_module',
                                       'mac_kernel_extension'):
      return dependencies

    # Shared libraries are already fully linked.  They should only be
    # included in |dependencies| when adjusting static library dependencies
    # (in order to link against the shared_library's import lib), but should
    # not be included in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases
    # we are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked
        # into this target linkable.  Always look at dependencies of the
        # initial target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)

    return dependencies

  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """

    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated.  So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False.  Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation',
                              True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)

  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)


def BuildDependencyList(targets):
  # Create a DependencyGraphNode for each target.  Put it into a dict for
  # easy access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are
  # treated as dependent on root_node.
root_node = DependencyGraphNode(None) for target, spec in targets.iteritems(): target_node = dependency_nodes[target] target_build_file = gyp.common.BuildFile(target) dependencies = spec.get('dependencies') if not dependencies: target_node.dependencies = [root_node] root_node.dependents.append(target_node) else: for dependency in dependencies: dependency_node = dependency_nodes.get(dependency) if not dependency_node: raise GypError("Dependency '%s' not found while " "trying to load target %s" % (dependency, target)) target_node.dependencies.append(dependency_node) dependency_node.dependents.append(target_node) flat_list = root_node.FlattenToList() # If there's anything left unvisited, there must be a circular dependency # (cycle). if len(flat_list) != len(targets): if not root_node.dependents: # If all targets have dependencies, add the first target as a dependent # of root_node so that the cycle can be discovered from root_node. target = targets.keys()[0] target_node = dependency_nodes[target] target_node.dependencies.append(root_node) root_node.dependents.append(target_node) cycles = [] for cycle in root_node.FindCycles(): paths = [node.ref for node in cycle] cycles.append('Cycle: %s' % ' -> '.join(paths)) raise DependencyGraphNode.CircularException( 'Cycles in dependency graph detected:\n' + '\n'.join(cycles)) return [dependency_nodes, flat_list] def VerifyNoGYPFileCircularDependencies(targets): # Create a DependencyGraphNode for each gyp file containing a target. Put # it into a dict for easy access. dependency_nodes = {} for target in targets.iterkeys(): build_file = gyp.common.BuildFile(target) if not build_file in dependency_nodes: dependency_nodes[build_file] = DependencyGraphNode(build_file) # Set up the dependency links. 
for target, spec in targets.iteritems(): build_file = gyp.common.BuildFile(target) build_file_node = dependency_nodes[build_file] target_dependencies = spec.get('dependencies', []) for dependency in target_dependencies: try: dependency_build_file = gyp.common.BuildFile(dependency) except GypError, e: gyp.common.ExceptionAppend( e, 'while computing dependencies of .gyp file %s' % build_file) raise if dependency_build_file == build_file: # A .gyp file is allowed to refer back to itself. continue dependency_node = dependency_nodes.get(dependency_build_file) if not dependency_node: raise GypError("Dependancy '%s' not found" % dependency_build_file) if dependency_node not in build_file_node.dependencies: build_file_node.dependencies.append(dependency_node) dependency_node.dependents.append(build_file_node) # Files that have no dependencies are treated as dependent on root_node. root_node = DependencyGraphNode(None) for build_file_node in dependency_nodes.itervalues(): if len(build_file_node.dependencies) == 0: build_file_node.dependencies.append(root_node) root_node.dependents.append(build_file_node) flat_list = root_node.FlattenToList() # If there's anything left unvisited, there must be a circular dependency # (cycle). if len(flat_list) != len(dependency_nodes): if not root_node.dependents: # If all files have dependencies, add the first file as a dependent # of root_node so that the cycle can be discovered from root_node. file_node = dependency_nodes.values()[0] file_node.dependencies.append(root_node) root_node.dependents.append(file_node) cycles = [] for cycle in root_node.FindCycles(): paths = [node.ref for node in cycle] cycles.append('Cycle: %s' % ' -> '.join(paths)) raise DependencyGraphNode.CircularException( 'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles)) def DoDependentSettings(key, flat_list, targets, dependency_nodes): # key should be one of all_dependent_settings, direct_dependent_settings, # or link_settings. 
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # Pick the set of contributing dependencies appropriate to this key.
    if key == 'all_dependent_settings':
      dependencies = dependency_nodes[target].DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = \
          dependency_nodes[target].DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)


def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  # Recompute target "dependencies" properties.  For each static library
  # target, remove "dependencies" entries referring to other static
  # libraries, unless the dependency has the "hard_dependency" attribute
  # set.  For each linkable target, add a "dependencies" entry referring to
  # all of the target's computed list of link dependencies (including static
  # libraries) if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if 'dependencies' not in target_dict:
        continue

      # Preserve the original list so later passes can still see it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done
      # when a dependent relies on some side effect other than just the
      # build product, like a rule or action output.  Further, if a target
      # has a non-hard dependency, but that dependency exports a hard
      # dependency, the non-hard dependency can safely be removed, but the
      # exported hard dependency must be added to the target to keep the
      # same dependency ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies.  If the dependencies list is empty, it's
      # not needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if 'dependencies' not in target_dict:
          target_dict['dependencies'] = []
        if dependency not in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)

    # Sort the dependencies list in the order from dependents to
    # dependencies.  e.g. If A and B depend on C and C depends on D, sort
    # them in A, B, C, D.
    # Note: flat_list is already sorted in the order from dependencies to
    # dependents.
    # NOTE(review): mangled-source indentation reconstructed — this sort is
    # taken to run per target inside the loop; confirm against upstream.
    if sort_dependencies and 'dependencies' in target_dict:
      target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                     if dep in target_dict['dependencies']]


# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')


def MakePathRelative(to_file, fro_file, item):
  # If item is a relative path, it's relative to the build file dict that
  # it's coming from.  Fix it up to make it relative to the build file dict
  # that it's going into.
  # Exception: any |item| that begins with these special characters is
  # returned without modification.
  #   /    Used when a path is already absolute (shortcut optimization;
  #        such paths would be returned as absolute anyway)
  #   $    Used for build environment variables
  #   -    Used for some build environment flags (such as -lapr-1 in a
  #        "libraries" section)
  #   <    Used for our own variable and command expansions (see
  #        ExpandVariables)
  #   >    Used for our own variable and command expansions (see
  #        ExpandVariables)
  #   ^    Used for our own variable and command expansions (see
  #        ExpandVariables)
  #
  #   "/'  Used when a value is quoted.  If these are present, then we
  #        check the second character instead.
  #
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure.  This should really be addressed by keeping all
    # paths in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    # NOTE(review): item[-1] raises IndexError on an empty string —
    # presumably callers never pass one; verify before hardening.
    if item[-1] == '/':
      ret += '/'
    return ret


def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # Python documentation recommends objects which do not support hash
  # set this value to None.  Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s.  Else returns whether x is
  # in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
while singleton and to_item in to: to.remove(to_item) # Don't just insert everything at index 0. That would prepend the new # items to the list in reverse order, which would be an unwelcome # surprise. to.insert(prepend_index, to_item) if is_hashable(to_item): hashable_to_set.add(to_item) prepend_index = prepend_index + 1 def MergeDicts(to, fro, to_file, fro_file): # I wanted to name the parameter "from" but it's a Python keyword... for k, v in fro.iteritems(): # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give # copy semantics. Something else may want to merge from the |fro| dict # later, and having the same dict ref pointed to twice in the tree isn't # what anyone wants considering that the dicts may subsequently be # modified. if k in to: bad_merge = False if type(v) in (str, int): if type(to[k]) not in (str, int): bad_merge = True elif type(v) is not type(to[k]): bad_merge = True if bad_merge: raise TypeError( 'Attempt to merge dict value of type ' + v.__class__.__name__ + \ ' into incompatible type ' + to[k].__class__.__name__ + \ ' for key ' + k) if type(v) in (str, int): # Overwrite the existing value, if any. Cheap and easy. is_path = IsPathSection(k) if is_path: to[k] = MakePathRelative(to_file, fro_file, v) else: to[k] = v elif type(v) is dict: # Recurse, guaranteeing copies will be made of objects that require it. if not k in to: to[k] = {} MergeDicts(to[k], v, to_file, fro_file) elif type(v) is list: # Lists in dicts can be merged with different policies, depending on # how the key in the "from" dict (k, the from-key) is written. # # If the from-key has ...the to-list will have this action # this character appended:... applied when receiving the from-list: # = replace # + prepend # ? set, only if to-list does not yet exist # (none) append # # This logic is list-specific, but since it relies on the associated # dict key, it's checked in this dict-oriented function. 
      # Determine the merge policy from the key's suffix character.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)


def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merges |configuration| (and its 'inherit_from' parents)
  from |target_dict| into |new_configuration_dict|.

  |visited| tracks the inheritance chain to break cycles.
  """
  # Skip if previously visited.
  if configuration in visited:
    return

  # Look at this configuration.
configuration_dict = target_dict['configurations'][configuration] # Merge in parents. for parent in configuration_dict.get('inherit_from', []): MergeConfigWithInheritance(new_configuration_dict, build_file, target_dict, parent, visited + [configuration]) # Merge it into the new config. MergeDicts(new_configuration_dict, configuration_dict, build_file, build_file) # Drop abstract. if 'abstract' in new_configuration_dict: del new_configuration_dict['abstract'] def SetUpConfigurations(target, target_dict): # key_suffixes is a list of key suffixes that might appear on key names. # These suffixes are handled in conditional evaluations (for =, +, and ?) # and rules/exclude processing (for ! and /). Keys with these suffixes # should be treated the same as keys without. key_suffixes = ['=', '+', '?', '!', '/'] build_file = gyp.common.BuildFile(target) # Provide a single configuration by default if none exists. # TODO(mark): Signal an error if default_configurations exists but # configurations does not. if not 'configurations' in target_dict: target_dict['configurations'] = {'Default': {}} if not 'default_configuration' in target_dict: concrete = [i for (i, config) in target_dict['configurations'].iteritems() if not config.get('abstract')] target_dict['default_configuration'] = sorted(concrete)[0] merged_configurations = {} configs = target_dict['configurations'] for (configuration, old_configuration_dict) in configs.iteritems(): # Skip abstract configurations (saves work only). if old_configuration_dict.get('abstract'): continue # Configurations inherit (most) settings from the enclosing target scope. # Get the inheritance relationship right by making a copy of the target # dict. 
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    merged_configurations[configuration] = new_configuration_dict

  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))


def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".
  Every item in such a list is removed from the associated main list, which
  in this example, would be "sources".  Removed items are placed into a
  "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """

  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # them into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)


def ProcessListFiltersInList(name, the_list):
  """Recursively applies ProcessListFiltersInDict to dicts nested in a list."""
  for item in the_list:
    if type(item) is dict:
      ProcessListFiltersInDict(name, item)
    elif type(item) is list:
      ProcessListFiltersInList(name, item)


def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'mac_kernel_extension', 'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'.  "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  if (target_dict.get('standalone_static_library', 0) and
      not target_type == 'static_library'):
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))


def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """For static libraries, rejects 'sources' whose compiled basenames collide
  (a libtool limitation on Mac).  No-op unless |duplicate_basename_check|.
  """
  if not duplicate_basename_check:
    return
  if target_dict.get('type', None) != 'static_library':
    return
  sources = target_dict.get('sources', [])
  basenames = {}
  for source in sources:
    name, ext = os.path.splitext(source)
    is_compiled_file = ext in [
        '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
    if not is_compiled_file:
      continue
    basename = os.path.basename(name)  # Don't include extension.
    basenames.setdefault(basename, []).append(source)

  error = ''
  for basename, files in basenames.iteritems():
    if len(files) > 1:
      error += '  %s: %s\n' % (basename, ' '.join(files))

  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')


def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """

  # Dicts to map between values found in rules' 'rule_name' and 'extension'
  # keys and the rule dicts themselves.
  rule_names = {}
  rule_extensions = {}

  rules = target_dict.get('rules', [])
  for rule in rules:
    # Make sure that there's no conflict among rule names and extensions.
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule

    rule_extension = rule['extension']
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule

    # Make sure rule_sources isn't already there.  It's going to be
    # created below if needed.
    if 'rule_sources' in rule:
      raise GypError(
            'rule_sources must not exist in input, target %s rule %s' %
            (target, rule_name))

    # Collect every source (from 'sources' plus any generator-specified extra
    # keys) whose extension matches this rule.
    rule_sources = []
    source_keys = ['sources']
    source_keys.extend(extra_sources_for_rules)
    for source_key in source_keys:
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          rule_sources.append(source)

    if len(rule_sources) > 0:
      rule['rule_sources'] = rule_sources


def ValidateRunAsInTarget(target, target_dict, build_file):
  """Validates the shape of an optional 'run_as' section in a target."""
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    return
  if type(run_as) is not dict:
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." %
                   (target_name, build_file))
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." %
                   (target_name, build_file))
  if type(action) is not list:
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." %
                   (target_name, build_file))
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))


def ValidateActionsInTarget(target, target_dict, build_file):
  '''Validates the inputs to the actions in a target.'''
  target_name = target_dict.get('target_name')
  actions = target_dict.get('actions', [])
  for action in actions:
    action_name = action.get('action_name')
    if not action_name:
      raise GypError("Anonymous action in target %s.  "
                     "An action must have an 'action_name' field." %
                     target_name)
    inputs = action.get('inputs', None)
    if inputs is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    action_command = action.get('action')
    if action_command and not action_command[0]:
      raise GypError("Empty action as command in target %s." % target_name)


def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.
  """
  # Use items instead of iteritems because there's no need to try to look at
  # reinserted keys and their associated values.
  for k, v in the_dict.items():
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)

    # Integer keys are re-inserted under their string form.
    if type(k) is int:
      del the_dict[k]
      the_dict[str(k)] = v


def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.
  """
  for index in xrange(0, len(the_list)):
    item = the_list[index]
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)


def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  qualified_root_targets = []
  for target in root_targets:
    target = target.strip()
    qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
    if not qualified_targets:
      raise GypError("Could not find target %s" % target)
    qualified_root_targets.extend(qualified_targets)

  wanted_targets = {}
  for target in qualified_root_targets:
    wanted_targets[target] = targets[target]
    for dependency in dependency_nodes[target].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    new_targets = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        new_targets.append(target)
    data[build_file]['targets'] = new_targets

  return wanted_targets, wanted_flat_list


def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
  used = {}
  for target in targets:
    # Separate out 'path/to/file.gyp, 'target_name' from
    # 'path/to/file.gyp:target_name'.
    path, name = target.rsplit(':', 1)
    # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
    subdir, gyp = os.path.split(path)
    # Use '.' for the current directory '', so that the error messages make
    # more sense.
    if not subdir:
      subdir = '.'
    # Prepare a key like 'path/to:target_name'.
    key = subdir + ':' + name
    if key in used:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
    used[key] = gyp


def SetGeneratorGlobals(generator_input_info):
  """Installs the generator's configuration into this module's globals."""
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])

  global non_configuration_keys
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']

  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']


def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  """Loads |build_files| and returns [flat_list, targets, data] after all
  merging, filtering, and validation passes.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
mit
ybellavance/python-for-android
python-build/python-libs/gdata/build/lib/gdata/blogger/__init__.py
140
6426
#!/usr/bin/python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains extensions to Atom objects used with Blogger."""

__author__ = 'api.jscudder (Jeffrey Scudder)'

import atom
import gdata
import re

LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0'


class BloggerEntry(gdata.GDataEntry):
  """Adds convenience methods inherited by all Blogger entries."""

  blog_name_pattern = re.compile('(http://)(\w*)')
  blog_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
  blog_id2_pattern = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')

  def GetBlogId(self):
    """Extracts the Blogger id of this blog.

    This method is useful when constructing URLs by hand. The blog id is
    often used in blogger operation URLs. This should not be confused with
    the id member of a BloggerBlog. The id element is the Atom id XML element.
    The blog id which this method returns is a part of the Atom id.

    Returns:
      The blog's unique id as a string.
    """
    if self.id.text:
      match = self.blog_id_pattern.match(self.id.text)
      if match:
        return match.group(2)
      else:
        # Fall back to the user-N.blog-N style id; raises AttributeError if
        # neither pattern matches.
        return self.blog_id2_pattern.match(self.id.text).group(2)
    return None

  def GetBlogName(self):
    """Finds the name of this blog as used in the 'alternate' URL.

    An alternate URL is in the form 'http://blogName.blogspot.com/'. For
    an entry representing the above example, this method would return
    'blogName'.

    Returns:
      The blog's URL name component as a string.
    """
    for link in self.link:
      if link.rel == 'alternate':
        return self.blog_name_pattern.match(link.href).group(2)
    return None


class BlogEntry(BloggerEntry):
  """Describes a blog entry in the feed listing a user's blogs."""


def BlogEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(BlogEntry, xml_string)


class BlogFeed(gdata.GDataFeed):
  """Describes a feed of a user's blogs."""

  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry])


def BlogFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(BlogFeed, xml_string)


class BlogPostEntry(BloggerEntry):
  """Describes a blog post entry in the feed of a blog's posts."""

  post_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')

  def AddLabel(self, label):
    """Adds a label to the blog post.

    The label is represented by an Atom category element, so this method
    is shorthand for appending a new atom.Category object.

    Args:
      label: str
    """
    self.category.append(atom.Category(scheme=LABEL_SCHEME, term=label))

  def GetPostId(self):
    """Extracts the postID string from the entry's Atom id.

    Returns: A string of digits which identify this post within the blog.
    """
    if self.id.text:
      return self.post_id_pattern.match(self.id.text).group(4)
    return None


def BlogPostEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(BlogPostEntry, xml_string)


class BlogPostFeed(gdata.GDataFeed):
  """Describes a feed of a blog's posts."""

  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry])


def BlogPostFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(BlogPostFeed, xml_string)


class InReplyTo(atom.AtomBase):
  """thr:in-reply-to element linking a comment to the post it replies to."""

  _tag = 'in-reply-to'
  _namespace = THR_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['href'] = 'href'
  _attributes['ref'] = 'ref'
  _attributes['source'] = 'source'
  _attributes['type'] = 'type'

  def __init__(self, href=None, ref=None, source=None, type=None,
      extension_elements=None, extension_attributes=None, text=None):
    self.href = href
    self.ref = ref
    self.source = source
    self.type = type
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text


def InReplyToFromString(xml_string):
  return atom.CreateClassFromXMLString(InReplyTo, xml_string)


class CommentEntry(BloggerEntry):
  """Describes a blog post comment entry in the feed of a blog post's
  comments."""

  _children = BloggerEntry._children.copy()
  _children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo)

  comment_id_pattern = re.compile('.*-(\w*)$')

  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      in_reply_to=None, extension_elements=None, extension_attributes=None,
      text=None):
    BloggerEntry.__init__(self, author=author, category=category,
        content=content, contributor=contributor, atom_id=atom_id, link=link,
        published=published, rights=rights, source=source, summary=summary,
        control=control, title=title, updated=updated,
extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) self.in_reply_to = in_reply_to def GetCommentId(self): """Extracts the commentID string from the entry's Atom id. Returns: A string of digits which identify this post within the blog. """ if self.id.text: return self.comment_id_pattern.match(self.id.text).group(1) return None def CommentEntryFromString(xml_string): return atom.CreateClassFromXMLString(CommentEntry, xml_string) class CommentFeed(gdata.GDataFeed): """Describes a feed of a blog post's comments.""" _children = gdata.GDataFeed._children.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry]) def CommentFeedFromString(xml_string): return atom.CreateClassFromXMLString(CommentFeed, xml_string)
apache-2.0
mcalhoun/ansible
lib/ansible/plugins/__init__.py
31
15255
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com> # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import glob import imp import inspect import os import os.path import sys from collections import defaultdict from ansible import constants as C try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() # Global so that all instances of a PluginLoader will share the caches MODULE_CACHE = {} PATH_CACHE = {} PLUGIN_PATH_CACHE = {} def get_all_plugin_loaders(): return [(name, obj) for (name, obj) in inspect.getmembers(sys.modules[__name__]) if isinstance(obj, PluginLoader)] class PluginLoader: ''' PluginLoader loads plugins from the configured plugin directories. It searches for plugins by iterating through the combined list of play basedirs, configured paths, and the python path. The first match is used. 
''' def __init__(self, class_name, package, config, subdir, aliases={}, required_base_class=None): self.class_name = class_name self.base_class = required_base_class self.package = package self.config = config self.subdir = subdir self.aliases = aliases if not class_name in MODULE_CACHE: MODULE_CACHE[class_name] = {} if not class_name in PATH_CACHE: PATH_CACHE[class_name] = None if not class_name in PLUGIN_PATH_CACHE: PLUGIN_PATH_CACHE[class_name] = defaultdict(dict) self._module_cache = MODULE_CACHE[class_name] self._paths = PATH_CACHE[class_name] self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name] self._extra_dirs = [] self._searched_paths = set() def __setstate__(self, data): ''' Deserializer. ''' class_name = data.get('class_name') package = data.get('package') config = data.get('config') subdir = data.get('subdir') aliases = data.get('aliases') base_class = data.get('base_class') PATH_CACHE[class_name] = data.get('PATH_CACHE') PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE') self.__init__(class_name, package, config, subdir, aliases, base_class) self._extra_dirs = data.get('_extra_dirs', []) self._searched_paths = data.get('_searched_paths', set()) def __getstate__(self): ''' Serializer. 
''' return dict( class_name = self.class_name, base_class = self.base_class, package = self.package, config = self.config, subdir = self.subdir, aliases = self.aliases, _extra_dirs = self._extra_dirs, _searched_paths = self._searched_paths, PATH_CACHE = PATH_CACHE[self.class_name], PLUGIN_PATH_CACHE = PLUGIN_PATH_CACHE[self.class_name], ) def print_paths(self): ''' Returns a string suitable for printing of the search path ''' # Uses a list to get the order right ret = [] for i in self._get_paths(): if i not in ret: ret.append(i) return os.pathsep.join(ret) def _all_directories(self, dir): results = [] results.append(dir) for root, subdirs, files in os.walk(dir): if '__init__.py' in files: for x in subdirs: results.append(os.path.join(root,x)) return results def _get_package_paths(self): ''' Gets the path of a Python package ''' paths = [] if not self.package: return [] if not hasattr(self, 'package_path'): m = __import__(self.package) parts = self.package.split('.')[1:] self.package_path = os.path.join(os.path.dirname(m.__file__), *parts) paths.extend(self._all_directories(self.package_path)) return paths def _get_paths(self): ''' Return a list of paths to search for plugins in ''' if self._paths is not None: return self._paths ret = self._extra_dirs[:] # look in any configured plugin paths, allow one level deep for subcategories if self.config is not None: configured_paths = self.config.split(os.pathsep) for path in configured_paths: path = os.path.realpath(os.path.expanduser(path)) contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path) for c in contents: if os.path.isdir(c) and c not in ret: ret.append(c) if path not in ret: ret.append(path) # look for any plugins installed in the package subtree ret.extend(self._get_package_paths()) # HACK: because powershell modules are in the same directory # hierarchy as other modules we have to process them last. 
This is # because powershell only works on windows but the other modules work # anywhere (possibly including windows if the correct language # interpreter is installed). the non-powershell modules can have any # file extension and thus powershell modules are picked up in that. # The non-hack way to fix this is to have powershell modules be # a different PluginLoader/ModuleLoader. But that requires changing # other things too (known thing to change would be PATHS_CACHE, # PLUGIN_PATHS_CACHE, and MODULE_CACHE. Since those three dicts key # on the class_name and neither regular modules nor powershell modules # would have class_names, they would not work as written. reordered_paths = [] win_dirs = [] for path in ret: if path.endswith('windows'): win_dirs.append(path) else: reordered_paths.append(path) reordered_paths.extend(win_dirs) # cache and return the result self._paths = reordered_paths return reordered_paths def add_directory(self, directory, with_subdir=False): ''' Adds an additional directory to the search path ''' directory = os.path.realpath(directory) if directory is not None: if with_subdir: directory = os.path.join(directory, self.subdir) if directory not in self._extra_dirs: # append the directory and invalidate the path cache self._extra_dirs.append(directory) self._paths = None def find_plugin(self, name, mod_type=''): ''' Find a plugin named name ''' # The particular cache to look for modules within. This matches the # requested mod_type pull_cache = self._plugin_path_cache[mod_type] try: return pull_cache[name] except KeyError: # Cache miss. Now let's find the plugin pass if mod_type: suffix = mod_type elif self.class_name: # Ansible plugins that run in the controller process (most plugins) suffix = '.py' else: # Only Ansible Modules. Ansible modules can be any executable so # they can have any suffix suffix = '' ### FIXME: # Instead of using the self._paths cache (PATH_CACHE) and # self._searched_paths we could use an iterator. 
Before enabling that # we need to make sure we don't want to add additional directories # (add_directory()) once we start using the iterator. Currently, it # looks like _get_paths() never forces a cache refresh so if we expect # additional directories to be added later, it is buggy. for path in (p for p in self._get_paths() if p not in self._searched_paths and os.path.isdir(p)): try: full_paths = (os.path.join(path, f) for f in os.listdir(path)) except OSError as e: display.warning("Error accessing plugin paths: %s" % str(e)) for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')): full_name = os.path.basename(full_path) # HACK: We have no way of executing python byte # compiled files as ansible modules so specifically exclude them if full_path.endswith(('.pyc', '.pyo')): continue splitname = os.path.splitext(full_name) base_name = splitname[0] try: extension = splitname[1] except IndexError: extension = '' # Module found, now enter it into the caches that match # this file if base_name not in self._plugin_path_cache['']: self._plugin_path_cache[''][base_name] = full_path if full_name not in self._plugin_path_cache['']: self._plugin_path_cache[''][full_name] = full_path if base_name not in self._plugin_path_cache[extension]: self._plugin_path_cache[extension][base_name] = full_path if full_name not in self._plugin_path_cache[extension]: self._plugin_path_cache[extension][full_name] = full_path self._searched_paths.add(path) try: return pull_cache[name] except KeyError: # Didn't find the plugin in this directory. Load modules from # the next one pass # if nothing is found, try finding alias/deprecated if not name.startswith('_'): alias_name = '_' + name # We've already cached all the paths at this point if alias_name in pull_cache: if not os.path.islink(pull_cache[alias_name]): display.deprecated('%s is kept for backwards compatibility ' 'but usage is discouraged. 
The module ' 'documentation details page may explain ' 'more about this rationale.' % name.lstrip('_')) return pull_cache[alias_name] return None def has_plugin(self, name): ''' Checks if a plugin named name exists ''' return self.find_plugin(name) is not None __contains__ = has_plugin def get(self, name, *args, **kwargs): ''' instantiates a plugin of the given name using arguments ''' if name in self.aliases: name = self.aliases[name] path = self.find_plugin(name) if path is None: return None if path not in self._module_cache: self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) if kwargs.get('class_only', False): obj = getattr(self._module_cache[path], self.class_name) else: obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]: return None return obj def all(self, *args, **kwargs): ''' instantiates all plugins with the same arguments ''' for i in self._get_paths(): matches = glob.glob(os.path.join(i, "*.py")) matches.sort() for path in matches: name, ext = os.path.splitext(os.path.basename(path)) if name.startswith("_"): continue if path not in self._module_cache: self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) if kwargs.get('class_only', False): obj = getattr(self._module_cache[path], self.class_name) else: obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]: continue # set extra info on the module, in case we want it later setattr(obj, '_original_path', path) yield obj action_loader = PluginLoader( 'ActionModule', 'ansible.plugins.action', C.DEFAULT_ACTION_PLUGIN_PATH, 'action_plugins', required_base_class='ActionBase', ) cache_loader = PluginLoader( 'CacheModule', 'ansible.plugins.cache', C.DEFAULT_CACHE_PLUGIN_PATH, 'cache_plugins', ) callback_loader = 
PluginLoader( 'CallbackModule', 'ansible.plugins.callback', C.DEFAULT_CALLBACK_PLUGIN_PATH, 'callback_plugins', ) connection_loader = PluginLoader( 'Connection', 'ansible.plugins.connection', C.DEFAULT_CONNECTION_PLUGIN_PATH, 'connection_plugins', aliases={'paramiko': 'paramiko_ssh'}, required_base_class='ConnectionBase', ) shell_loader = PluginLoader( 'ShellModule', 'ansible.plugins.shell', 'shell_plugins', 'shell_plugins', ) module_loader = PluginLoader( '', 'ansible.modules', C.DEFAULT_MODULE_PATH, 'library', ) lookup_loader = PluginLoader( 'LookupModule', 'ansible.plugins.lookup', C.DEFAULT_LOOKUP_PLUGIN_PATH, 'lookup_plugins', required_base_class='LookupBase', ) vars_loader = PluginLoader( 'VarsModule', 'ansible.plugins.vars', C.DEFAULT_VARS_PLUGIN_PATH, 'vars_plugins', ) filter_loader = PluginLoader( 'FilterModule', 'ansible.plugins.filter', C.DEFAULT_FILTER_PLUGIN_PATH, 'filter_plugins', ) test_loader = PluginLoader( 'TestModule', 'ansible.plugins.test', C.DEFAULT_TEST_PLUGIN_PATH, 'test_plugins' ) fragment_loader = PluginLoader( 'ModuleDocFragment', 'ansible.utils.module_docs_fragments', os.path.join(os.path.dirname(__file__), 'module_docs_fragments'), '', ) strategy_loader = PluginLoader( 'StrategyModule', 'ansible.plugins.strategy', None, 'strategy_plugins', required_base_class='StrategyBase', )
gpl-3.0
floft/rpi-linux
scripts/gdb/linux/dmesg.py
367
2005
# # gdb helper commands and functions for Linux kernel debugging # # kernel log buffer dump # # Copyright (c) Siemens AG, 2011, 2012 # # Authors: # Jan Kiszka <jan.kiszka@siemens.com> # # This work is licensed under the terms of the GNU GPL version 2. # import gdb import string from linux import utils class LxDmesg(gdb.Command): """Print Linux kernel log buffer.""" def __init__(self): super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16) log_first_idx = int(gdb.parse_and_eval("log_first_idx")) log_next_idx = int(gdb.parse_and_eval("log_next_idx")) log_buf_len = int(gdb.parse_and_eval("log_buf_len")) inf = gdb.inferiors()[0] start = log_buf_addr + log_first_idx if log_first_idx < log_next_idx: log_buf_2nd_half = -1 length = log_next_idx - log_first_idx log_buf = inf.read_memory(start, length) else: log_buf_2nd_half = log_buf_len - log_first_idx log_buf = inf.read_memory(start, log_buf_2nd_half) + \ inf.read_memory(log_buf_addr, log_next_idx) pos = 0 while pos < log_buf.__len__(): length = utils.read_u16(log_buf[pos + 8:pos + 10]) if length == 0: if log_buf_2nd_half == -1: gdb.write("Corrupted log buffer!\n") break pos = log_buf_2nd_half continue text_len = utils.read_u16(log_buf[pos + 10:pos + 12]) text = log_buf[pos + 16:pos + 16 + text_len] time_stamp = utils.read_u64(log_buf[pos:pos + 8]) for line in memoryview(text).tobytes().splitlines(): gdb.write("[{time:12.6f}] {line}\n".format( time=time_stamp / 1000000000.0, line=line)) pos += length LxDmesg()
gpl-2.0
openthread/silk
silk/unit_tests/test_otns.py
1
13819
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, List, Tuple import random import time import unittest from otns.cli import OTNS from silk.tools.otns_manager import OtnsManager from silk.unit_tests.mock_device import MockThreadDevBoard from silk.unit_tests.testcase import SilkTestCase class OTNSIntegrationTest(SilkTestCase): """Silk integration test case for OTNS. """ def setUp(self): """Test method set up. """ self.ns = OTNS(otns_args=["-raw", "-real", "-ot-cli", "otns-silk-proxy", "-listen", ":9000", "-log", "debug"]) # wait for OTNS gRPC server to start time.sleep(0.3) self.manager = OtnsManager("localhost", self.logger.getChild("OtnsManager")) self.manager.wait_for_grpc_channel_ready(10) def tearDown(self) -> None: """Test method tear down. """ self.manager.unsubscribe_from_all_nodes() self.manager.remove_all_nodes() self.ns.close() # wait for OTNS gRPC server to stop time.sleep(0.2) def assert_device_positions(self, nodes_info: Dict[int, Dict[str, Any]], expected_coords: Dict[int, Tuple[int, int]]): """Helper method to assert auto layout position devices coordinates. Args: nodes_info (Dict[int, Dict[str, Any]]): nodes info dictionary. expected_coords (Dict[int, Tuple[int, int]]): dict mapping device id to coordinates to check. 
""" for device_id, coords in expected_coords.items(): self.assertAlmostEqual(nodes_info[device_id]["x"], coords[0], delta=1) self.assertAlmostEqual(nodes_info[device_id]["y"], coords[1], delta=1) def testAddDevice(self): """Test adding device. """ ns = self.ns manager = self.manager device = MockThreadDevBoard(1) manager.add_node(device) ns.go(0.1) self.assertEqual(len(ns.nodes()), 1) def testRemoveDevice(self): """Test removing device. """ ns = self.ns manager = self.manager device = MockThreadDevBoard(1) manager.add_node(device) ns.go(0.1) self.assertEqual(len(ns.nodes()), 1) manager.remove_node(device) ns.go(0.1) self.assertEqual(len(ns.nodes()), 0) def testSetSpeed(self): """Test setting speed display. """ ns = self.ns manager = self.manager speed = random.randint(2, 20) manager.set_replay_speed(speed) self.assertAlmostEqual(ns.speed, speed) speed = random.randint(21, 40) manager.set_replay_speed(speed) self.assertAlmostEqual(ns.speed, speed) def testAddFixedPositionDevices(self): """Test adding fixed position nodes. """ def assert_device_fixed_positions(devices: List[MockThreadDevBoard]): """Helper method to assert fixed position devices coordinates. Args: devices (List[MockThreadDevBoard]): list of devices to check. 
""" for a_device in devices: self.assertEqual(nodes_info[a_device.id]["x"], a_device.x) self.assertEqual(nodes_info[a_device.id]["y"], a_device.y) ns = self.ns manager = self.manager device_1 = MockThreadDevBoard(random.randint(1, 10)) device_2 = MockThreadDevBoard(random.randint(11, 20)) device_3 = MockThreadDevBoard(random.randint(21, 30)) for device in [device_1, device_2, device_3]: device.device.set_otns_vis_position(random.randint(100, 200), random.randint(100, 200)) manager.add_node(device_1) manager.add_node(device_2) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 2) assert_device_fixed_positions([device_1, device_2]) manager.add_node(device_3) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 3) assert_device_fixed_positions([device_1, device_2, device_3]) def testAddAutoLayoutDevices(self): """Test adding auto layout nodes. """ ns = self.ns manager = self.manager layout_center_x = random.randint(100, 200) layout_center_y = random.randint(100, 200) layout_radius = random.randint(50, 100) device_1 = MockThreadDevBoard(1) device_2 = MockThreadDevBoard(2) device_3 = MockThreadDevBoard(3) device_4 = MockThreadDevBoard(4) for device in [device_1, device_2, device_3, device_4]: device.device.set_otns_layout_parameter(layout_center_x, layout_center_y, layout_radius) manager.add_node(device_1) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 1) # placing the first node alone expected_coords = {device_1.id: (layout_center_x + layout_radius, layout_center_y)} nodes_info = ns.nodes() self.assert_device_positions(nodes_info, expected_coords) manager.add_node(device_2) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 2) # forming a horizontal line expected_coords = { device_1.id: (layout_center_x - layout_radius, layout_center_y), device_2.id: (layout_center_x + layout_radius, layout_center_y) } self.assert_device_positions(nodes_info, expected_coords) manager.add_node(device_3) 
manager.add_node(device_4) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 4) # forming a cross shape expected_coords = { device_1.id: (layout_center_x, layout_center_y + layout_radius), device_2.id: (layout_center_x - layout_radius, layout_center_y), device_3.id: (layout_center_x, layout_center_y - layout_radius), device_4.id: (layout_center_x + layout_radius, layout_center_y) } self.assert_device_positions(nodes_info, expected_coords) def testRemoveAutoLayoutDevices(self): """Test that removing nodes keeps other nodes stationary with auto layout. """ ns = self.ns manager = self.manager layout_center_x = random.randint(100, 200) layout_center_y = random.randint(100, 200) layout_radius = random.randint(50, 100) device_1 = MockThreadDevBoard(1) device_2 = MockThreadDevBoard(2) device_3 = MockThreadDevBoard(3) device_4 = MockThreadDevBoard(4) for device in [device_1, device_2, device_3, device_4]: device.device.set_otns_layout_parameter(layout_center_x, layout_center_y, layout_radius) manager.add_node(device) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 4) expected_coords = { device_1.id: (layout_center_x, layout_center_y + layout_radius), device_2.id: (layout_center_x - layout_radius, layout_center_y), device_3.id: (layout_center_x, layout_center_y - layout_radius), device_4.id: (layout_center_x + layout_radius, layout_center_y) } self.assert_device_positions(nodes_info, expected_coords) manager.remove_node(device_4) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 3) expected_coords = { device_1.id: (layout_center_x, layout_center_y + layout_radius), device_2.id: (layout_center_x - layout_radius, layout_center_y), device_3.id: (layout_center_x, layout_center_y - layout_radius) } self.assert_device_positions(nodes_info, expected_coords) manager.remove_node(device_3) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 2) expected_coords = { device_1.id: (layout_center_x, layout_center_y 
+ layout_radius), device_2.id: (layout_center_x - layout_radius, layout_center_y) } self.assert_device_positions(nodes_info, expected_coords) manager.remove_node(device_2) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 1) expected_coords = {device_1.id: (layout_center_x, layout_center_y + layout_radius)} self.assert_device_positions(nodes_info, expected_coords) manager.add_node(device_2) manager.remove_node(device_1) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 1) expected_coords = {device_2.id: (layout_center_x - layout_radius, layout_center_y)} self.assert_device_positions(nodes_info, expected_coords) manager.add_node(device_3) manager.remove_node(device_2) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 1) expected_coords = {device_3.id: (layout_center_x, layout_center_y - layout_radius)} self.assert_device_positions(nodes_info, expected_coords) manager.add_node(device_4) manager.remove_node(device_3) ns.go(0.1) nodes_info = ns.nodes() self.assertEqual(len(nodes_info), 1) expected_coords = {device_4.id: (layout_center_x + layout_radius, layout_center_y)} self.assert_device_positions(nodes_info, expected_coords) def testUpdateExtaddr(self): """Test updating node extended address. Also tests updating before the OTNS manager subscribes to the node. """ ns = self.ns manager = self.manager device_extaddr = random.getrandbits(64) device = MockThreadDevBoard(random.randint(1, 10)) manager.add_node(device) ns.go(0.1) self.assertEqual(ns.nodes()[device.id]["extaddr"], device.id) device.wpantund_process.emit_status(f"extaddr={device_extaddr:016x}") ns.go(0.1) self.assertEqual(ns.nodes()[device.id]["extaddr"], device.id) manager.subscribe_to_node(device) device.wpantund_process.emit_status(f"extaddr={device_extaddr:016x}") ns.go(0.1) self.assertEqual(ns.nodes()[device.id]["extaddr"], device_extaddr) def testUpdateRLOC16(self): """Test updating node RLOC16. 
Also tests updating before the OTNS manager subscribes to the node. """ ns = self.ns manager = self.manager device_rloc16 = random.getrandbits(16) device = MockThreadDevBoard(random.randint(1, 10)) manager.add_node(device) ns.go(0.1) original_rloc16 = ns.nodes()[device.id]["rloc16"] device.wpantund_process.emit_status(f"rloc16={device_rloc16}") ns.go(0.1) self.assertEqual(ns.nodes()[device.id]["rloc16"], original_rloc16) manager.subscribe_to_node(device) device.wpantund_process.emit_status(f"rloc16={device_rloc16}") ns.go(0.1) self.assertEqual(ns.nodes()[device.id]["rloc16"], device_rloc16) def testFormPartition(self): """Test forming a partition. """ ns = self.ns manager = self.manager device_1_parid = random.getrandbits(16) device_1 = MockThreadDevBoard(random.randint(1, 10)) device_2_parid = random.getrandbits(16) device_2 = MockThreadDevBoard(random.randint(11, 20)) manager.add_node(device_1) manager.add_node(device_2) manager.subscribe_to_node(device_1) manager.subscribe_to_node(device_2) device_1.wpantund_process.emit_status(f"parid={device_1_parid:08x}") device_2.wpantund_process.emit_status(f"parid={device_2_parid:08x}") ns.go(0.1) partitions_info = ns.partitions() self.assertEqual(len(partitions_info), 2) self.assertEqual(len(partitions_info[device_1_parid]), 1) self.assertEqual(len(partitions_info[device_2_parid]), 1) self.assertEqual(partitions_info[device_1_parid][0], device_1.id) self.assertEqual(partitions_info[device_2_parid][0], device_2.id) device_2.wpantund_process.emit_status(f"parid={device_1_parid:08x}") ns.go(0.1) partitions_info = ns.partitions() self.assertEqual(len(partitions_info), 1) self.assertEqual(len(partitions_info[device_1_parid]), 2) self.assertIn(device_1.id, partitions_info[device_1_parid]) self.assertIn(device_2.id, partitions_info[device_1_parid]) device_2.wpantund_process.emit_status(f"parid={device_2_parid:08x}") ns.go(0.1) partitions_info = ns.partitions() self.assertEqual(len(partitions_info), 2) 
self.assertEqual(len(partitions_info[device_1_parid]), 1) self.assertEqual(len(partitions_info[device_2_parid]), 1) self.assertEqual(partitions_info[device_1_parid][0], device_1.id) self.assertEqual(partitions_info[device_2_parid][0], device_2.id) # TODO: Add child & router table tests after adding query support to OTNS CLI if __name__ == "__main__": unittest.main()
apache-2.0
jankeromnes/depot_tools
tests/fix_encoding_test.py
50
1817
#!/usr/bin/env python # coding=utf8 # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for fix_encoding.py.""" import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import fix_encoding class FixEncodingTest(unittest.TestCase): # Nice mix of latin, hebrew, arabic and chinese. Doesn't mean anything. text = u'Héllô 偉大 سيد' def test_code_page(self): # Make sure printing garbage won't throw. print self.text.encode() + '\xff' print >> sys.stderr, self.text.encode() + '\xff' def test_utf8(self): # Make sure printing utf-8 works. print self.text.encode('utf-8') print >> sys.stderr, self.text.encode('utf-8') def test_unicode(self): # Make sure printing unicode works. print self.text print >> sys.stderr, self.text def test_default_encoding(self): self.assertEquals('utf-8', sys.getdefaultencoding()) def test_win_console(self): if sys.platform != 'win32': return # This should fail if not redirected, e.g. run directly instead of through # the presubmit check. Can be checked with: # python tests\fix_encoding_test.py self.assertEquals( sys.stdout.__class__, fix_encoding.WinUnicodeOutput) self.assertEquals( sys.stderr.__class__, fix_encoding.WinUnicodeOutput) self.assertEquals(sys.stdout.encoding, sys.getdefaultencoding()) self.assertEquals(sys.stderr.encoding, sys.getdefaultencoding()) def test_multiple_calls(self): # Shouldn't do anything. self.assertEquals(False, fix_encoding.fix_encoding()) if __name__ == '__main__': assert fix_encoding.fix_encoding() unittest.main()
bsd-3-clause
keithroe/vtkoptix
ThirdParty/Twisted/twisted/names/authority.py
35
13994
# -*- test-case-name: twisted.names.test.test_names -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Authoritative resolvers. """ import os import time from twisted.names import dns, error from twisted.internet import defer from twisted.python import failure from twisted.python.compat import execfile import common def getSerial(filename = '/tmp/twisted-names.serial'): """Return a monotonically increasing (across program runs) integer. State is stored in the given file. If it does not exist, it is created with rw-/---/--- permissions. """ serial = time.strftime('%Y%m%d') o = os.umask(0177) try: if not os.path.exists(filename): f = file(filename, 'w') f.write(serial + ' 0') f.close() finally: os.umask(o) serialFile = file(filename, 'r') lastSerial, ID = serialFile.readline().split() ID = (lastSerial == serial) and (int(ID) + 1) or 0 serialFile.close() serialFile = file(filename, 'w') serialFile.write('%s %d' % (serial, ID)) serialFile.close() serial = serial + ('%02d' % (ID,)) return serial #class LookupCacherMixin(object): # _cache = None # # def _lookup(self, name, cls, type, timeout = 10): # if not self._cache: # self._cache = {} # self._meth = super(LookupCacherMixin, self)._lookup # # if self._cache.has_key((name, cls, type)): # return self._cache[(name, cls, type)] # else: # r = self._meth(name, cls, type, timeout) # self._cache[(name, cls, type)] = r # return r class FileAuthority(common.ResolverBase): """ An Authority that is loaded from a file. @ivar _ADDITIONAL_PROCESSING_TYPES: Record types for which additional processing will be done. @ivar _ADDRESS_TYPES: Record types which are useful for inclusion in the additional section generated during additional processing. 
""" # See https://twistedmatrix.com/trac/ticket/6650 _ADDITIONAL_PROCESSING_TYPES = (dns.CNAME, dns.MX, dns.NS) _ADDRESS_TYPES = (dns.A, dns.AAAA) soa = None records = None def __init__(self, filename): common.ResolverBase.__init__(self) self.loadFile(filename) self._cache = {} def __setstate__(self, state): self.__dict__ = state # print 'setstate ', self.soa def _additionalRecords(self, answer, authority, ttl): """ Find locally known information that could be useful to the consumer of the response and construct appropriate records to include in the I{additional} section of that response. Essentially, implement RFC 1034 section 4.3.2 step 6. @param answer: A L{list} of the records which will be included in the I{answer} section of the response. @param authority: A L{list} of the records which will be included in the I{authority} section of the response. @param ttl: The default TTL for records for which this is not otherwise specified. @return: A generator of L{dns.RRHeader} instances for inclusion in the I{additional} section. These instances represent extra information about the records in C{answer} and C{authority}. """ for record in answer + authority: if record.type in self._ADDITIONAL_PROCESSING_TYPES: name = record.payload.name.name for rec in self.records.get(name.lower(), ()): if rec.TYPE in self._ADDRESS_TYPES: yield dns.RRHeader( name, rec.TYPE, dns.IN, rec.ttl or ttl, rec, auth=True) def _lookup(self, name, cls, type, timeout = None): """ Determine a response to a particular DNS query. @param name: The name which is being queried and for which to lookup a response. @type name: L{bytes} @param cls: The class which is being queried. Only I{IN} is implemented here and this value is presently disregarded. @type cls: L{int} @param type: The type of records being queried. See the types defined in L{twisted.names.dns}. @type type: L{int} @param timeout: All processing is done locally and a result is available immediately, so the timeout value is ignored. 
@return: A L{Deferred} that fires with a L{tuple} of three sets of response records (to comprise the I{answer}, I{authority}, and I{additional} sections of a DNS response) or with a L{Failure} if there is a problem processing the query. """ cnames = [] results = [] authority = [] additional = [] default_ttl = max(self.soa[1].minimum, self.soa[1].expire) domain_records = self.records.get(name.lower()) if domain_records: for record in domain_records: if record.ttl is not None: ttl = record.ttl else: ttl = default_ttl if record.TYPE == dns.NS and name.lower() != self.soa[0].lower(): # NS record belong to a child zone: this is a referral. As # NS records are authoritative in the child zone, ours here # are not. RFC 2181, section 6.1. authority.append( dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=False) ) elif record.TYPE == type or type == dns.ALL_RECORDS: results.append( dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=True) ) if record.TYPE == dns.CNAME: cnames.append( dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=True) ) if not results: results = cnames # https://tools.ietf.org/html/rfc1034#section-4.3.2 - sort of. # See https://twistedmatrix.com/trac/ticket/6732 additionalInformation = self._additionalRecords( results, authority, default_ttl) if cnames: results.extend(additionalInformation) else: additional.extend(additionalInformation) if not results and not authority: # Empty response. Include SOA record to allow clients to cache # this response. RFC 1034, sections 3.7 and 4.3.4, and RFC 2181 # section 7.1. authority.append( dns.RRHeader(self.soa[0], dns.SOA, dns.IN, ttl, self.soa[1], auth=True) ) return defer.succeed((results, authority, additional)) else: if dns._isSubdomainOf(name, self.soa[0]): # We may be the authority and we didn't find it. # XXX: The QNAME may also be a in a delegated child zone. 
See # #6581 and #6580 return defer.fail(failure.Failure(dns.AuthoritativeDomainError(name))) else: # The QNAME is not a descendant of this zone. Fail with # DomainError so that the next chained authority or # resolver will be queried. return defer.fail(failure.Failure(error.DomainError(name))) def lookupZone(self, name, timeout = 10): if self.soa[0].lower() == name.lower(): # Wee hee hee hooo yea default_ttl = max(self.soa[1].minimum, self.soa[1].expire) if self.soa[1].ttl is not None: soa_ttl = self.soa[1].ttl else: soa_ttl = default_ttl results = [dns.RRHeader(self.soa[0], dns.SOA, dns.IN, soa_ttl, self.soa[1], auth=True)] for (k, r) in self.records.items(): for rec in r: if rec.ttl is not None: ttl = rec.ttl else: ttl = default_ttl if rec.TYPE != dns.SOA: results.append(dns.RRHeader(k, rec.TYPE, dns.IN, ttl, rec, auth=True)) results.append(results[0]) return defer.succeed((results, (), ())) return defer.fail(failure.Failure(dns.DomainError(name))) def _cbAllRecords(self, results): ans, auth, add = [], [], [] for res in results: if res[0]: ans.extend(res[1][0]) auth.extend(res[1][1]) add.extend(res[1][2]) return ans, auth, add class PySourceAuthority(FileAuthority): """A FileAuthority that is built up from Python source code.""" def loadFile(self, filename): g, l = self.setupConfigNamespace(), {} execfile(filename, g, l) if not l.has_key('zone'): raise ValueError, "No zone defined in " + filename self.records = {} for rr in l['zone']: if isinstance(rr[1], dns.Record_SOA): self.soa = rr self.records.setdefault(rr[0].lower(), []).append(rr[1]) def wrapRecord(self, type): return lambda name, *arg, **kw: (name, type(*arg, **kw)) def setupConfigNamespace(self): r = {} items = dns.__dict__.iterkeys() for record in [x for x in items if x.startswith('Record_')]: type = getattr(dns, record) f = self.wrapRecord(type) r[record[len('Record_'):]] = f return r class BindAuthority(FileAuthority): """An Authority that loads BIND configuration files""" def loadFile(self, 
filename): self.origin = os.path.basename(filename) + '.' # XXX - this might suck lines = open(filename).readlines() lines = self.stripComments(lines) lines = self.collapseContinuations(lines) self.parseLines(lines) def stripComments(self, lines): return [ a.find(';') == -1 and a or a[:a.find(';')] for a in [ b.strip() for b in lines ] ] def collapseContinuations(self, lines): L = [] state = 0 for line in lines: if state == 0: if line.find('(') == -1: L.append(line) else: L.append(line[:line.find('(')]) state = 1 else: if line.find(')') != -1: L[-1] += ' ' + line[:line.find(')')] state = 0 else: L[-1] += ' ' + line lines = L L = [] for line in lines: L.append(line.split()) return filter(None, L) def parseLines(self, lines): TTL = 60 * 60 * 3 ORIGIN = self.origin self.records = {} for (line, index) in zip(lines, range(len(lines))): if line[0] == '$TTL': TTL = dns.str2time(line[1]) elif line[0] == '$ORIGIN': ORIGIN = line[1] elif line[0] == '$INCLUDE': # XXX - oh, fuck me raise NotImplementedError('$INCLUDE directive not implemented') elif line[0] == '$GENERATE': raise NotImplementedError('$GENERATE directive not implemented') else: self.parseRecordLine(ORIGIN, TTL, line) def addRecord(self, owner, ttl, type, domain, cls, rdata): if not domain.endswith('.'): domain = domain + '.' + owner else: domain = domain[:-1] f = getattr(self, 'class_%s' % cls, None) if f: f(ttl, type, domain, rdata) else: raise NotImplementedError, "Record class %r not supported" % cls def class_IN(self, ttl, type, domain, rdata): record = getattr(dns, 'Record_%s' % type, None) if record: r = record(*rdata) r.ttl = ttl self.records.setdefault(domain.lower(), []).append(r) print 'Adding IN Record', domain, ttl, r if type == 'SOA': self.soa = (domain, r) else: raise NotImplementedError, "Record type %r not supported" % type # # This file ends here. Read no further. 
# def parseRecordLine(self, origin, ttl, line): MARKERS = dns.QUERY_CLASSES.values() + dns.QUERY_TYPES.values() cls = 'IN' owner = origin if line[0] == '@': line = line[1:] owner = origin # print 'default owner' elif not line[0].isdigit() and line[0] not in MARKERS: owner = line[0] line = line[1:] # print 'owner is ', owner if line[0].isdigit() or line[0] in MARKERS: domain = owner owner = origin # print 'woops, owner is ', owner, ' domain is ', domain else: domain = line[0] line = line[1:] # print 'domain is ', domain if line[0] in dns.QUERY_CLASSES.values(): cls = line[0] line = line[1:] # print 'cls is ', cls if line[0].isdigit(): ttl = int(line[0]) line = line[1:] # print 'ttl is ', ttl elif line[0].isdigit(): ttl = int(line[0]) line = line[1:] # print 'ttl is ', ttl if line[0] in dns.QUERY_CLASSES.values(): cls = line[0] line = line[1:] # print 'cls is ', cls type = line[0] # print 'type is ', type rdata = line[1:] # print 'rdata is ', rdata self.addRecord(owner, ttl, type, domain, cls, rdata)
bsd-3-clause
davgibbs/django
django/db/migrations/recorder.py
478
2868
from __future__ import unicode_literals

from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now

from .exceptions import MigrationSchemaMissing


class MigrationRecorder(object):
    """
    Stores and queries the per-database record of applied migrations.

    The ``django_migrations`` table has to exist before any migration can
    run, so it is managed here directly through the schema editor rather
    than via the normal migration machinery, and queried through a floating
    model that is bound to an isolated app registry.

    A row in the table means the named migration is applied; unapplying a
    migration deletes its row.
    """

    @python_2_unicode_compatible
    class Migration(models.Model):
        # Floating model: registered in its own Apps() so it never leaks
        # into the project's real app registry.
        app = models.CharField(max_length=255)
        name = models.CharField(max_length=255)
        applied = models.DateTimeField(default=now)

        class Meta:
            apps = Apps()
            app_label = "migrations"
            db_table = "django_migrations"

        def __str__(self):
            return "Migration %s for %s" % (self.name, self.app)

    def __init__(self, connection):
        self.connection = connection

    @property
    def migration_qs(self):
        # Always query through this recorder's own connection alias.
        return self.Migration.objects.using(self.connection.alias)

    def ensure_schema(self):
        """Create the django_migrations table if it does not exist yet."""
        # The table's schema has never changed in the codebase, so its mere
        # presence is the only thing that needs checking.
        existing = self.connection.introspection.table_names(self.connection.cursor())
        if self.Migration._meta.db_table in existing:
            return
        try:
            with self.connection.schema_editor() as editor:
                editor.create_model(self.Migration)
        except DatabaseError as exc:
            raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)

    def applied_migrations(self):
        """Return the set of (app, name) pairs of applied migrations."""
        self.ensure_schema()
        return {tuple(row) for row in self.migration_qs.values_list("app", "name")}

    def record_applied(self, app, name):
        """Record that a migration was applied."""
        self.ensure_schema()
        self.migration_qs.create(app=app, name=name)

    def record_unapplied(self, app, name):
        """Record that a migration was unapplied."""
        self.ensure_schema()
        self.migration_qs.filter(app=app, name=name).delete()

    def flush(self):
        """Delete every migration record (useful when testing migrations)."""
        self.migration_qs.all().delete()
bsd-3-clause
Triv90/Nova
nova/compute/power_state.py
37
2162
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Power state as reported by the virt driver for a particular domain.

The hypervisor is always the authority on a VM's status; the power_state
stored in the DB is only a snapshot of the (recent) past. It can be
refreshed periodically, and should be refreshed at the end of any task
that is expected to change the power state.
"""

# NOTE(maoy): These are *not* virDomainState values from libvirt. The hex
# values merely coincide with virDomainState for backward compatibility.
NOSTATE = 0x00
RUNNING = 0x01
PAUSED = 0x03
SHUTDOWN = 0x04  # the VM is powered off
CRASHED = 0x06
SUSPENDED = 0x07
# TODO(maoy): BUILDING is only used by the bare-metal driver and should
# eventually go away; NOSTATE is probably sufficient.
BUILDING = 0x09

# TODO(justinsb): Power state really needs to be a proper class, so that
# we're not locked into the libvirt status codes and the mapping logic can
# live here rather than be spread throughout the code.
_STATE_MAP = dict([
    (NOSTATE, 'pending'),
    (RUNNING, 'running'),
    (PAUSED, 'paused'),
    (SHUTDOWN, 'shutdown'),
    (CRASHED, 'crashed'),
    (SUSPENDED, 'suspended'),
    (BUILDING, 'building'),
])


def name(code):
    """Return the human-readable name for a power-state code.

    Raises KeyError for an unrecognized code.
    """
    return _STATE_MAP[code]


def valid_states():
    """Return all recognized power-state codes."""
    return _STATE_MAP.keys()
apache-2.0
justdotJS/rowboat
rowboat/plugins/modlog/core.py
2
19552
import re import six import time import pytz import string import operator import humanize from holster.enum import Enum from holster.emitter import Priority from datetime import datetime from collections import defaultdict from disco.bot import CommandLevels from disco.types.base import UNSET, cached_property from disco.util.snowflake import to_unix, to_datetime from disco.util.sanitize import S from rowboat.plugins import RowboatPlugin as Plugin from rowboat.types import SlottedModel, Field, ListField, DictField, ChannelField, snowflake from rowboat.types.plugin import PluginConfig from rowboat.models.message import Message, MessageArchive from rowboat.models.guild import Guild from rowboat.util import ordered_load, MetaException from .pump import ModLogPump # Dynamically updated by the plugin Actions = Enum() URL_REGEX = re.compile(r'(https?://[^\s]+)') def filter_urls(content): return URL_REGEX.sub(r'<\1>', content) class ChannelConfig(SlottedModel): compact = Field(bool, default=True) include = ListField(Actions) exclude = ListField(Actions) rich = ListField(Actions) timestamps = Field(bool, default=False) timezone = Field(str, default='US/Eastern') def validate(self): assert pytz.timezone(self.timezone) is not None @cached_property def tz(self): return pytz.timezone(self.timezone) @cached_property def subscribed(self): include = set(self.include if self.include else Actions.attrs) exclude = set(self.exclude if self.exclude else []) return include - exclude class CustomFormat(SlottedModel): emoji = Field(str, default=None) format = Field(str, default=None) class ModLogConfig(PluginConfig): resolved = Field(bool, default=False, private=True) ignored_users = ListField(snowflake) ignored_channels = ListField(snowflake) custom = DictField(str, CustomFormat) channels = DictField(ChannelField, ChannelConfig) new_member_threshold = Field(int, default=(15 * 60)) _custom = DictField(dict, private=True) _channels = DictField(ChannelConfig, private=True) @cached_property 
def subscribed(self): return reduce(operator.or_, (i.subscribed for i in self.channels.values())) if self.channels else set() class Formatter(string.Formatter): def convert_field(self, value, conversion): if conversion in ('z', 's'): return S(unicode(value), escape_codeblocks=True) return unicode(value) class Debounce(object): def __init__(self, plugin, guild_id, selector, events): self.plugin = plugin self.guild_id = guild_id self.selector = selector self.events = events self.timestamp = time.time() def is_expired(self): return time.time() - self.timestamp > 60 def remove(self, event=None): self.plugin.debounces.remove(self, event) class DebouncesCollection(object): def __init__(self): self._data = defaultdict(lambda: defaultdict(list)) def __iter__(self): for top in self._data.values(): for bot in top.values(): for obj in bot: yield obj def add(self, obj): for event_name in obj.events: self._data[obj.guild_id][event_name].append(obj) def remove(self, obj, event=None): for event_name in ([event] if event else obj.events): if event_name in obj.events: obj.events.remove(event_name) if obj in self._data[obj.guild_id][event_name]: self._data[obj.guild_id][event_name].remove(obj) def find(self, event, delete=True, **kwargs): guild_id = event.guild_id if hasattr(event, 'guild_id') else event.guild.id for obj in self._data[guild_id][event.__class__.__name__]: if obj.is_expired(): obj.remove() continue for k, v in kwargs.items(): if obj.selector.get(k) != v: continue if delete: obj.remove(event=event.__class__.__name__) return obj @Plugin.with_config(ModLogConfig) class ModLogPlugin(Plugin): fmt = Formatter() def load(self, ctx): if not Actions.attrs: self.action_simple = {} with open('data/actions_simple.yaml') as f: simple = ordered_load(f.read()) for k, v in simple.items(): self.register_action(k, v) else: self.action_simple = ctx['action_simple'] self.debounces = ctx.get('debounces') or DebouncesCollection() # Tracks modlogs that are silenced self.hushed = {} # Tracks 
pumps for all modlogs self.pumps = {} super(ModLogPlugin, self).load(ctx) def create_debounce(self, event, events, **kwargs): if isinstance(event, (int, long)): guild_id = event else: guild_id = event.guild_id if hasattr(event, 'guild_id') else event.guild.id bounce = Debounce(self, guild_id, kwargs, events) self.debounces.add(bounce) return bounce def unload(self, ctx): ctx['action_simple'] = self.action_simple ctx['debounces'] = self.debounces super(ModLogPlugin, self).unload(ctx) def resolve_channels(self, guild, config): self.log.info('Resolving channels for guild %s (%s)', guild.id, guild.name) channels = {} for key, channel in config.channels.items(): if isinstance(key, int): chan = guild.channels.select_one(id=key) else: chan = guild.channels.select_one(name=key) if not chan: raise MetaException('Failed to ModLog.resolve_channels', { 'guild_name': guild.name, 'guild_id': unicode(guild.id), 'key': unicode(key), 'config_channels': list(unicode(i) for i in config.channels.keys()), 'guild_channels': list(unicode(i) for i in guild.channels.keys()), }) channels[chan.id] = channel self.log.info('Resolved channels for guild %s (%s): %s', guild.id, guild.name, channels) if config._channels: self.log.warning('Overwriting previously resolved channels %s / %s', config._channels, channels) config._channels = channels config._custom = None if config.custom: rowboat_guild = self.call('CorePlugin.get_guild', guild.id) if rowboat_guild and rowboat_guild.is_whitelisted(Guild.WhitelistFlags.MODLOG_CUSTOM_FORMAT): custom = {} for action, override in config.custom.items(): action = Actions.get(action) if not action: continue custom[action] = override.to_dict() if not custom[action].get('emoji'): custom[action]['emoji'] = self.action_simple[action]['emoji'] config._custom = custom config.resolved = True def register_action(self, name, simple): action = Actions.add(name) self.action_simple[action] = simple def log_action_ext(self, action, guild_id, **details): config = 
self.call('CorePlugin.get_config', guild_id) if not hasattr(config.plugins, 'modlog'): self.log.warning('log_action_ext ignored for %s, lack of modlog config', guild_id) return return self.log_action_raw( action, self.state.guilds.get(guild_id), getattr(config.plugins, 'modlog'), **details) def log_action(self, action, event, **details): details['e'] = event return self.log_action_raw(action, event.guild, event.config.get(), **details) def log_action_raw(self, action, guild, config, **details): if not config: return if not config.resolved: self.resolve_channels(guild, config) if not {action} & config.subscribed: return def generate_simple(chan_config): info = self.action_simple.get(action) if config._custom: if action in config._custom: info = config._custom[action] # Format contents and create the message with the given emoji contents = self.fmt.format(six.text_type(info['format']), **details) msg = u':{}: {}'.format(info['emoji'], contents) if chan_config.timestamps: ts = pytz.utc.localize(datetime.utcnow()).astimezone(chan_config.tz) msg = '`[{}]` '.format(ts.strftime('%H:%M:%S')) + msg if len(msg) > 2000: msg = msg[0:1997] + '...' 
return msg for channel_id, chan_config in config._channels.items(): if channel_id not in guild.channels: self.log.error('guild %s has outdated modlog channels (%s)', guild.id, channel_id) config._channels = [] config.resolved = False return if not {action} & chan_config.subscribed: continue msg = generate_simple(chan_config) if channel_id not in self.pumps: self.pumps[channel_id] = ModLogPump( self.state.channels.get(channel_id), ) self.pumps[channel_id].send(msg) @Plugin.command('hush', group='modlog', level=CommandLevels.ADMIN) def command_hush(self, event): if event.guild.id in self.hushed: return event.msg.reply(':warning: modlog is already hushed') self.hushed[event.guild.id] = True event.msg.reply(':white_check_mark: modlog has been hushed, do your dirty work in peace') @Plugin.command('unhush', group='modlog', level=CommandLevels.ADMIN) def command_unhush(self, event): if event.guild.id not in self.hushed: return event.msg.reply(':warning: modlog is not hushed') del self.hushed[event.guild.id] event.msg.reply(':white_check_mark: modlog has been unhushed, shhhhh... 
nobody saw anything') @Plugin.schedule(120) def cleanup_debounce(self): for obj in self.debounces: if obj.is_expired(): obj.remove() @Plugin.listen('ChannelCreate') def on_channel_create(self, event): self.log_action(Actions.CHANNEL_CREATE, event) @Plugin.listen('ChannelDelete') def on_channel_delete(self, event): self.log_action(Actions.CHANNEL_DELETE, event) @Plugin.listen('GuildBanAdd') def on_guild_ban_add(self, event): debounce = self.debounces.find(event, user_id=event.user.id) if debounce: return self.log_action(Actions.GUILD_BAN_ADD, event) @Plugin.listen('GuildBanRemove') def on_guild_ban_remove(self, event): debounce = self.debounces.find(event, user_id=event.user.id) if debounce: return self.log_action(Actions.GUILD_BAN_REMOVE, event) @Plugin.listen('GuildMemberAdd') def on_guild_member_add(self, event): created = humanize.naturaltime(datetime.utcnow() - to_datetime(event.user.id)) new = ( event.config.new_member_threshold and (time.time() - to_unix(event.user.id)) < event.config.new_member_threshold ) self.log_action(Actions.GUILD_MEMBER_ADD, event, new=' :new:' if new else '', created=created) @Plugin.listen('GuildMemberRemove') def on_guild_member_remove(self, event): debounce = self.debounces.find(event, user_id=event.user.id) if debounce: return self.log_action(Actions.GUILD_MEMBER_REMOVE, event) @Plugin.listen('GuildRoleCreate') def on_guild_role_create(self, event): self.log_action(Actions.GUILD_ROLE_CREATE, event) @Plugin.listen('GuildRoleDelete', priority=Priority.BEFORE) def on_guild_role_delete(self, event): pre_role = event.guild.roles.get(event.role_id) self.log_action(Actions.GUILD_ROLE_DELETE, event, pre_role=pre_role) @Plugin.listen('GuildMemberUpdate', priority=Priority.BEFORE) def on_guild_member_update(self, event): pre_member = event.guild.members.get(event.id) if not pre_member: return # Global debounce, used for large member updates debounce = self.debounces.find(event, user_id=event.user.id) if debounce: return # Log nickname 
changes if (pre_member.nick or event.nick) and pre_member.nick != event.nick: if not pre_member.nick: debounce = self.debounces.find(event, user_id=event.user.id, nickname=event.nick) if debounce: return self.log_action( Actions.ADD_NICK, event, nickname=event.nick) elif not event.nick: self.log_action( Actions.RMV_NICK, event, nickname=pre_member.nick) else: self.log_action( Actions.CHANGE_NICK, event, before=pre_member.nick or '<NO_NICK>', after=event.nick or '<NO_NICK>') # Log role changes, which require diffing the pre/post roles on the member pre_roles = set(pre_member.roles) post_roles = set(event.roles) if pre_roles != post_roles: added = post_roles - pre_roles removed = pre_roles - post_roles # Log all instances of a role getting added for role in filter(bool, map(event.guild.roles.get, added)): debounce = self.debounces.find( event, user_id=event.user.id, role_id=role.id, ) if debounce: continue self.log_action(Actions.GUILD_MEMBER_ROLES_ADD, event, role=role) for role in filter(bool, map(event.guild.roles.get, removed)): debounce = self.debounces.find( event, user_id=event.user.id, role_id=role.id, ) self.log_action(Actions.GUILD_MEMBER_ROLES_RMV, event, role=role) @Plugin.listen('PresenceUpdate', priority=Priority.BEFORE, metadata={'global_': True}) def on_presence_update(self, event): plugin = self.bot.plugins.get('CorePlugin') if not plugin or not event.user: return subscribed_guilds = defaultdict(list) for guild_id, config in plugin.guilds.items(): guild = self.state.guilds.get(guild_id) if not guild: continue if event.user.id not in guild.members: continue config = config.get_config() if not config.plugins or not config.plugins.modlog: continue if event.user.id in config.plugins.modlog.ignored_users: continue if {Actions.CHANGE_USERNAME} & config.plugins.modlog.subscribed: subscribed_guilds[Actions.CHANGE_USERNAME].append((guild, config)) if not len(subscribed_guilds): return pre_user = self.state.users.get(event.user.id) before = unicode(pre_user) 
if Actions.CHANGE_USERNAME in subscribed_guilds: if event.user.username is not UNSET and event.user.username != pre_user.username: for guild, config in subscribed_guilds[Actions.CHANGE_USERNAME]: self.log_action_raw( Actions.CHANGE_USERNAME, guild, config.plugins.modlog, before=before, after=unicode(event.user), e=event) @Plugin.listen('MessageUpdate', priority=Priority.BEFORE) def on_message_update(self, event): if event.author.id == self.state.me.id: return if event.author.id in event.config.ignored_users: return if event.channel_id in event.config.ignored_channels: return try: msg = Message.get(Message.id == event.id) except Message.DoesNotExist: return if not event.channel or not event.author: return if event.content is not UNSET and msg.content != event.with_proper_mentions: self.log_action( Actions.MESSAGE_EDIT, event, before=filter_urls(msg.content), after=filter_urls(event.with_proper_mentions)) @Plugin.listen('MessageDelete') def on_message_delete(self, event): if event.guild.id in self.hushed: return try: msg = Message.get(Message.id == event.id) except Message.DoesNotExist: return channel = self.state.channels.get(msg.channel_id) if not channel or not msg.author: return debounce = self.debounces.find(event, message_id=event.id) if debounce: return if msg.author.id == self.state.me.id: return if msg.author.id in event.config.ignored_users: return if msg.channel_id in event.config.ignored_channels: return # Truncate/limit the size of contents contents = filter_urls(msg.content) if len(contents) > 1750: contents = contents[:1750] + u'... 
({} more characters)'.format( len(contents) - 1750 ) self.log_action(Actions.MESSAGE_DELETE, event, author=msg.author, author_id=msg.author.id, channel=channel, msg=contents, attachments='' if not msg.attachments else u'({})'.format( ', '.join(u'<{}>'.format(i) for i in msg.attachments))) @Plugin.listen('MessageDeleteBulk') def on_message_delete_bulk(self, event): channel = self.state.channels.get(event.channel_id) if not channel: return if event.guild.id in self.hushed: return archive = MessageArchive.create_from_message_ids(event.ids) self.log_action(Actions.MESSAGE_DELETE_BULK, event, log=archive.url, channel=channel, count=len(event.ids)) @Plugin.listen('VoiceStateUpdate', priority=Priority.BEFORE) def on_voice_state_update(self, event): old_vs = self.state.voice_states.get(event.session_id) # Moving channels if old_vs and event.channel_id: if old_vs.channel_id != event.channel_id: self.log_action( Actions.VOICE_CHANNEL_MOVE, event, before_channel=old_vs.channel) elif old_vs and not event.channel_id: self.log_action( Actions.VOICE_CHANNEL_LEAVE, event, channel=old_vs.channel) elif not old_vs: self.log_action( Actions.VOICE_CHANNEL_JOIN, event)
mit
tsukinowasha/ansible-lint-rules
rules/RoleRelativePath.py
1
1521
from ansiblelint import AnsibleLintRule


class RoleRelativePath(AnsibleLintRule):
    """Flag tasks inside a role that reach into templates/files via '../'.

    Inside a role, `template`/`copy` (and their win_* variants) resolve
    `src` against the role's own templates/ and files/ directories, so a
    relative '../templates' or '../files' prefix is redundant.
    """

    id = 'E201'
    shortdesc = "Doesn't need a relative path in role"
    description = ''
    tags = ['role']

    # module name -> (task key to inspect, redundant relative prefix)
    # NOTE(review): the original checked '../win_templates' for
    # win_template; preserved here — confirm against the role layout used.
    _CHECKS = (
        ('template', 'src', '../templates'),
        ('win_template', 'src', '../win_templates'),
        ('copy', 'src', '../files'),
        ('win_copy', 'src', '../files'),
    )

    def matchplay(self, file, play):
        """Return a list of (match, message) tuples for offending tasks.

        Bug fixes versus the original:
        - always return a list (the original returned bare tuples for
          win_template/copy/win_copy and `False` for non-dict tasks,
          which is not a valid matchplay result);
        - use consistent, correct match keys (the original used leftover
          keys such as 'sudo' and '');
        - guard the 'src' lookup for win_copy too (the original indexed
          play['win_copy']['src'] unconditionally, raising KeyError for
          content-based win_copy tasks).
        """
        # Assume that a path containing 'roles' means we are inside a role.
        if 'roles' not in file['path']:
            return []
        results = []
        for module, key, prefix in self._CHECKS:
            if module not in play:
                continue
            task = play[module]
            # Short-form (string) module invocations carry no 'src' dict.
            if not isinstance(task, dict):
                continue
            src = task.get(key)
            if src is not None and prefix in src:
                results.append(({module: task}, self.shortdesc))
        return results
mit
mariosky/evo-drawings
venv/lib/python2.7/site-packages/pystache/template_spec.py
50
1725
# coding: utf-8

"""
Defines a mixin class for customizing template information per view.

Subclass TemplateSpec when a view needs non-default template loading.
The "spec" in TemplateSpec stands for "special" or "specified" template
information.
"""


class TemplateSpec(object):
    """
    Mixin/interface for supplying custom template information to a view.

    Each attribute below overrides one piece of template information for
    the view; leaving an attribute as None means "use the default
    behavior" for that piece. Only subclass this when customized template
    loading is actually needed.
    """

    # The template itself, as a string.
    template = None
    # Encoding used by the template.
    template_encoding = None
    # Template file extension; defaults to "mustache". Pass False for
    # extensionless template files.
    template_extension = None
    # Name of the template.
    template_name = None
    # Absolute path to the template file.
    template_path = None
    # Directory containing the template file, relative to the directory
    # of the module defining the class.
    template_rel_directory = None
    # Path to the template file, relative to the directory of the module
    # defining the class.
    template_rel_path = None
agpl-3.0
ashhher3/pylearn2
pylearn2/datasets/stl10.py
44
5563
""" .. todo:: WRITEME """ __authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" import numpy as np from theano.compat.six.moves import xrange from pylearn2.datasets import dense_design_matrix from pylearn2.utils.serial import load from pylearn2.utils import contains_nan class STL10(dense_design_matrix.DenseDesignMatrix): """ The STL-10 dataset Adam Coates, Honglak Lee, Andrew Y. Ng An Analysis of Single Layer Networks in Unsupervised Feature Learning AISTATS, 2011 http://www.stanford.edu/~acoates//stl10/ When reporting results on this dataset, you are meant to use a somewhat unusal evaluation procedure. Use STL10(which_set='train') to load the training set. Then restrict the training set to one of the ten folds using the restrict function below. You must then train only on the data from that fold. For the test set, report the average test set performance over the ten trials obtained by training on each of the ten folds. The folds here do not define the splits you should use for cross validation. You are free to make your own split within each fold. Parameters ---------- which_set : WRITEME center : WRITEME example_range : WRITEME """ def __init__(self, which_set, center=False, example_range=None): """ .. 
todo:: WRITEME """ if which_set == 'train': train = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/train.mat') # Load the class names self.class_names = [array[0].encode('utf-8') for array in train['class_names'][0]] # Load the fold indices fold_indices = train['fold_indices'] assert fold_indices.shape == (1, 10) self.fold_indices = np.zeros((10, 1000), dtype='uint16') for i in xrange(10): indices = fold_indices[0, i] assert indices.shape == (1000, 1) assert indices.dtype == 'uint16' self.fold_indices[i, :] = indices[:, 0] # The data is stored as uint8 # If we leave it as uint8, it will cause the CAE to silently fail # since theano will treat derivatives wrt X as 0 X = np.cast['float32'](train['X']) assert X.shape == (5000, 96 * 96 * 3) if example_range is not None: X = X[example_range[0]:example_range[1], :] y_labels = 10 # this is uint8 but labels range should be corrected y = train['y'][:, 0] - 1 assert y.shape == (5000,) elif which_set == 'test': test = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/test.mat') # Load the class names self.class_names = [array[0].encode('utf-8') for array in test['class_names'][0]] # The data is stored as uint8 # If we leave it as uint8, it will cause the CAE to silently fail # since theano will treat derivatives wrt X as 0 X = np.cast['float32'](test['X']) assert X.shape == (8000, 96 * 96 * 3) if example_range is not None: X = X[example_range[0]:example_range[1], :] y_labels = 10 # this is uint8 but labels range should be corrected y = test['y'][:, 0] - 1 assert y.shape == (8000,) elif which_set == 'unlabeled': unlabeled = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/' 'unlabeled.mat') X = unlabeled['X'] # this file is stored in HDF format, which transposes everything assert X.shape == (96 * 96 * 3, 100000) assert X.dtype == 'uint8' if example_range is None: X = X.value else: X = X.value[:, example_range[0]:example_range[1]] X = np.cast['float32'](X.T) unlabeled.close() y_labels = None y = None else: raise ValueError('"' + 
which_set + '" is not an STL10 dataset. ' 'Recognized values are "train", "test", and ' '"unlabeled".') if center: X -= 127.5 view_converter = dense_design_matrix.DefaultViewConverter((96, 96, 3)) super(STL10, self).__init__(X=X, y=y, y_labels=y_labels, view_converter=view_converter) for i in xrange(self.X.shape[0]): mat = X[i:i + 1, :] topo = self.get_topological_view(mat) for j in xrange(topo.shape[3]): temp = topo[0, :, :, j].T.copy() topo[0, :, :, j] = temp mat = self.get_design_matrix(topo) X[i:i + 1, :] = mat assert not contains_nan(self.X) def restrict(dataset, fold): """ Restricts the dataset to use the specified fold (1 to 10). dataset should be the training set. """ fold_indices = dataset.fold_indices assert fold_indices.shape == (10, 1000) idxs = fold_indices[fold, :] - 1 dataset.X = dataset.X[idxs, :].copy() assert dataset.X.shape[0] == 1000 dataset.y = dataset.y[idxs, ...].copy() assert dataset.y.shape[0] == 1000 return dataset
bsd-3-clause
sndnvaps/G718c_kernel
scripts/build-all.py
1474
10189
#! /usr/bin/env python # Copyright (c) 2009-2013, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. # # TODO: Accept arguments to indicate what to build. 
from __future__ import print_function

import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys

version = 'build-all.py, version 0.01'

build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
# NOTE: this aliases (not copies) os.environ, so the updates below are
# visible to every subprocess launched from this script.
make_env = os.environ
make_env.update({
    'ARCH': 'arm',
    'KCONFIG_NOTIMESTAMP': 'true'})
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
# Populated from the parsed command line in main(); module-level so that
# build()/build_many()/Builder.run() can consult the flags.
all_options = {}


def error(msg):
    """Print an error message to stderr without terminating."""
    sys.stderr.write("error: %s\n" % msg)


def fail(msg):
    """Fail with a user-printed message"""
    error(msg)
    sys.exit(1)


def check_kernel():
    """Ensure that PWD is a kernel directory"""
    if (not os.path.isfile('MAINTAINERS') or
            not os.path.isfile('arch/arm/mach-msm/Kconfig')):
        fail("This doesn't seem to be an MSM kernel dir")


def check_build():
    """Ensure that the build directory is present."""
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # BUGFIX: `errno` was used here without being imported, which
            # turned the benign already-exists race into a NameError.
            if exc.errno != errno.EEXIST:
                raise


def update_config(file, str):
    """Append the option line `str` to the defconfig at path `file`.

    (Parameter names kept for interface compatibility even though they
    shadow builtins.)
    """
    print('Updating %s with \'%s\'\n' % (file, str))
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()


def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    names = {}
    arch_pats = (
        r'[fm]sm[0-9]*_defconfig',
        r'apq*_defconfig',
        r'qsd*_defconfig',
        r'msmkrypton*_defconfig',
    )
    for p in arch_pats:
        for n in glob.glob('arch/arm/configs/' + p):
            # Strip the trailing '_defconfig' (10 chars) to get the target.
            names[os.path.basename(n)[:-10]] = n
    return names


class Builder:
    """Runs one make invocation, teeing its output into a log file."""

    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        """Run `args` as a subprocess; return its exit status.

        All output is written to the log; in verbose mode it is echoed to
        stdout, otherwise a dot is printed per 64 lines as a progress bar.
        """
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                                env=make_env,
                                bufsize=0,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        devnull.close()  # the child holds its own copy of the fd
        count = 0
        # for line in proc.stdout:
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print()
                sys.stdout.write('.')
                sys.stdout.flush()
        print()
        result = proc.wait()
        self.fd.close()
        return result


# Targets that failed when --keep-going is in effect; reported at the end.
failed_targets = []


def build(target):
    """Configure and build a single defconfig target under build_dir."""
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print('Building %s in %s log %s' % (target, dest_dir, log_name))
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)

    staging_dir = 'install_staging'
    modi_dir = '%s' % staging_dir
    hdri_dir = '%s/usr' % staging_dir
    shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)

    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
                           '%s_defconfig' % target],
                          env=make_env, stdin=devnull)
    devnull.close()

    if not all_options.updateconfigs:
        # Build targets can be dependent upon the completion of previous
        # build targets, so build them one at a time.
        cmd_line = ['make', 'INSTALL_HDR_PATH=%s' % hdri_dir,
                    'INSTALL_MOD_PATH=%s' % modi_dir, 'O=%s' % dest_dir]
        build_targets = []
        for c in make_command:
            # Flags (-j, --foo) go on every command line; everything else
            # is a make target built separately.
            if re.match(r'^-{1,2}\w', c):
                cmd_line.append(c)
            else:
                build_targets.append(c)
        for t in build_targets:
            # Renamed from `build` to avoid shadowing this function.
            builder = Builder(log_name)
            result = builder.run(cmd_line + [t])
            if result != 0:
                if all_options.keep_going:
                    failed_targets.append(target)
                    fail_or_error = error
                else:
                    fail_or_error = fail
                fail_or_error("Failed to build %s, see %s" %
                              (target, builder.logname))

    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
                               'savedefconfig'],
                              env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)


def build_many(allconf, targets):
    """Build every target in `targets`; fail at the end if any failed."""
    print("Building %d target(s)" % len(targets))
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
                        [target for target in failed_targets]))


def main():
    """Parse options and dispatch to the requested build(s)."""
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] target target ... -- List specific targets
    %prog [options] perf -- Build all perf targets
    %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
                      dest='configs',
                      help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
                      dest='list',
                      help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
                      dest='verbose',
                      help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
                      dest='oldconfig',
                      help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
                      dest='updateconfigs',
                      help="Update defconfigs with provided option setting, "
                           "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
                      help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
                      dest='load_average',
                      help="Don't start multiple jobs unless load is below "
                           "LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
                      dest='keep_going', default=False,
                      help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
                      help='Build the indicated make target (default: %s)' %
                           ' '.join(make_command))
    (options, args) = parser.parse_args()
    global all_options
    all_options = options

    if options.list:
        print("Available targets:")
        for target in configs.keys():
            print(" %s" % target)
        sys.exit(0)

    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" %
                             (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")


if __name__ == "__main__":
    main()
gpl-2.0
jaxkodex/odoo
addons/sale_margin/__openerp__.py
261
1592
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Margins in Sales Orders', 'version':'1.0', 'category' : 'Sales Management', 'description': """ This module adds the 'Margin' on sales order. ============================================= This gives the profitability by calculating the difference between the Unit Price and Cost Price. """, 'author':'OpenERP SA', 'depends':['sale'], 'demo':['sale_margin_demo.xml'], 'test': ['test/sale_margin.yml'], 'data':['security/ir.model.access.csv','sale_margin_view.xml'], 'auto_install': False, 'installable': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
bowang/tensorflow
tensorflow/examples/adding_an_op/zero_out_op_1.py
190
1053
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ZeroOut op Python library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import tensorflow as tf _zero_out_module = tf.load_op_library( os.path.join(tf.resource_loader.get_data_files_path(), 'zero_out_op_kernel_1.so')) zero_out = _zero_out_module.zero_out
apache-2.0
sarvex/tensorflow
tensorflow/python/ops/data_flow_ops.py
17
93142
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================== """Data Flow Operations.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import hashlib import threading import six from tensorflow.python.eager import context from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.lib.io import python_io from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_data_flow_ops import * from tensorflow.python.util import deprecation from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import tf_export # pylint: enable=wildcard-import def _as_type_list(dtypes): """Convert dtypes to a list of types.""" assert dtypes is not None if not (isinstance(dtypes, list) or isinstance(dtypes, tuple)): # We have a single type. 
return [dtypes] else: # We have a list or tuple of types. return list(dtypes) def _as_shape_list(shapes, dtypes, unknown_dim_allowed=False, unknown_rank_allowed=False): """Convert shapes to a list of tuples of int (or None).""" del dtypes if unknown_dim_allowed: if (not isinstance(shapes, collections_abc.Sequence) or not shapes or any(shape is None or isinstance(shape, int) for shape in shapes)): raise ValueError( "When providing partial shapes, a list of shapes must be provided.") if shapes is None: return None if isinstance(shapes, tensor_shape.TensorShape): shapes = [shapes] if not isinstance(shapes, (tuple, list)): raise TypeError( "shapes must be a TensorShape or a list or tuple of TensorShapes.") if all(shape is None or isinstance(shape, int) for shape in shapes): # We have a single shape. shapes = [shapes] shapes = [tensor_shape.as_shape(shape) for shape in shapes] if not unknown_dim_allowed: if any(not shape.is_fully_defined() for shape in shapes): raise ValueError("All shapes must be fully defined: %s" % shapes) if not unknown_rank_allowed: if any(shape.dims is None for shape in shapes): raise ValueError("All shapes must have a defined rank: %s" % shapes) return shapes def _as_name_list(names, dtypes): if names is None: return None if not isinstance(names, (list, tuple)): names = [names] if len(names) != len(dtypes): raise ValueError("List of names must have the same length as the list " "of dtypes") return list(names) def _shape_common(s1, s2): """The greatest lower bound (ordered by specificity) TensorShape.""" s1 = tensor_shape.TensorShape(s1) s2 = tensor_shape.TensorShape(s2) if s1.ndims is None or s2.ndims is None or s1.ndims != s2.ndims: return tensor_shape.unknown_shape() d = [ d1 if d1 is not None and d1 == d2 else None for (d1, d2) in zip(s1.as_list(), s2.as_list()) ] return tensor_shape.TensorShape(d) # pylint: disable=protected-access @tf_export("queue.QueueBase", v1=["queue.QueueBase", "io.QueueBase", "QueueBase"]) 
@deprecation.deprecated_endpoints(["io.QueueBase", "QueueBase"]) class QueueBase(object): """Base class for queue implementations. A queue is a TensorFlow data structure that stores tensors across multiple steps, and exposes operations that enqueue and dequeue tensors. Each queue element is a tuple of one or more tensors, where each tuple component has a static dtype, and may have a static shape. The queue implementations support versions of enqueue and dequeue that handle single elements, versions that support enqueuing and dequeuing a batch of elements at once. See `tf.queue.FIFOQueue` and `tf.queue.RandomShuffleQueue` for concrete implementations of this class, and instructions on how to create them. """ def __init__(self, dtypes, shapes, names, queue_ref): """Constructs a queue object from a queue reference. The two optional lists, `shapes` and `names`, must be of the same length as `dtypes` if provided. The values at a given index `i` indicate the shape and name to use for the corresponding queue component in `dtypes`. Args: dtypes: A list of types. The length of dtypes must equal the number of tensors in each element. shapes: Constraints on the shapes of tensors in an element: A list of shape tuples or None. This list is the same length as dtypes. If the shape of any tensors in the element are constrained, all must be; shapes can be None if the shapes should not be constrained. names: Optional list of names. If provided, the `enqueue()` and `dequeue()` methods will use dictionaries with these names as keys. Must be None or a list or tuple of the same length as `dtypes`. queue_ref: The queue reference, i.e. the output of the queue op. Raises: ValueError: If one of the arguments is invalid. 
""" self._dtypes = dtypes if shapes is not None: if len(shapes) != len(dtypes): raise ValueError("Queue shapes must have the same length as dtypes") self._shapes = [tensor_shape.TensorShape(s) for s in shapes] else: self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes] if names is not None: if len(names) != len(dtypes): raise ValueError("Queue names must have the same length as dtypes") self._names = names else: self._names = None self._queue_ref = queue_ref if isinstance(queue_ref, ops.EagerTensor): if context.context().scope_name: self._name = context.context().scope_name else: self._name = "Empty" self._resource_deleter = resource_variable_ops.EagerResourceDeleter( queue_ref, None) else: self._name = self._queue_ref.op.name.split("/")[-1] @staticmethod def from_list(index, queues): """Create a queue using the queue reference from `queues[index]`. Args: index: An integer scalar tensor that determines the input that gets selected. queues: A list of `QueueBase` objects. Returns: A `QueueBase` object. Raises: TypeError: When `queues` is not a list of `QueueBase` objects, or when the data types of `queues` are not all the same. 
""" if ((not queues) or (not isinstance(queues, list)) or (not all(isinstance(x, QueueBase) for x in queues))): raise TypeError("A list of queues expected") dtypes = queues[0].dtypes if not all(dtypes == q.dtypes for q in queues[1:]): raise TypeError("Queues do not have matching component dtypes.") names = queues[0].names if not all(names == q.names for q in queues[1:]): raise TypeError("Queues do not have matching component names.") queue_shapes = [q.shapes for q in queues] reduced_shapes = [ six.moves.reduce(_shape_common, s) for s in zip(*queue_shapes) ] queue_refs = array_ops.stack([x.queue_ref for x in queues]) selected_queue = array_ops.gather(queue_refs, index) return QueueBase( dtypes=dtypes, shapes=reduced_shapes, names=names, queue_ref=selected_queue) @property def queue_ref(self): """The underlying queue reference.""" return self._queue_ref @property def name(self): """The name of the underlying queue.""" if context.executing_eagerly(): return self._name return self._queue_ref.op.name @property def dtypes(self): """The list of dtypes for each component of a queue element.""" return self._dtypes @property def shapes(self): """The list of shapes for each component of a queue element.""" return self._shapes @property def names(self): """The list of names for each component of a queue element.""" return self._names def _check_enqueue_dtypes(self, vals): """Validate and convert `vals` to a list of `Tensor`s. The `vals` argument can be a Tensor, a list or tuple of tensors, or a dictionary with tensor values. If it is a dictionary, the queue must have been constructed with a `names` attribute and the dictionary keys must match the queue names. If the queue was constructed with a `names` attribute, `vals` must be a dictionary. Args: vals: A tensor, a list or tuple of tensors, or a dictionary.. Returns: A list of `Tensor` objects. Raises: ValueError: If `vals` is invalid. 
""" if isinstance(vals, dict): if not self._names: raise ValueError("Queue must have names to enqueue a dictionary") if sorted(self._names, key=str) != sorted(vals.keys(), key=str): raise ValueError("Keys in dictionary to enqueue do not match " "names of Queue. Dictionary: (%s), Queue: (%s)" % (sorted(vals.keys()), sorted(self._names))) # The order of values in `self._names` indicates the order in which the # tensors in the dictionary `vals` must be listed. vals = [vals[k] for k in self._names] else: if self._names: raise ValueError("You must enqueue a dictionary in a Queue with names") if not isinstance(vals, (list, tuple)): vals = [vals] tensors = [] for i, (val, dtype) in enumerate(zip(vals, self._dtypes)): tensors.append( ops.convert_to_tensor(val, dtype=dtype, name="component_%d" % i)) return tensors def _scope_vals(self, vals): """Return a list of values to pass to `name_scope()`. Args: vals: A tensor, a list or tuple of tensors, or a dictionary. Returns: The values in vals as a list. """ if isinstance(vals, (list, tuple)): return vals elif isinstance(vals, dict): return vals.values() else: return [vals] def enqueue(self, vals, name=None): """Enqueues one element to this queue. If the queue is full when this operation executes, it will block until the element has been enqueued. At runtime, this operation may raise an error if the queue is `tf.QueueBase.close` before or during its execution. If the queue is closed before this operation runs, `tf.errors.CancelledError` will be raised. If this operation is blocked, and either (i) the queue is closed by a close operation with `cancel_pending_enqueues=True`, or (ii) the session is `tf.Session.close`, `tf.errors.CancelledError` will be raised. Args: vals: A tensor, a list or tuple of tensors, or a dictionary containing the values to enqueue. name: A name for the operation (optional). Returns: The operation that enqueues a new tuple of tensors to the queue. 
""" with ops.name_scope(name, "%s_enqueue" % self._name, self._scope_vals(vals)) as scope: vals = self._check_enqueue_dtypes(vals) # NOTE(mrry): Not using a shape function because we need access to # the `QueueBase` object. for val, shape in zip(vals, self._shapes): val.get_shape().assert_is_compatible_with(shape) if self._queue_ref.dtype == _dtypes.resource: return gen_data_flow_ops.queue_enqueue_v2( self._queue_ref, vals, name=scope) else: return gen_data_flow_ops.queue_enqueue( self._queue_ref, vals, name=scope) def enqueue_many(self, vals, name=None): """Enqueues zero or more elements to this queue. This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tensors in `vals` must have the same size in the 0th dimension. If the queue is full when this operation executes, it will block until all of the elements have been enqueued. At runtime, this operation may raise an error if the queue is `tf.QueueBase.close` before or during its execution. If the queue is closed before this operation runs, `tf.errors.CancelledError` will be raised. If this operation is blocked, and either (i) the queue is closed by a close operation with `cancel_pending_enqueues=True`, or (ii) the session is `tf.Session.close`, `tf.errors.CancelledError` will be raised. Args: vals: A tensor, a list or tuple of tensors, or a dictionary from which the queue elements are taken. name: A name for the operation (optional). Returns: The operation that enqueues a batch of tuples of tensors to the queue. """ with ops.name_scope(name, "%s_EnqueueMany" % self._name, self._scope_vals(vals)) as scope: vals = self._check_enqueue_dtypes(vals) # NOTE(mrry): Not using a shape function because we need access to # the `QueueBase` object. # NOTE(fchollet): the code that follow is verbose because it needs to be # compatible with both TF v1 TensorShape behavior and TF v2 behavior. 
batch_dim = tensor_shape.dimension_value( vals[0].get_shape().with_rank_at_least(1)[0]) batch_dim = tensor_shape.Dimension(batch_dim) for val, shape in zip(vals, self._shapes): val_batch_dim = tensor_shape.dimension_value( val.get_shape().with_rank_at_least(1)[0]) val_batch_dim = tensor_shape.Dimension(val_batch_dim) batch_dim = batch_dim.merge_with(val_batch_dim) val.get_shape()[1:].assert_is_compatible_with(shape) return gen_data_flow_ops.queue_enqueue_many_v2( self._queue_ref, vals, name=scope) def _dequeue_return_value(self, tensors): """Return the value to return from a dequeue op. If the queue has names, return a dictionary with the names as keys. Otherwise return either a single tensor or a list of tensors depending on the length of `tensors`. Args: tensors: List of tensors from the dequeue op. Returns: A single tensor, a list of tensors, or a dictionary of tensors. """ if self._names: # The returned values in `tensors` are in the same order as # the names in `self._names`. return {n: tensors[i] for i, n in enumerate(self._names)} elif len(tensors) == 1: return tensors[0] else: return tensors def dequeue(self, name=None): """Dequeues one element from this queue. If the queue is empty when this operation executes, it will block until there is an element to dequeue. At runtime, this operation may raise an error if the queue is `tf.QueueBase.close` before or during its execution. If the queue is closed, the queue is empty, and there are no pending enqueue operations that can fulfill this request, `tf.errors.OutOfRangeError` will be raised. If the session is `tf.Session.close`, `tf.errors.CancelledError` will be raised. Args: name: A name for the operation (optional). Returns: The tuple of tensors that was dequeued. 
""" if name is None: name = "%s_Dequeue" % self._name if self._queue_ref.dtype == _dtypes.resource: ret = gen_data_flow_ops.queue_dequeue_v2( self._queue_ref, self._dtypes, name=name) else: ret = gen_data_flow_ops.queue_dequeue( self._queue_ref, self._dtypes, name=name) # NOTE(mrry): Not using a shape function because we need access to # the `QueueBase` object. if not context.executing_eagerly(): op = ret[0].op for output, shape in zip(op.values(), self._shapes): output.set_shape(shape) return self._dequeue_return_value(ret) def dequeue_many(self, n, name=None): """Dequeues and concatenates `n` elements from this queue. This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size `n` in the 0th dimension. If the queue is closed and there are less than `n` elements left, then an `OutOfRange` exception is raised. At runtime, this operation may raise an error if the queue is `tf.QueueBase.close` before or during its execution. If the queue is closed, the queue contains fewer than `n` elements, and there are no pending enqueue operations that can fulfill this request, `tf.errors.OutOfRangeError` will be raised. If the session is `tf.Session.close`, `tf.errors.CancelledError` will be raised. Args: n: A scalar `Tensor` containing the number of elements to dequeue. name: A name for the operation (optional). Returns: The list of concatenated tensors that was dequeued. """ if name is None: name = "%s_DequeueMany" % self._name ret = gen_data_flow_ops.queue_dequeue_many_v2( self._queue_ref, n=n, component_types=self._dtypes, name=name) # NOTE(mrry): Not using a shape function because we need access to # the Queue object. 
if not context.executing_eagerly(): op = ret[0].op batch_dim = tensor_shape.Dimension( tensor_util.constant_value(op.inputs[1])) for output, shape in zip(op.values(), self._shapes): output.set_shape( tensor_shape.TensorShape([batch_dim]).concatenate(shape)) return self._dequeue_return_value(ret) def dequeue_up_to(self, n, name=None): """Dequeues and concatenates `n` elements from this queue. **Note** This operation is not supported by all queues. If a queue does not support DequeueUpTo, then a `tf.errors.UnimplementedError` is raised. This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. If the queue has not been closed, all of the components in the dequeued tuple will have size `n` in the 0th dimension. If the queue is closed and there are more than `0` but fewer than `n` elements remaining, then instead of raising a `tf.errors.OutOfRangeError` like `tf.QueueBase.dequeue_many`, less than `n` elements are returned immediately. If the queue is closed and there are `0` elements left in the queue, then a `tf.errors.OutOfRangeError` is raised just like in `dequeue_many`. Otherwise the behavior is identical to `dequeue_many`. Args: n: A scalar `Tensor` containing the number of elements to dequeue. name: A name for the operation (optional). Returns: The tuple of concatenated tensors that was dequeued. """ if name is None: name = "%s_DequeueUpTo" % self._name ret = gen_data_flow_ops.queue_dequeue_up_to_v2( self._queue_ref, n=n, component_types=self._dtypes, name=name) # NOTE(mrry): Not using a shape function because we need access to # the Queue object. if not context.executing_eagerly(): op = ret[0].op for output, shape in zip(op.values(), self._shapes): output.set_shape(tensor_shape.TensorShape([None]).concatenate(shape)) return self._dequeue_return_value(ret) def close(self, cancel_pending_enqueues=False, name=None): """Closes this queue. 
This operation signals that no more elements will be enqueued in the given queue. Subsequent `enqueue` and `enqueue_many` operations will fail. Subsequent `dequeue` and `dequeue_many` operations will continue to succeed if sufficient elements remain in the queue. Subsequently dequeue and dequeue_many operations that would otherwise block waiting for more elements (if close hadn't been called) will now fail immediately. If `cancel_pending_enqueues` is `True`, all pending requests will also be canceled. Args: cancel_pending_enqueues: (Optional.) A boolean, defaulting to `False` (described above). name: A name for the operation (optional). Returns: The operation that closes the queue. """ if name is None: name = "%s_Close" % self._name if self._queue_ref.dtype == _dtypes.resource: return gen_data_flow_ops.queue_close_v2( self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name) else: return gen_data_flow_ops.queue_close( self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name) def is_closed(self, name=None): """Returns true if queue is closed. This operation returns true if the queue is closed and false if the queue is open. Args: name: A name for the operation (optional). Returns: True if the queue is closed and false if the queue is open. """ if name is None: name = "%s_Is_Closed" % self._name if self._queue_ref.dtype == _dtypes.resource: return gen_data_flow_ops.queue_is_closed_v2(self._queue_ref, name=name) else: return gen_data_flow_ops.queue_is_closed_(self._queue_ref, name=name) def size(self, name=None): """Compute the number of elements in this queue. Args: name: A name for the operation (optional). Returns: A scalar tensor containing the number of elements in this queue. 
""" if name is None: name = "%s_Size" % self._name if self._queue_ref.dtype == _dtypes.resource: return gen_data_flow_ops.queue_size_v2(self._queue_ref, name=name) else: return gen_data_flow_ops.queue_size(self._queue_ref, name=name) def _shared_name(shared_name): if context.executing_eagerly(): return str(ops.uid()) return shared_name @tf_export( "queue.RandomShuffleQueue", v1=["queue.RandomShuffleQueue", "io.RandomShuffleQueue", "RandomShuffleQueue"]) @deprecation.deprecated_endpoints( ["io.RandomShuffleQueue", "RandomShuffleQueue"]) class RandomShuffleQueue(QueueBase): """A queue implementation that dequeues elements in a random order. See `tf.queue.QueueBase` for a description of the methods on this class. """ def __init__(self, capacity, min_after_dequeue, dtypes, shapes=None, names=None, seed=None, shared_name=None, name="random_shuffle_queue"): """Create a queue that dequeues elements in a random order. A `RandomShuffleQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `RandomShuffleQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a queue element must have the respective fixed shape. If it is unspecified, different queue elements may have different shapes, but the use of `dequeue_many` is disallowed. The `min_after_dequeue` argument allows the caller to specify a minimum number of elements that will remain in the queue after a `dequeue` or `dequeue_many` operation completes, to ensure a minimum level of mixing of elements. This invariant is maintained by blocking those operations until sufficient elements have been enqueued. The `min_after_dequeue` argument is ignored after the queue has been closed. Args: capacity: An integer. 
The upper bound on the number of elements that may be stored in this queue. min_after_dequeue: An integer (described above). dtypes: A list of `DType` objects. The length of `dtypes` must equal the number of tensors in each queue element. shapes: (Optional.) A list of fully-defined `TensorShape` objects with the same length as `dtypes`, or `None`. names: (Optional.) A list of string naming the components in the queue with the same length as `dtypes`, or `None`. If specified the dequeue methods return a dictionary with the names as keys. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. shared_name: (Optional.) If non-empty, this queue will be shared under the given name across multiple sessions. name: Optional name for the queue operation. """ dtypes = _as_type_list(dtypes) shapes = _as_shape_list(shapes, dtypes) names = _as_name_list(names, dtypes) seed1, seed2 = random_seed.get_seed(seed) if seed1 is None and seed2 is None: seed1, seed2 = 0, 0 elif seed is None and shared_name is not None: # This means that graph seed is provided but op seed is not provided. # If shared_name is also provided, make seed2 depend only on the graph # seed and shared_name. (seed2 from get_seed() is generally dependent on # the id of the last op created.) string = (str(seed1) + shared_name).encode("utf-8") seed2 = int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF queue_ref = gen_data_flow_ops.random_shuffle_queue_v2( component_types=dtypes, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed1, seed2=seed2, shared_name=_shared_name(shared_name), name=name) super(RandomShuffleQueue, self).__init__(dtypes, shapes, names, queue_ref) @tf_export("queue.FIFOQueue", v1=["queue.FIFOQueue", "FIFOQueue"]) @deprecation.deprecated_endpoints("FIFOQueue") class FIFOQueue(QueueBase): """A queue implementation that dequeues elements in first-in first-out order. 
See `tf.queue.QueueBase` for a description of the methods on this class. """ def __init__(self, capacity, dtypes, shapes=None, names=None, shared_name=None, name="fifo_queue"): """Creates a queue that dequeues elements in a first-in first-out order. A `FIFOQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `FIFOQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a queue element must have the respective fixed shape. If it is unspecified, different queue elements may have different shapes, but the use of `dequeue_many` is disallowed. Args: capacity: An integer. The upper bound on the number of elements that may be stored in this queue. dtypes: A list of `DType` objects. The length of `dtypes` must equal the number of tensors in each queue element. shapes: (Optional.) A list of fully-defined `TensorShape` objects with the same length as `dtypes`, or `None`. names: (Optional.) A list of string naming the components in the queue with the same length as `dtypes`, or `None`. If specified the dequeue methods return a dictionary with the names as keys. shared_name: (Optional.) If non-empty, this queue will be shared under the given name across multiple sessions. name: Optional name for the queue operation. 
""" dtypes = _as_type_list(dtypes) shapes = _as_shape_list(shapes, dtypes) names = _as_name_list(names, dtypes) with ops.init_scope(), ops.device("CPU"): queue_ref = gen_data_flow_ops.fifo_queue_v2( component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name) super(FIFOQueue, self).__init__(dtypes, shapes, names, queue_ref) # TODO(allenl): If GPU-compatible queues turn out to be useful, we should # implement GPU kernels for EnqueueMany and DequeueMany so we can make the # public FIFOQueue GPU-compatible and remove this internal version. class GPUCompatibleFIFOQueue(QueueBase): """A queue implementation that dequeues elements in first-in first-out order. GPUCompatibleFIFOQueue is like FIFOQueue, but the queue resource may be placed either on a CPU or on a GPU. It is not cross-device: enqueues and dequeues will be colocated with the queue resource. GPUCompatibleFIFOQueue only supports enqueue and dequeue at the moment, not enqueue_many or dequeue_many. See `tf.queue.QueueBase` for a description of the methods on this class. """ def __init__(self, capacity, dtypes, shapes=None, names=None, shared_name=None, name="fifo_queue"): """Creates a queue that dequeues elements in a first-in first-out order. A `FIFOQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `FIFOQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a queue element must have the respective fixed shape. If it is unspecified, different queue elements may have different shapes, but the use of `dequeue_many` is disallowed. Args: capacity: An integer. The upper bound on the number of elements that may be stored in this queue. dtypes: A list of `DType` objects. 
The length of `dtypes` must equal the number of tensors in each queue element. shapes: (Optional.) A list of fully-defined `TensorShape` objects with the same length as `dtypes`, or `None`. names: (Optional.) A list of string naming the components in the queue with the same length as `dtypes`, or `None`. If specified the dequeue methods return a dictionary with the names as keys. shared_name: (Optional.) If non-empty, this queue will be shared under the given name across multiple sessions. name: Optional name for the queue operation. """ dtypes = _as_type_list(dtypes) shapes = _as_shape_list(shapes, dtypes) names = _as_name_list(names, dtypes) with ops.init_scope(): queue_ref = gen_data_flow_ops.fifo_queue_v2( component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name) super(GPUCompatibleFIFOQueue, self).__init__( dtypes, shapes, names, queue_ref) def enqueue_many(self, vals, name=None): """enqueue_many is not supported on GPUCompatibleFIFOQueue.""" raise NotImplementedError( "GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, " "only enqueue and dequeue.") def dequeue_many(self, n, name=None): """dequeue_many is not supported on GPUCompatibleFIFOQueue.""" raise NotImplementedError( "GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, " "only enqueue and dequeue.") @tf_export( "queue.PaddingFIFOQueue", v1=["queue.PaddingFIFOQueue", "io.PaddingFIFOQueue", "PaddingFIFOQueue"]) @deprecation.deprecated_endpoints(["io.PaddingFIFOQueue", "PaddingFIFOQueue"]) class PaddingFIFOQueue(QueueBase): """A FIFOQueue that supports batching variable-sized tensors by padding. A `PaddingFIFOQueue` may contain components with dynamic shape, while also supporting `dequeue_many`. See the constructor for more details. See `tf.queue.QueueBase` for a description of the methods on this class. 
""" def __init__(self, capacity, dtypes, shapes, names=None, shared_name=None, name="padding_fifo_queue"): """Creates a queue that dequeues elements in a first-in first-out order. A `PaddingFIFOQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `PaddingFIFOQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are described by the `shapes` argument. The `shapes` argument must be specified; each component of a queue element must have the respective shape. Shapes of fixed rank but variable size are allowed by setting any shape dimension to None. In this case, the inputs' shape may vary along the given dimension, and `dequeue_many` will pad the given dimension with zeros up to the maximum shape of all elements in the given batch. Args: capacity: An integer. The upper bound on the number of elements that may be stored in this queue. dtypes: A list of `DType` objects. The length of `dtypes` must equal the number of tensors in each queue element. shapes: A list of `TensorShape` objects, with the same length as `dtypes`. Any dimension in the `TensorShape` containing value `None` is dynamic and allows values to be enqueued with variable size in that dimension. names: (Optional.) A list of string naming the components in the queue with the same length as `dtypes`, or `None`. If specified the dequeue methods return a dictionary with the names as keys. shared_name: (Optional.) If non-empty, this queue will be shared under the given name across multiple sessions. name: Optional name for the queue operation. Raises: ValueError: If shapes is not a list of shapes, or the lengths of dtypes and shapes do not match, or if names is specified and the lengths of dtypes and names do not match. 
""" dtypes = _as_type_list(dtypes) shapes = _as_shape_list(shapes, dtypes, unknown_dim_allowed=True) names = _as_name_list(names, dtypes) if len(dtypes) != len(shapes): raise ValueError("Shapes must be provided for all components, " "but received %d dtypes and %d shapes." % (len(dtypes), len(shapes))) queue_ref = gen_data_flow_ops.padding_fifo_queue_v2( component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name) super(PaddingFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref) @tf_export("queue.PriorityQueue", v1=["queue.PriorityQueue", "io.PriorityQueue", "PriorityQueue"]) @deprecation.deprecated_endpoints(["io.PriorityQueue", "PriorityQueue"]) class PriorityQueue(QueueBase): """A queue implementation that dequeues elements in prioritized order. See `tf.queue.QueueBase` for a description of the methods on this class. """ def __init__(self, capacity, types, shapes=None, names=None, shared_name=None, name="priority_queue"): """Creates a queue that dequeues elements in a first-in first-out order. A `PriorityQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `PriorityQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `types`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a queue element must have the respective fixed shape. If it is unspecified, different queue elements may have different shapes, but the use of `dequeue_many` is disallowed. Enqueues and Dequeues to the `PriorityQueue` must include an additional tuple entry at the beginning: the `priority`. The priority must be an int64 scalar (for `enqueue`) or an int64 vector (for `enqueue_many`). Args: capacity: An integer. The upper bound on the number of elements that may be stored in this queue. types: A list of `DType` objects. 
The length of `types` must equal the number of tensors in each queue element, except the first priority element. The first tensor in each element is the priority, which must be type int64. shapes: (Optional.) A list of fully-defined `TensorShape` objects, with the same length as `types`, or `None`. names: (Optional.) A list of strings naming the components in the queue with the same length as `dtypes`, or `None`. If specified, the dequeue methods return a dictionary with the names as keys. shared_name: (Optional.) If non-empty, this queue will be shared under the given name across multiple sessions. name: Optional name for the queue operation. """ types = _as_type_list(types) shapes = _as_shape_list(shapes, types) queue_ref = gen_data_flow_ops.priority_queue_v2( component_types=types, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name) priority_dtypes = [_dtypes.int64] + types priority_shapes = [()] + shapes if shapes else shapes super(PriorityQueue, self).__init__(priority_dtypes, priority_shapes, names, queue_ref) # TODO(josh11b): class BatchQueue(QueueBase): class Barrier(object): """Represents a key-value map that persists across graph executions.""" def __init__(self, types, shapes=None, shared_name=None, name="barrier"): """Creates a barrier that persists across different graph executions. A barrier represents a key-value map, where each key is a string, and each value is a tuple of tensors. At runtime, the barrier contains 'complete' and 'incomplete' elements. A complete element has defined tensors for all components of its value tuple, and may be accessed using take_many. An incomplete element has some undefined components in its value tuple, and may be updated using insert_many. The barrier call `take_many` outputs values in a particular order. First, it only outputs completed values. Second, the order in which completed values are returned matches the order in which their very first component was inserted into the barrier. 
So, for example, for this sequence of insertions and removals: barrier = Barrier((tf.string, tf.int32), shapes=((), ())) barrier.insert_many(0, keys=["k1", "k2"], values=["a", "b"]).run() barrier.insert_many(1, keys=["k1"], values=[1]).run() barrier.insert_many(0, keys=["k3"], values=["c"]).run() barrier.insert_many(1, keys=["k3"], values=[3]).run() barrier.insert_many(1, keys=["k2"], values=[2]).run() (indices, keys, values) = barrier.take_many(2) (indices_val, keys_val, values0_val, values1_val) = session.run([indices, keys, values[0], values[1]]) The output will be (up to permutation of "k1" and "k2"): indices_val == (-2**63, -2**63) keys_val == ("k1", "k2") values0_val == ("a", "b") values1_val == (1, 2) Note the key "k2" was inserted into the barrier before "k3". Even though "k3" was completed first, both are complete by the time take_many is called. As a result, "k2" is prioritized and "k1" and "k2" are returned first. "k3" remains in the barrier until the next execution of `take_many`. Since "k1" and "k2" had their first insertions into the barrier together, their indices are the same (-2**63). The index of "k3" will be -2**63 + 1, because it was the next new inserted key. Args: types: A single dtype or a tuple of dtypes, corresponding to the dtypes of the tensor elements that comprise a value in this barrier. shapes: Optional. Constraints on the shapes of tensors in the values: a single tensor shape tuple; a tuple of tensor shape tuples for each barrier-element tuple component; or None if the shape should not be constrained. shared_name: Optional. If non-empty, this barrier will be shared under the given name across multiple sessions. name: Optional name for the barrier op. Raises: ValueError: If one of the `shapes` indicate no elements. 
""" self._types = _as_type_list(types) if shapes is not None: shapes = _as_shape_list(shapes, self._types) self._shapes = [tensor_shape.TensorShape(s) for s in shapes] for i, shape in enumerate(self._shapes): if shape.num_elements() == 0: raise ValueError("Empty tensors are not supported, but received " "shape '%s' at index %d" % (shape, i)) else: self._shapes = [tensor_shape.unknown_shape() for _ in self._types] self._barrier_ref = gen_data_flow_ops.barrier( component_types=self._types, shapes=self._shapes, shared_name=shared_name, name=name) if context.executing_eagerly(): self._name = context.context().scope_name else: self._name = self._barrier_ref.op.name.split("/")[-1] @property def barrier_ref(self): """Get the underlying barrier reference.""" return self._barrier_ref @property def name(self): """The name of the underlying barrier.""" if context.executing_eagerly(): return self._name return self._barrier_ref.op.name def insert_many(self, component_index, keys, values, name=None): """For each key, assigns the respective value to the specified component. This operation updates each element at component_index. Args: component_index: The component of the value that is being assigned. keys: A vector of keys, with length n. values: An any-dimensional tensor of values, which are associated with the respective keys. The first dimension must have length n. name: Optional name for the op. Returns: The operation that performs the insertion. Raises: InvalidArgumentsError: If inserting keys and values without elements. """ if name is None: name = "%s_BarrierInsertMany" % self._name return gen_data_flow_ops.barrier_insert_many( self._barrier_ref, keys, values, component_index, name=name) def take_many(self, num_elements, allow_small_batch=False, timeout=None, name=None): """Takes the given number of completed elements from this barrier. This operation concatenates completed-element component tensors along the 0th dimension to make a single component tensor. 
If barrier has no completed elements, this operation will block until there are 'num_elements' elements to take. TODO(b/25743580): the semantics of `allow_small_batch` are experimental and may be extended to other cases in the future. TODO(ebrevdo): If a take_many(allow_small_batch=True) is blocking already when the barrier is closed, it will block for ever. Fix this by using asynchronous operations. Args: num_elements: The number of elements to take. allow_small_batch: If the barrier is closed, don't block if there are less completed elements than requested, but instead return all available completed elements. timeout: This specifies the number of milliseconds to block before returning with DEADLINE_EXCEEDED. (This option is not supported yet.) name: A name for the operation (optional). Returns: A tuple of (index, key, value_list). "index" is a int64 tensor of length num_elements containing the index of the insert_many call for which the very first component of the given element was inserted into the Barrier, starting with the value -2**63. Note, this value is different from the index of the insert_many call for which the element was completed. "key" is a string tensor of length num_elements containing the keys. "value_list" is a tuple of tensors, each one with size num_elements in the 0th dimension for each component in the barrier's values. """ if name is None: name = "%s_BarrierTakeMany" % self._name ret = gen_data_flow_ops.barrier_take_many( self._barrier_ref, num_elements, self._types, allow_small_batch, timeout, name=name) # NOTE(mrry): Not using a shape function because we need access to # the Barrier object. 
if not context.executing_eagerly(): op = ret[0].op if allow_small_batch: batch_dim = None else: batch_dim = tensor_shape.Dimension( tensor_util.constant_value(op.inputs[1])) op.outputs[0].set_shape(tensor_shape.TensorShape([batch_dim])) # indices op.outputs[1].set_shape(tensor_shape.TensorShape([batch_dim])) # keys for output, shape in zip(op.outputs[2:], self._shapes): # value_list output.set_shape( tensor_shape.TensorShape([batch_dim]).concatenate(shape)) return ret def close(self, cancel_pending_enqueues=False, name=None): """Closes this barrier. This operation signals that no more new key values will be inserted in the given barrier. Subsequent InsertMany operations with new keys will fail. InsertMany operations that just complement already existing keys with other components, will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient elements remain in the barrier. Subsequent TakeMany operations that would block will fail immediately. If `cancel_pending_enqueues` is `True`, all pending requests to the underlying queue will also be canceled, and completing of already started values is also not acceptable anymore. Args: cancel_pending_enqueues: (Optional.) A boolean, defaulting to `False` (described above). name: Optional name for the op. Returns: The operation that closes the barrier. """ if name is None: name = "%s_BarrierClose" % self._name return gen_data_flow_ops.barrier_close( self._barrier_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name) def ready_size(self, name=None): """Compute the number of complete elements in the given barrier. Args: name: A name for the operation (optional). Returns: A single-element tensor containing the number of complete elements in the given barrier. 
""" if name is None: name = "%s_BarrierReadySize" % self._name return gen_data_flow_ops.barrier_ready_size(self._barrier_ref, name=name) def incomplete_size(self, name=None): """Compute the number of incomplete elements in the given barrier. Args: name: A name for the operation (optional). Returns: A single-element tensor containing the number of incomplete elements in the given barrier. """ if name is None: name = "%s_BarrierIncompleteSize" % self._name return gen_data_flow_ops.barrier_incomplete_size( self._barrier_ref, name=name) @tf_export(v1=["ConditionalAccumulatorBase"]) class ConditionalAccumulatorBase(object): """A conditional accumulator for aggregating gradients. Up-to-date gradients (i.e., time step at which gradient was computed is equal to the accumulator's time step) are added to the accumulator. Extraction of the average gradient is blocked until the required number of gradients has been accumulated. """ def __init__(self, dtype, shape, accumulator_ref): """Creates a new ConditionalAccumulator. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. accumulator_ref: A handle to the conditional accumulator, created by sub- classes """ self._dtype = dtype if shape is not None: self._shape = tensor_shape.TensorShape(shape) else: self._shape = tensor_shape.unknown_shape() self._accumulator_ref = accumulator_ref if context.executing_eagerly(): self._name = context.context().scope_name else: self._name = self._accumulator_ref.op.name.split("/")[-1] @property def accumulator_ref(self): """The underlying accumulator reference.""" return self._accumulator_ref @property def name(self): """The name of the underlying accumulator.""" return self._name @property def dtype(self): """The datatype of the gradients accumulated by this accumulator.""" return self._dtype def num_accumulated(self, name=None): """Number of gradients that have currently been aggregated in accumulator. Args: name: Optional name for the operation. 
Returns: Number of accumulated gradients currently in accumulator. """ if name is None: name = "%s_NumAccumulated" % self._name return gen_data_flow_ops.resource_accumulator_num_accumulated( self._accumulator_ref, name=name) def set_global_step(self, new_global_step, name=None): """Sets the global time step of the accumulator. The operation logs a warning if we attempt to set to a time step that is lower than the accumulator's own time step. Args: new_global_step: Value of new time step. Can be a variable or a constant name: Optional name for the operation. Returns: Operation that sets the accumulator's time step. """ return gen_data_flow_ops.resource_accumulator_set_global_step( self._accumulator_ref, math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64), name=name) @tf_export(v1=["ConditionalAccumulator"]) class ConditionalAccumulator(ConditionalAccumulatorBase): """A conditional accumulator for aggregating gradients. Up-to-date gradients (i.e., time step at which gradient was computed is equal to the accumulator's time step) are added to the accumulator. Extraction of the average gradient is blocked until the required number of gradients has been accumulated. """ def __init__(self, dtype, shape=None, shared_name=None, name="conditional_accumulator", reduction_type="MEAN"): """Creates a new ConditionalAccumulator. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. shared_name: Optional. If non-empty, this accumulator will be shared under the given name across multiple sessions. name: Optional name for the accumulator. reduction_type: Reduction type to use when taking the gradient. 
""" accumulator_ref = gen_data_flow_ops.resource_conditional_accumulator( dtype=dtype, shape=shape, shared_name=shared_name, name=name, reduction_type=reduction_type) if context.executing_eagerly(): self._resource_deleter = resource_variable_ops.EagerResourceDeleter( handle=accumulator_ref, handle_device=context.context().device_name) super(ConditionalAccumulator, self).__init__(dtype, shape, accumulator_ref) def apply_grad(self, grad, local_step=0, name=None): """Attempts to apply a gradient to the accumulator. The attempt is silently dropped if the gradient is stale, i.e., local_step is less than the accumulator's global time step. Args: grad: The gradient tensor to be applied. local_step: Time step at which the gradient was computed. name: Optional name for the operation. Returns: The operation that (conditionally) applies a gradient to the accumulator. Raises: ValueError: If grad is of the wrong shape """ grad = ops.convert_to_tensor(grad, self._dtype) grad.get_shape().assert_is_compatible_with(self._shape) local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64) return gen_data_flow_ops.resource_accumulator_apply_gradient( self._accumulator_ref, local_step=local_step, gradient=grad, name=name) def take_grad(self, num_required, name=None): """Attempts to extract the average gradient from the accumulator. The operation blocks until sufficient number of gradients have been successfully applied to the accumulator. Once successful, the following actions are also triggered: - Counter of accumulated gradients is reset to 0. - Aggregated gradient is reset to 0 tensor. - Accumulator's internal time step is incremented by 1. Args: num_required: Number of gradients that needs to have been aggregated name: Optional name for the operation Returns: A tensor holding the value of the average gradient. 
Raises: InvalidArgumentError: If num_required < 1 """ out = gen_data_flow_ops.resource_accumulator_take_gradient( self._accumulator_ref, num_required, dtype=self._dtype, name=name) out.set_shape(self._shape) return out @tf_export( v1=["sparse.SparseConditionalAccumulator", "SparseConditionalAccumulator"]) class SparseConditionalAccumulator(ConditionalAccumulatorBase): """A conditional accumulator for aggregating sparse gradients. Sparse gradients are represented by `IndexedSlices`. Up-to-date gradients (i.e., time step at which gradient was computed is equal to the accumulator's time step) are added to the accumulator. Extraction of the average gradient is blocked until the required number of gradients has been accumulated. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. shared_name: Optional. If non-empty, this accumulator will be shared under the given name across multiple sessions. name: Optional name for the accumulator. reduction_type: Reduction type to use when taking the gradient. """ def __init__(self, dtype, shape=None, shared_name=None, name="sparse_conditional_accumulator", reduction_type="MEAN"): accumulator_ref = gen_data_flow_ops.sparse_conditional_accumulator( dtype=dtype, shape=shape, shared_name=shared_name, name=name, reduction_type=reduction_type) super(SparseConditionalAccumulator, self).__init__(dtype, shape, accumulator_ref) def apply_indexed_slices_grad(self, grad, local_step=0, name=None): """Attempts to apply a gradient to the accumulator. The attempt is silently dropped if the gradient is stale, i.e., `local_step` is less than the accumulator's global time step. Args: grad: The gradient `IndexedSlices` to be applied. local_step: Time step at which the gradient was computed. name: Optional name for the operation. Returns: The operation that (conditionally) applies a gradient to the accumulator. 
Raises: InvalidArgumentError: If grad is of the wrong shape """ return self.apply_grad( grad_indices=grad.indices, grad_values=grad.values, grad_shape=grad.dense_shape, local_step=local_step, name=name) def apply_grad(self, grad_indices, grad_values, grad_shape=None, local_step=0, name=None): """Attempts to apply a sparse gradient to the accumulator. The attempt is silently dropped if the gradient is stale, i.e., `local_step` is less than the accumulator's global time step. A sparse gradient is represented by its indices, values and possibly empty or None shape. Indices must be a vector representing the locations of non-zero entries in the tensor. Values are the non-zero slices of the gradient, and must have the same first dimension as indices, i.e., the nnz represented by indices and values must be consistent. Shape, if not empty or None, must be consistent with the accumulator's shape (if also provided). Example: A tensor [[0, 0], [0, 1], [2, 3]] can be represented indices: [1,2] values: [[0,1],[2,3]] shape: [3, 2] Args: grad_indices: Indices of the sparse gradient to be applied. grad_values: Values of the sparse gradient to be applied. grad_shape: Shape of the sparse gradient to be applied. local_step: Time step at which the gradient was computed. name: Optional name for the operation. Returns: The operation that (conditionally) applies a gradient to the accumulator. Raises: InvalidArgumentError: If grad is of the wrong shape """ local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64) return gen_data_flow_ops.sparse_accumulator_apply_gradient( self._accumulator_ref, local_step=local_step, gradient_indices=math_ops.cast(grad_indices, _dtypes.int64), gradient_values=grad_values, gradient_shape=math_ops.cast( [] if grad_shape is None else grad_shape, _dtypes.int64), has_known_shape=(grad_shape is not None), name=name) def take_grad(self, num_required, name=None): """Attempts to extract the average gradient from the accumulator. 
The operation blocks until sufficient number of gradients have been successfully applied to the accumulator. Once successful, the following actions are also triggered: - Counter of accumulated gradients is reset to 0. - Aggregated gradient is reset to 0 tensor. - Accumulator's internal time step is incremented by 1. Args: num_required: Number of gradients that needs to have been aggregated name: Optional name for the operation Returns: A tuple of indices, values, and shape representing the average gradient. Raises: InvalidArgumentError: If `num_required` < 1 """ return gen_data_flow_ops.sparse_accumulator_take_gradient( self._accumulator_ref, num_required, dtype=self._dtype, name=name) def take_indexed_slices_grad(self, num_required, name=None): """Attempts to extract the average gradient from the accumulator. The operation blocks until sufficient number of gradients have been successfully applied to the accumulator. Once successful, the following actions are also triggered: - Counter of accumulated gradients is reset to 0. - Aggregated gradient is reset to 0 tensor. - Accumulator's internal time step is incremented by 1. Args: num_required: Number of gradients that needs to have been aggregated name: Optional name for the operation Returns: An `IndexedSlices` holding the value of the average gradient. Raises: InvalidArgumentError: If `num_required` < 1 """ return_val = gen_data_flow_ops.sparse_accumulator_take_gradient( self._accumulator_ref, num_required, dtype=self._dtype, name=name) return ops.IndexedSlices( indices=return_val.indices, values=return_val.values, dense_shape=return_val.shape) # SparseConditionalAccumulator is not switched to resource. Use old kernels. def num_accumulated(self, name=None): """Number of gradients that have currently been aggregated in accumulator. Args: name: Optional name for the operation. Returns: Number of accumulated gradients currently in accumulator. 
""" if name is None: name = "%s_NumAccumulated" % self._name return gen_data_flow_ops.accumulator_num_accumulated( self._accumulator_ref, name=name) def set_global_step(self, new_global_step, name=None): """Sets the global time step of the accumulator. The operation logs a warning if we attempt to set to a time step that is lower than the accumulator's own time step. Args: new_global_step: Value of new time step. Can be a variable or a constant name: Optional name for the operation. Returns: Operation that sets the accumulator's time step. """ return gen_data_flow_ops.accumulator_set_global_step( self._accumulator_ref, math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64), name=name) class BaseStagingArea(object): """Base class for Staging Areas.""" _identifier = 0 _lock = threading.Lock() def __init__(self, dtypes, shapes=None, names=None, shared_name=None, capacity=0, memory_limit=0): if shared_name is None: self._name = ( ops.get_default_graph().unique_name(self.__class__.__name__)) elif isinstance(shared_name, six.string_types): self._name = shared_name else: raise ValueError("shared_name must be a string") self._dtypes = dtypes if shapes is not None: if len(shapes) != len(dtypes): raise ValueError("StagingArea shapes must be the same length as dtypes") self._shapes = [tensor_shape.TensorShape(s) for s in shapes] else: self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes] if names is not None: if len(names) != len(dtypes): raise ValueError("StagingArea names must be the same length as dtypes") self._names = names else: self._names = None self._capacity = capacity self._memory_limit = memory_limit # all get and put ops must colocate with this op with ops.name_scope("%s_root" % self._name): self._coloc_op = control_flow_ops.no_op() @property def name(self): """The name of the staging area.""" return self._name @property def dtypes(self): """The list of dtypes for each component of a staging area element.""" return self._dtypes 
@property def shapes(self): """The list of shapes for each component of a staging area element.""" return self._shapes @property def names(self): """The list of names for each component of a staging area element.""" return self._names @property def capacity(self): """The maximum number of elements of this staging area.""" return self._capacity @property def memory_limit(self): """The maximum number of bytes of this staging area.""" return self._memory_limit def _check_put_dtypes(self, vals, indices=None): """Validate and convert `vals` to a list of `Tensor`s. The `vals` argument can be a Tensor, a list or tuple of tensors, or a dictionary with tensor values. If `vals` is a list, then the appropriate indices associated with the values must be provided. If it is a dictionary, the staging area must have been constructed with a `names` attribute and the dictionary keys must match the staging area names. `indices` will be inferred from the dictionary keys. If the staging area was constructed with a `names` attribute, `vals` must be a dictionary. Checks that the dtype and shape of each value matches that of the staging area. Args: vals: A tensor, a list or tuple of tensors, or a dictionary. Returns: A (tensors, indices) tuple where `tensors` is a list of `Tensor` objects and `indices` is a list of indices associated with the tensors. Raises: ValueError: If `vals` or `indices` is invalid. """ if isinstance(vals, dict): if not self._names: raise ValueError( "Staging areas must have names to enqueue a dictionary") if not set(vals.keys()).issubset(self._names): raise ValueError("Keys in dictionary to put do not match names " "of staging area. Dictionary: (%s), Queue: (%s)" % (sorted(vals.keys()), sorted(self._names))) # The order of values in `self._names` indicates the order in which the # tensors in the dictionary `vals` must be listed. 
vals, indices, _ = zip(*[(vals[k], i, k) for i, k in enumerate(self._names) if k in vals]) else: if self._names: raise ValueError("You must enqueue a dictionary in a staging area " "with names") if indices is None: raise ValueError("Indices must be supplied when inserting a list " "of tensors") if len(indices) != len(vals): raise ValueError("Number of indices '%s' doesn't match " "number of values '%s'") if not isinstance(vals, (list, tuple)): vals = [vals] indices = [0] # Sanity check number of values if not len(vals) <= len(self._dtypes): raise ValueError("Unexpected number of inputs '%s' vs '%s'" % (len(vals), len(self._dtypes))) tensors = [] for val, i in zip(vals, indices): dtype, shape = self._dtypes[i], self._shapes[i] # Check dtype if val.dtype != dtype: raise ValueError("Datatypes do not match. '%s' != '%s'" % (str(val.dtype), str(dtype))) # Check shape val.get_shape().assert_is_compatible_with(shape) tensors.append( ops.convert_to_tensor(val, dtype=dtype, name="component_%d" % i)) return tensors, indices def _create_device_transfers(self, tensors): """Encode inter-device transfers if the current device is not the same as the Staging Area's device. """ if not isinstance(tensors, (tuple, list)): tensors = [tensors] curr_device_scope = control_flow_ops.no_op().device if curr_device_scope != self._coloc_op.device: tensors = [array_ops.identity(t) for t in tensors] return tensors def _get_return_value(self, tensors, indices): """Return the value to return from a get op. If the staging area has names, return a dictionary with the names as keys. Otherwise return either a single tensor or a list of tensors depending on the length of `tensors`. Args: tensors: List of tensors from the get op. indices: Indices of associated names and shapes Returns: A single tensor, a list of tensors, or a dictionary of tensors. 
""" tensors = self._create_device_transfers(tensors) # Sets shape for output, i in zip(tensors, indices): output.set_shape(self._shapes[i]) if self._names: # The returned values in `tensors` are in the same order as # the names in `self._names`. return {self._names[i]: t for t, i in zip(tensors, indices)} return tensors def _scope_vals(self, vals): """Return a list of values to pass to `name_scope()`. Args: vals: A tensor, a list or tuple of tensors, or a dictionary. Returns: The values in vals as a list. """ if isinstance(vals, (list, tuple)): return vals elif isinstance(vals, dict): return vals.values() else: return [vals] class StagingArea(BaseStagingArea): """Class for staging inputs. No ordering guarantees. A `StagingArea` is a TensorFlow data structure that stores tensors across multiple steps, and exposes operations that can put and get tensors. Each `StagingArea` element is a tuple of one or more tensors, where each tuple component has a static dtype, and may have a static shape. The capacity of a `StagingArea` may be bounded or unbounded. It supports multiple concurrent producers and consumers; and provides exactly-once delivery. Each element of a `StagingArea` is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a staging area element must have the respective fixed shape. If it is unspecified, different elements may have different shapes, It can be configured with a capacity in which case put(values) will block until space becomes available. Similarly, it can be configured with a memory limit which will block put(values) until space is available. This is mostly useful for limiting the number of tensors on devices such as GPUs. All get() and peek() commands block if the requested data is not present in the Staging Area. 
""" def __init__(self, dtypes, shapes=None, names=None, shared_name=None, capacity=0, memory_limit=0): """Constructs a staging area object. The two optional lists, `shapes` and `names`, must be of the same length as `dtypes` if provided. The values at a given index `i` indicate the shape and name to use for the corresponding queue component in `dtypes`. The device scope at the time of object creation determines where the storage for the `StagingArea` will reside. Calls to `put` will incur a copy to this memory space, if necessary. Tensors returned by `get` will be placed according to the device scope when `get` is called. Args: dtypes: A list of types. The length of dtypes must equal the number of tensors in each element. shapes: (Optional.) Constraints on the shapes of tensors in an element. A list of shape tuples or None. This list is the same length as dtypes. If the shape of any tensors in the element are constrained, all must be; shapes can be None if the shapes should not be constrained. names: (Optional.) If provided, the `get()` and `put()` methods will use dictionaries with these names as keys. Must be None or a list or tuple of the same length as `dtypes`. shared_name: (Optional.) A name to be used for the shared object. By passing the same name to two different python objects they will share the underlying staging area. Must be a string. capacity: (Optional.) Maximum number of elements. An integer. If zero, the Staging Area is unbounded memory_limit: (Optional.) Maximum number of bytes of all tensors in the Staging Area. An integer. If zero, the Staging Area is unbounded Raises: ValueError: If one of the arguments is invalid. """ super(StagingArea, self).__init__(dtypes, shapes, names, shared_name, capacity, memory_limit) def put(self, values, name=None): """Create an op that places a value into the staging area. This operation will block if the `StagingArea` has reached its capacity. 
Args: values: A single tensor, a list or tuple of tensors, or a dictionary with tensor values. The number of elements must match the length of the list provided to the dtypes argument when creating the StagingArea. name: A name for the operation (optional). Returns: The created op. Raises: ValueError: If the number or type of inputs don't match the staging area. """ with ops.name_scope(name, "%s_put" % self._name, self._scope_vals(values)) as scope: if not isinstance(values, (list, tuple, dict)): values = [values] # Hard-code indices for this staging area indices = list(six.moves.range(len(values))) vals, _ = self._check_put_dtypes(values, indices) with ops.colocate_with(self._coloc_op): op = gen_data_flow_ops.stage( values=vals, shared_name=self._name, name=scope, capacity=self._capacity, memory_limit=self._memory_limit) return op def __internal_get(self, get_fn, name): with ops.colocate_with(self._coloc_op): ret = get_fn() indices = list(six.moves.range(len(self._dtypes))) # Hard coded return self._get_return_value(ret, indices) def get(self, name=None): """Gets one element from this staging area. If the staging area is empty when this operation executes, it will block until there is an element to dequeue. Note that unlike others ops that can block, like the queue Dequeue operations, this can stop other work from happening. To avoid this, the intended use is for this to be called only when there will be an element already available. One method for doing this in a training loop would be to run a `put()` call during a warmup session.run call, and then call both `get()` and `put()` in each subsequent step. The placement of the returned tensor will be determined by the current device scope when this function is called. Args: name: A name for the operation (optional). Returns: The tuple of tensors that was gotten. 
""" if name is None: name = "%s_get" % self._name # pylint: disable=bad-continuation fn = lambda: gen_data_flow_ops.unstage(dtypes=self._dtypes, shared_name=self._name, name=name, capacity=self._capacity, memory_limit=self._memory_limit) # pylint: enable=bad-continuation return self.__internal_get(fn, name) def peek(self, index, name=None): """Peeks at an element in the staging area. If the staging area is too small to contain the element at the specified index, it will block until enough elements are inserted to complete the operation. The placement of the returned tensor will be determined by the current device scope when this function is called. Args: index: The index of the tensor within the staging area to look up. name: A name for the operation (optional). Returns: The tuple of tensors that was gotten. """ if name is None: name = "%s_peek" % self._name # pylint: disable=bad-continuation fn = lambda: gen_data_flow_ops.stage_peek(index, dtypes=self._dtypes, shared_name=self._name, name=name, capacity=self._capacity, memory_limit=self._memory_limit) # pylint: enable=bad-continuation return self.__internal_get(fn, name) def size(self, name=None): """Returns the number of elements in the staging area. Args: name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_size" % self._name return gen_data_flow_ops.stage_size( name=name, shared_name=self._name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit) def clear(self, name=None): """Clears the staging area. 
Args: name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_clear" % self._name return gen_data_flow_ops.stage_clear( name=name, shared_name=self._name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit) class MapStagingArea(BaseStagingArea): """A `MapStagingArea` is a TensorFlow data structure that stores tensors across multiple steps, and exposes operations that can put and get tensors. Each `MapStagingArea` element is a (key, value) pair. Only int64 keys are supported, other types should be hashed to produce a key. Values are a tuple of one or more tensors. Each tuple component has a static dtype, and may have a static shape. The capacity of a `MapStagingArea` may be bounded or unbounded. It supports multiple concurrent producers and consumers; and provides exactly-once delivery. Each value tuple of a `MapStagingArea` is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a staging area element must have the respective fixed shape. If it is unspecified, different elements may have different shapes, It behaves like an associative container with support for: - put(key, values) - peek(key) like dict.get(key) - get(key) like dict.pop(key) - get(key=None) like dict.popitem() - size() - clear() If ordered a tree structure ordered by key will be used and get(key=None) will remove (key, value) pairs in increasing key order. Otherwise a hashtable It can be configured with a capacity in which case put(key, values) will block until space becomes available. Similarly, it can be configured with a memory limit which will block put(key, values) until space is available. This is mostly useful for limiting the number of tensors on devices such as GPUs. All get() and peek() commands block if the requested (key, value) pair is not present in the staging area. 
Partial puts are supported and will be placed in an incomplete map until such time as all values associated with the key have been inserted. Once completed, this (key, value) pair will be inserted into the map. Data in the incomplete map counts towards the memory limit, but not towards capacity limit. Partial gets from the map are also supported. This removes the partially requested tensors from the entry, but the entry is only removed from the map once all tensors associated with it are removed. """ def __init__(self, dtypes, shapes=None, names=None, shared_name=None, ordered=False, capacity=0, memory_limit=0): """Args: dtypes: A list of types. The length of dtypes must equal the number of tensors in each element. capacity: (Optional.) Maximum number of elements. An integer. If zero, the Staging Area is unbounded memory_limit: (Optional.) Maximum number of bytes of all tensors in the Staging Area (excluding keys). An integer. If zero, the Staging Area is unbounded ordered: (Optional.) If True the underlying data structure is a tree ordered on key. Otherwise assume a hashtable. shapes: (Optional.) Constraints on the shapes of tensors in an element. A list of shape tuples or None. This list is the same length as dtypes. If the shape of any tensors in the element are constrained, all must be; shapes can be None if the shapes should not be constrained. names: (Optional.) If provided, the `get()` and `put()` methods will use dictionaries with these names as keys. Must be None or a list or tuple of the same length as `dtypes`. shared_name: (Optional.) A name to be used for the shared object. By passing the same name to two different python objects they will share the underlying staging area. Must be a string. Raises: ValueError: If one of the arguments is invalid. 
""" super(MapStagingArea, self).__init__(dtypes, shapes, names, shared_name, capacity, memory_limit) # Defer to different methods depending if the map is ordered self._ordered = ordered if ordered: self._put_fn = gen_data_flow_ops.ordered_map_stage self._pop_fn = gen_data_flow_ops.ordered_map_unstage self._popitem_fn = gen_data_flow_ops.ordered_map_unstage_no_key self._peek_fn = gen_data_flow_ops.ordered_map_peek self._size_fn = gen_data_flow_ops.ordered_map_size self._incomplete_size_fn = gen_data_flow_ops.ordered_map_incomplete_size self._clear_fn = gen_data_flow_ops.ordered_map_clear else: self._put_fn = gen_data_flow_ops.map_stage self._pop_fn = gen_data_flow_ops.map_unstage self._popitem_fn = gen_data_flow_ops.map_unstage_no_key self._peek_fn = gen_data_flow_ops.map_peek self._size_fn = gen_data_flow_ops.map_size self._incomplete_size_fn = gen_data_flow_ops.map_incomplete_size self._clear_fn = gen_data_flow_ops.map_clear def put(self, key, vals, indices=None, name=None): """Create an op that stores the (key, vals) pair in the staging area. Incomplete puts are possible, preferably using a dictionary for vals as the appropriate dtypes and shapes can be inferred from the value names dictionary key values. If vals is a list or tuple, indices must also be specified so that the op knows at which element position to perform the insert. This operation will block if the capacity or memory limit of this container is reached. Args: key: Key associated with the data vals: Tensor (or a dict/tuple of Tensors) to place into the staging area. indices: (Optional) if vals is a tuple/list, this is required. name: A name for the operation (optional) Returns: The created op Raises: ValueError: If the number or type of inputs don't match the staging area. 
""" with ops.name_scope(name, "%s_put" % self._name, self._scope_vals(vals)) as scope: vals, indices = self._check_put_dtypes(vals, indices) with ops.colocate_with(self._coloc_op): op = self._put_fn( key, indices, vals, dtypes=self._dtypes, shared_name=self._name, name=scope, capacity=self._capacity, memory_limit=self._memory_limit) return op def _get_indices_and_dtypes(self, indices=None): if indices is None: indices = list(six.moves.range(len(self._dtypes))) if not isinstance(indices, (tuple, list)): raise TypeError("Invalid indices type '%s'" % type(indices)) if len(indices) == 0: raise ValueError("Empty indices") if all(isinstance(i, str) for i in indices): if self._names is None: raise ValueError("String indices provided '%s', but this Staging Area " "was not created with names." % indices) try: indices = [self._names.index(n) for n in indices] except ValueError: raise ValueError("Named index '%s' not in " "Staging Area names '%s'" % (n, self._names)) elif all(isinstance(i, int) for i in indices): pass else: raise TypeError("Mixed types in indices '%s'. " "May only be str or int" % indices) dtypes = [self._dtypes[i] for i in indices] return indices, dtypes def peek(self, key, indices=None, name=None): """Peeks at staging area data associated with the key. If the key is not in the staging area, it will block until the associated (key, value) is inserted. Args: key: Key associated with the required data indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. 
name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_pop" % self._name indices, dtypes = self._get_indices_and_dtypes(indices) with ops.colocate_with(self._coloc_op): result = self._peek_fn( key, shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit) return self._get_return_value(result, indices) def get(self, key=None, indices=None, name=None): """If the key is provided, the associated (key, value) is returned from the staging area. If the key is not in the staging area, this method will block until the associated (key, value) is inserted. If no key is provided and the staging area is ordered, the (key, value) with the smallest key will be returned. Otherwise, a random (key, value) will be returned. If the staging area is empty when this operation executes, it will block until there is an element to dequeue. Args: key: Key associated with the required data (Optional) indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op """ if key is None: return self._popitem(indices=indices, name=name) else: return self._pop(key, indices=indices, name=name) def _pop(self, key, indices=None, name=None): """Remove and return the associated (key, value) is returned from the staging area. If the key is not in the staging area, this method will block until the associated (key, value) is inserted. Args: key: Key associated with the required data indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. 
name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_get" % self._name indices, dtypes = self._get_indices_and_dtypes(indices) with ops.colocate_with(self._coloc_op): result = self._pop_fn( key, shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit) return key, self._get_return_value(result, indices) def _popitem(self, indices=None, name=None): """If the staging area is ordered, the (key, value) with the smallest key will be returned. Otherwise, a random (key, value) will be returned. If the staging area is empty when this operation executes, it will block until there is an element to dequeue. Args: key: Key associated with the required data indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_get_nokey" % self._name indices, dtypes = self._get_indices_and_dtypes(indices) with ops.colocate_with(self._coloc_op): key, result = self._popitem_fn( shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit) # Separate keys and results out from # underlying namedtuple key = self._create_device_transfers(key)[0] result = self._get_return_value(result, indices) return key, result def size(self, name=None): """Returns the number of elements in the staging area. Args: name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_size" % self._name return self._size_fn( shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit) def incomplete_size(self, name=None): """Returns the number of incomplete elements in the staging area. 
Args: name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_incomplete_size" % self._name return self._incomplete_size_fn( shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit) def clear(self, name=None): """Clears the staging area. Args: name: A name for the operation (optional) Returns: The created op """ if name is None: name = "%s_clear" % self._name return self._clear_fn( shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit) class RecordInput(object): """RecordInput asynchronously reads and randomly yields TFRecords. A RecordInput Op will continuously read a batch of records asynchronously into a buffer of some fixed capacity. It can also asynchronously yield random records from this buffer. It will not start yielding until at least `buffer_size / 2` elements have been placed into the buffer so that sufficient randomization can take place. The order the files are read will be shifted each epoch by `shift_amount` so that the data is presented in a different order every epoch. """ def __init__(self, file_pattern, batch_size=1, buffer_size=1, parallelism=1, shift_ratio=0, seed=0, name=None, batches=None, compression_type=None): """Constructs a RecordInput Op. Args: file_pattern: File path to the dataset, possibly containing wildcards. All matching files will be iterated over each epoch. batch_size: How many records to return at a time. buffer_size: The maximum number of records the buffer will contain. parallelism: How many reader threads to use for reading from files. shift_ratio: What percentage of the total number files to move the start file forward by each epoch. seed: Specify the random number seed used by generator that randomizes records. name: Optional name for the operation. batches: None by default, creating a single batch op. 
Otherwise specifies how many batches to create, which are returned as a list when `get_yield_op()` is called. An example use case is to split processing between devices on one computer. compression_type: The type of compression for the file. Currently ZLIB and GZIP are supported. Defaults to none. Raises: ValueError: If one of the arguments is invalid. """ self._batch_size = batch_size if batches is not None: self._batch_size *= batches self._batches = batches self._file_pattern = file_pattern self._buffer_size = buffer_size self._parallelism = parallelism self._shift_ratio = shift_ratio self._seed = seed self._name = name self._compression_type = python_io.TFRecordCompressionType.NONE if compression_type is not None: self._compression_type = compression_type def get_yield_op(self): """Adds a node that yields a group of records every time it is executed. If RecordInput `batches` parameter is not None, it yields a list of record batches with the specified `batch_size`. """ compression_type = python_io.TFRecordOptions.get_compression_type_string( python_io.TFRecordOptions(self._compression_type)) records = gen_data_flow_ops.record_input( file_pattern=self._file_pattern, file_buffer_size=self._buffer_size, file_parallelism=self._parallelism, file_shuffle_shift_ratio=self._shift_ratio, batch_size=self._batch_size, file_random_seed=self._seed, compression_type=compression_type, name=self._name) if self._batches is None: return records else: with ops.name_scope(self._name): batch_list = [[] for _ in six.moves.range(self._batches)] records = array_ops.split(records, self._batch_size, 0) for index, protobuf in enumerate(records): batch_index = index % self._batches batch_list[batch_index].append(array_ops.reshape(protobuf, [])) return batch_list
apache-2.0
iulian787/spack
var/spack/repos/builtin/packages/memkind/package.py
1
2407
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * import os class Memkind(AutotoolsPackage): """The memkind library is a user extensible heap manager built on top of jemalloc which enables control of memory characteristics and a partitioning of the heap between kinds of memory. The kinds of memory are defined by operating system memory policies that have been applied to virtual address ranges. Memory characteristics supported by memkind without user extension include control of NUMA and page size features. The jemalloc non-standard interface has been extended to enable specialized arenas to make requests for virtual memory from the operating system through the memkind partition interface. Through the other memkind interfaces the user can control and extend memory partition features and allocate memory while selecting enabled features.""" homepage = "https://github.com/memkind/memkind" url = "https://github.com/memkind/memkind/archive/v1.7.0.tar.gz" version('1.10.1', sha256='c203615d964a0bb151756ad8a5c9565391ee77d79c1f8b59d2ea8ff87989b294') version('1.10.0', sha256='0399a1d6a179d065cdbc59bb687fcd00d09dfbb1789334acfdf7431a48707d26') version('1.9.0', sha256='491f21c8d09904ad23700c755b9134fbed08bf227506c2fde135429688158b84') version('1.8.0', sha256='8b57c5afa8afa6793e4662322e37620bbb11f119cd8d29654ec00945bbe13a17') version('1.7.0', sha256='5048eaaa1bc484203c685a019f3f428ab6c9b1cf94ef6d264e299bc0127ec572') depends_on('autoconf', type='build') depends_on('automake', type='build') depends_on('libtool', type='build') depends_on('m4', type='build') depends_on('numactl') phases = ['build_jemalloc', 'autoreconf', 'configure', 'build', 'install'] def patch(self): with open('VERSION', 'w') as version_file: version_file.write('{0}\n'.format(self.version)) def build_jemalloc(self, spec, prefix): if 
os.path.exists('build_jemalloc.sh'): bash = which('bash') bash('./build_jemalloc.sh') def autoreconf(self, spec, prefix): if os.path.exists('autogen.sh'): bash = which('bash') bash('./autogen.sh')
lgpl-2.1
pronexo-odoo/odoo-argentina
l10n_ar_account_check_duo/wizard_third/ticket_check_deposit.py
1
7776
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2012 OpenERP - Team de Localización Argentina. # https://launchpad.net/~openerp-l10n-ar-localization # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from osv import osv, fields from tools.translate import _ import netsvc import logging _logger = logging.getLogger(__name__) class wizard_ticket_deposit(osv.osv_memory): _name = 'wizard.ticket.deposit' _columns = { 'name':fields.char('Ticket deposited number', size=30), 'bank_account_id': fields.many2one('res.partner.bank', 'Bank Account',required=True), 'date': fields.date('Deposit Date',required=True), 'total_amount':fields.float("Total Amount",readonly=True), 'ticket_deposit':fields.many2one('ticket.deposit',string='Ticket Deposit'), } def default_get(self, cr, uid, fields, context=None): amount_total=0.00 one_time= True partnerid= {} values = super(wizard_ticket_deposit, self).default_get(cr, uid, fields, context=context) third_check = self.pool.get('account.third.check') if context is None: context = {} record_ids = context.get('active_ids', []) check_objs = third_check.browse(cr, uid, record_ids, context=context) for check in check_objs: if check.state == 'holding': amount_total += check.amount else: raise osv.except_osv('Check %s selected 
error' % (check.number), 'The selected checks must to be in the holding.' ) values.update({'total_amount': amount_total}) return values def action_ticket_deposit(self, cr, uid, ids, context=None): third_check_obj = self.pool.get('account.third.check') wf_service = netsvc.LocalService('workflow') ticket_obj = self.pool.get('ticket.deposit') ticket_line_obj = self.pool.get('ticket.deposit.line') move_line = self.pool.get('account.move.line') wizard = self.browse(cr, uid, ids[0], context=context) wizard_ids = [wizard.id] period_id = self.pool.get('account.period').find(cr, uid, wizard.date)[0] if context is None: context = {} record_ids = context.get('active_ids', []) check_ids = third_check_obj.browse(cr, uid, record_ids, context=context) for wizid in wizard_ids: pay = self.browse(cr, uid, wizid, context=context) #creo el ticket ticket_obj.create(cr, uid, { 'name': pay.name, 'bank_account_id': wizard.bank_account_id.id, 'date': pay.date, 'total_ammount': pay.total_amount, }) #busco el id del receipt id_ticket_dep= ticket_obj.search(cr, uid, [('name','=',pay.name)],context=context) for third_check in third_check_obj.browse(cr, uid, record_ids, context=context): third_check_obj.write(cr, uid, third_check.id, { 'ticket_deposit_id': id_ticket_dep[0], }) ticket_line_obj.create(cr, uid, { 'ticket_deposit_id': id_ticket_dep[0], 'account_third_check_id': third_check.id, 'name': 'Activo', }) id_receipt= ticket_obj.search(cr, uid, [('name','=',pay.name)],context=context) self.write(cr, uid, wizid, { 'ticket_deposit': id_receipt[0], }) for check in check_ids: if not (check.voucher_id.journal_id.default_credit_account_id.id or check.voucher_id.journal_id.default_debit_account_id.id): raise osv.except_osv('Journal %s selected error' % (check.voucher_id.journal_id.id), 'The journal must to be created defaults account for debit and credit.' 
) if not wizard.bank_account_id.account_id.id: raise osv.except_osv(' %s selected error' % (wizard.bank_account_id.bank.name), 'The account must to be created in The Company Bank / Accounting Information.' ) if check.state != 'holding': raise osv.except_osv('Check %s selected error' % (check.number), 'The selected checks must to be in the holding.' ) else: name = self.pool.get('ir.sequence').next_by_id(cr, uid, check.voucher_id.journal_id.sequence_id.id, context=context) move_id = self.pool.get('account.move').create(cr, uid, { 'name': name, 'journal_id': check.voucher_id.journal_id.id, 'state': 'draft', 'period_id': period_id, 'date': wizard.date, 'ref': 'Check Deposit Nr. ' + check.number, }) move_line.create(cr, uid, { 'name': name, 'centralisation': 'normal', 'account_id': wizard.bank_account_id.account_id.id, 'move_id': move_id, 'journal_id': check.voucher_id.journal_id.id, 'period_id': period_id, 'debit': check.amount, 'credit': 0.0, 'ref': 'Check Deposit Nr. ' + check.number, 'state': 'valid', }) move_line.create(cr, uid, { 'name': name, 'centralisation': 'normal', 'account_id': check.voucher_id.journal_id.default_credit_account_id.id, 'move_id': move_id, 'journal_id': check.voucher_id.journal_id.id, 'period_id': period_id, 'debit': 0.0, 'credit': check.amount, 'ref': 'Check Deposit Nr. ' + check.number, 'state': 'valid', }) check.write({'account_bank_id': wizard.bank_account_id.id}) wf_service.trg_validate(uid, 'account.third.check', check.id,'holding_deposited', cr) self.pool.get('account.move').write(cr, uid, [move_id], {'state': 'posted',}) return {} wizard_ticket_deposit()
agpl-3.0
kevinlee12/oppia
core/platform/transactions/gae_transaction_services.py
2
2181
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides a seam for transaction services.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from google.appengine.ext import ndb def run_in_transaction(fn, *args, **kwargs): """Runs a function in a transaction. Either all of the operations in the transaction are applied, or none of them are applied. If an exception is raised, the transaction is likely not safe to commit, since TransactionOptions.ALLOWED is used. Args: fn: callable. A function (or callable) to be called. *args: list(*). Variable length argument list passed to the callable. **kwargs: *. Arbitrary keyword arguments passed to the callable. Returns: *. Whatever fn() returns. Raises: Exception. Whatever fn() raises. datastore_errors.TransactionFailedError. The transaction failed. """ return ndb.transaction( lambda: fn(*args, **kwargs), xg=True, propagation=ndb.TransactionOptions.ALLOWED, ) def toplevel_wrapper(*args, **kwargs): """Enables a WSGI application to not exit until all its asynchronous requests have finished. For more information, see https://developers.google.com/appengine/docs/python/ndb/async#intro Args: *args: list(*). Variable length argument list. **kwargs: *. Arbitrary keyword arguments. Returns: app. The entire app toplevel. """ return ndb.toplevel(*args, **kwargs)
apache-2.0
PandaWei/tp-libvirt
libvirt/tests/src/virsh_cmd/domain/virsh_migrate_compcache.py
1
5964
import logging import subprocess import time from autotest.client.shared import error from autotest.client.shared import utils from autotest.client.shared import ssh_key from virttest import virsh, utils_misc from virttest.utils_test import libvirt as utlv def get_page_size(): """ Get the current memory page size using getconf. If getconf doesn't exist, assume it's 4096. :return: An integer of current page size bytes. """ try: getconf_path = utils_misc.find_command('getconf') return int(utils.run(getconf_path + ' PAGESIZE').stdout) except ValueError: logging.warning('getconf not found! Assuming 4K for PAGESIZE') return 4096 def run(test, params, env): """ Test command: migrate-compcache <domain> [--size <number>] 1) Run migrate-compcache command and check return code. """ vm_ref = params.get("vm_ref", "name") vm_name = params.get('main_vm') start_vm = 'yes' == params.get('start_vm', 'yes') pause_vm = 'yes' == params.get('pause_after_start_vm', 'no') expect_succeed = 'yes' == params.get('expect_succeed', 'yes') size_option = params.get('size_option', 'valid') action = params.get('compcache_action', 'get') vm = env.get_vm(vm_name) # Check if the virsh command migrate-compcache is available if not virsh.has_help_command('migrate-compcache'): raise error.TestNAError("This version of libvirt does not support " "virsh command migrate-compcache") # Prepare the VM state if it's not correct. 
if start_vm and not vm.is_alive(): vm.start() elif not start_vm and vm.is_alive(): vm.destroy() if pause_vm and not vm.is_paused(): vm.pause() # Setup domain reference if vm_ref == 'domname': vm_ref = vm_name # Setup size according to size_option: # minimal: Same as memory page size # maximal: Same as guest memory # empty: An empty string # small: One byte less than page size # large: Larger than guest memory # huge : Largest int64 page_size = get_page_size() if size_option == 'minimal': size = str(page_size) elif size_option == 'maximal': size = str(vm.get_max_mem() * 1024) elif size_option == 'empty': size = '""' elif size_option == 'small': size = str(page_size - 1) elif size_option == 'large': # Guest memory is larger than the max mem set, # add 50MB to ensure size exceeds guest memory. size = str(vm.get_max_mem() * 1024 + 50000000) elif size_option == 'huge': size = str(2 ** 64 - 1) else: size = size_option # If we need to get, just omit the size option if action == 'get': size = None # Run testing command result = virsh.migrate_compcache(vm_ref, size=size) logging.debug(result) remote_uri = params.get("jobabort_remote_uri") remote_host = params.get("migrate_dest_host") remote_user = params.get("migrate_dest_user", "root") remote_pwd = params.get("migrate_dest_pwd") check_job_compcache = False if not remote_host.count("EXAMPLE") and size is not None and expect_succeed: # Config ssh autologin for remote host ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22) if vm.is_dead(): vm.start() if vm.is_paused(): vm.resume() vm.wait_for_login() # Do actual migration to verify compression cache of migrate jobs command = "virsh migrate %s %s --compressed" % (vm_name, remote_uri) p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Give enough time for starting job t = 0 while t < 5: jobtype = vm.get_job_type() if "None" == jobtype: t += 1 time.sleep(1) continue elif jobtype is False: logging.error("Get job type 
failed.") break else: logging.debug("Job started: %s", jobtype) break jobinfo = virsh.domjobinfo(vm_ref, debug=True, ignore_status=True).stdout check_job_compcache = True if p.poll(): try: p.kill() except OSError: pass # Cleanup in case of successful migration utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri) # Shut down the VM to make sure the compcache setting cleared if vm.is_alive(): vm.destroy() # Check test result if expect_succeed: if result.exit_status != 0: raise error.TestFail( 'Expected succeed, but failed with result:\n%s' % result) if check_job_compcache: for line in jobinfo.splitlines(): detail = line.split(":") if detail[0].count("Compression cache"): value = detail[-1].split()[0].strip() value = int(float(value)) unit = detail[-1].split()[-1].strip() if unit == "KiB": size = int(int(size) / 1024) elif unit == "MiB": size = int(int(size) / 1048576) elif unit == "GiB": size = int(int(size) / 1073741824) if value != size: raise error.TestFail("Compression cache is not match" " with setted") else: return raise error.TestFail("Get compression cahce in job failed.") elif not expect_succeed: if result.exit_status == 0: raise error.TestFail( 'Expected fail, but succeed with result:\n%s' % result)
gpl-2.0
Suwmlee/XX-Net
lib/win32/pycparser/ply/cpp.py
192
33040
# ----------------------------------------------------------------------------- # cpp.py # # Author: David Beazley (http://www.dabeaz.com) # Copyright (C) 2007 # All rights reserved # # This module implements an ANSI-C style lexical preprocessor for PLY. # ----------------------------------------------------------------------------- from __future__ import generators # ----------------------------------------------------------------------------- # Default preprocessor lexer definitions. These tokens are enough to get # a basic preprocessor working. Other modules may import these if they want # ----------------------------------------------------------------------------- tokens = ( 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND' ) literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\"" # Whitespace def t_CPP_WS(t): r'\s+' t.lexer.lineno += t.value.count("\n") return t t_CPP_POUND = r'\#' t_CPP_DPOUND = r'\#\#' # Identifier t_CPP_ID = r'[A-Za-z_][\w_]*' # Integer literal def CPP_INTEGER(t): r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)' return t t_CPP_INTEGER = CPP_INTEGER # Floating literal t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' # String literal def t_CPP_STRING(t): r'\"([^\\\n]|(\\(.|\n)))*?\"' t.lexer.lineno += t.value.count("\n") return t # Character constant 'c' or L'c' def t_CPP_CHAR(t): r'(L)?\'([^\\\n]|(\\(.|\n)))*?\'' t.lexer.lineno += t.value.count("\n") return t # Comment def t_CPP_COMMENT(t): r'(/\*(.|\n)*?\*/)|(//.*?\n)' t.lexer.lineno += t.value.count("\n") return t def t_error(t): t.type = t.value[0] t.value = t.value[0] t.lexer.skip(1) return t import re import copy import time import os.path # ----------------------------------------------------------------------------- # trigraph() # # Given an input string, this function replaces all trigraph sequences. # The following mapping is used: # # ??= # # ??/ \ # ??' ^ # ??( [ # ??) ] # ??! 
| # ??< { # ??> } # ??- ~ # ----------------------------------------------------------------------------- _trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''') _trigraph_rep = { '=':'#', '/':'\\', "'":'^', '(':'[', ')':']', '!':'|', '<':'{', '>':'}', '-':'~' } def trigraph(input): return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input) # ------------------------------------------------------------------ # Macro object # # This object holds information about preprocessor macros # # .name - Macro name (string) # .value - Macro value (a list of tokens) # .arglist - List of argument names # .variadic - Boolean indicating whether or not variadic macro # .vararg - Name of the variadic parameter # # When a macro is created, the macro replacement token sequence is # pre-scanned and used to create patch lists that are later used # during macro expansion # ------------------------------------------------------------------ class Macro(object): def __init__(self,name,value,arglist=None,variadic=False): self.name = name self.value = value self.arglist = arglist self.variadic = variadic if variadic: self.vararg = arglist[-1] self.source = None # ------------------------------------------------------------------ # Preprocessor object # # Object representing a preprocessor. Contains macro definitions, # include directories, and other information # ------------------------------------------------------------------ class Preprocessor(object): def __init__(self,lexer=None): if lexer is None: lexer = lex.lexer self.lexer = lexer self.macros = { } self.path = [] self.temp_path = [] # Probe the lexer for selected tokens self.lexprobe() tm = time.localtime() self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm)) self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm)) self.parser = None # ----------------------------------------------------------------------------- # tokenize() # # Utility function. 
Given a string of text, tokenize into a list of tokens # ----------------------------------------------------------------------------- def tokenize(self,text): tokens = [] self.lexer.input(text) while True: tok = self.lexer.token() if not tok: break tokens.append(tok) return tokens # --------------------------------------------------------------------- # error() # # Report a preprocessor error/warning of some kind # ---------------------------------------------------------------------- def error(self,file,line,msg): print("%s:%d %s" % (file,line,msg)) # ---------------------------------------------------------------------- # lexprobe() # # This method probes the preprocessor lexer object to discover # the token types of symbols that are important to the preprocessor. # If this works right, the preprocessor will simply "work" # with any suitable lexer regardless of how tokens have been named. # ---------------------------------------------------------------------- def lexprobe(self): # Determine the token type for identifiers self.lexer.input("identifier") tok = self.lexer.token() if not tok or tok.value != "identifier": print("Couldn't determine identifier type") else: self.t_ID = tok.type # Determine the token type for integers self.lexer.input("12345") tok = self.lexer.token() if not tok or int(tok.value) != 12345: print("Couldn't determine integer type") else: self.t_INTEGER = tok.type self.t_INTEGER_TYPE = type(tok.value) # Determine the token type for strings enclosed in double quotes self.lexer.input("\"filename\"") tok = self.lexer.token() if not tok or tok.value != "\"filename\"": print("Couldn't determine string type") else: self.t_STRING = tok.type # Determine the token type for whitespace--if any self.lexer.input(" ") tok = self.lexer.token() if not tok or tok.value != " ": self.t_SPACE = None else: self.t_SPACE = tok.type # Determine the token type for newlines self.lexer.input("\n") tok = self.lexer.token() if not tok or tok.value != "\n": 
self.t_NEWLINE = None print("Couldn't determine token for newlines") else: self.t_NEWLINE = tok.type self.t_WS = (self.t_SPACE, self.t_NEWLINE) # Check for other characters used by the preprocessor chars = [ '<','>','#','##','\\','(',')',',','.'] for c in chars: self.lexer.input(c) tok = self.lexer.token() if not tok or tok.value != c: print("Unable to lex '%s' required for preprocessor" % c) # ---------------------------------------------------------------------- # add_path() # # Adds a search path to the preprocessor. # ---------------------------------------------------------------------- def add_path(self,path): self.path.append(path) # ---------------------------------------------------------------------- # group_lines() # # Given an input string, this function splits it into lines. Trailing whitespace # is removed. Any line ending with \ is grouped with the next line. This # function forms the lowest level of the preprocessor---grouping into text into # a line-by-line format. # ---------------------------------------------------------------------- def group_lines(self,input): lex = self.lexer.clone() lines = [x.rstrip() for x in input.splitlines()] for i in xrange(len(lines)): j = i+1 while lines[i].endswith('\\') and (j < len(lines)): lines[i] = lines[i][:-1]+lines[j] lines[j] = "" j += 1 input = "\n".join(lines) lex.input(input) lex.lineno = 1 current_line = [] while True: tok = lex.token() if not tok: break current_line.append(tok) if tok.type in self.t_WS and '\n' in tok.value: yield current_line current_line = [] if current_line: yield current_line # ---------------------------------------------------------------------- # tokenstrip() # # Remove leading/trailing whitespace tokens from a token list # ---------------------------------------------------------------------- def tokenstrip(self,tokens): i = 0 while i < len(tokens) and tokens[i].type in self.t_WS: i += 1 del tokens[:i] i = len(tokens)-1 while i >= 0 and tokens[i].type in self.t_WS: i -= 1 del 
tokens[i+1:] return tokens # ---------------------------------------------------------------------- # collect_args() # # Collects comma separated arguments from a list of tokens. The arguments # must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions) # where tokencount is the number of tokens consumed, args is a list of arguments, # and positions is a list of integers containing the starting index of each # argument. Each argument is represented by a list of tokens. # # When collecting arguments, leading and trailing whitespace is removed # from each argument. # # This function properly handles nested parenthesis and commas---these do not # define new arguments. # ---------------------------------------------------------------------- def collect_args(self,tokenlist): args = [] positions = [] current_arg = [] nesting = 1 tokenlen = len(tokenlist) # Search for the opening '('. i = 0 while (i < tokenlen) and (tokenlist[i].type in self.t_WS): i += 1 if (i < tokenlen) and (tokenlist[i].value == '('): positions.append(i+1) else: self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments") return 0, [], [] i += 1 while i < tokenlen: t = tokenlist[i] if t.value == '(': current_arg.append(t) nesting += 1 elif t.value == ')': nesting -= 1 if nesting == 0: if current_arg: args.append(self.tokenstrip(current_arg)) positions.append(i) return i+1,args,positions current_arg.append(t) elif t.value == ',' and nesting == 1: args.append(self.tokenstrip(current_arg)) positions.append(i+1) current_arg = [] else: current_arg.append(t) i += 1 # Missing end argument self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments") return 0, [],[] # ---------------------------------------------------------------------- # macro_prescan() # # Examine the macro value (token sequence) and identify patch points # This is used to speed up macro expansion later on---we'll know # right away where to apply patches to the value to form the expansion # 
---------------------------------------------------------------------- def macro_prescan(self,macro): macro.patch = [] # Standard macro arguments macro.str_patch = [] # String conversion expansion macro.var_comma_patch = [] # Variadic macro comma patch i = 0 while i < len(macro.value): if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist: argnum = macro.arglist.index(macro.value[i].value) # Conversion of argument to a string if i > 0 and macro.value[i-1].value == '#': macro.value[i] = copy.copy(macro.value[i]) macro.value[i].type = self.t_STRING del macro.value[i-1] macro.str_patch.append((argnum,i-1)) continue # Concatenation elif (i > 0 and macro.value[i-1].value == '##'): macro.patch.append(('c',argnum,i-1)) del macro.value[i-1] continue elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'): macro.patch.append(('c',argnum,i)) i += 1 continue # Standard expansion else: macro.patch.append(('e',argnum,i)) elif macro.value[i].value == '##': if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \ ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \ (macro.value[i+1].value == macro.vararg): macro.var_comma_patch.append(i-1) i += 1 macro.patch.sort(key=lambda x: x[2],reverse=True) # ---------------------------------------------------------------------- # macro_expand_args() # # Given a Macro and list of arguments (each a token list), this method # returns an expanded version of a macro. The return value is a token sequence # representing the replacement macro tokens # ---------------------------------------------------------------------- def macro_expand_args(self,macro,args): # Make a copy of the macro token sequence rep = [copy.copy(_x) for _x in macro.value] # Make string expansion patches. 
These do not alter the length of the replacement sequence str_expansion = {} for argnum, i in macro.str_patch: if argnum not in str_expansion: str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\") rep[i] = copy.copy(rep[i]) rep[i].value = str_expansion[argnum] # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid comma_patch = False if macro.variadic and not args[-1]: for i in macro.var_comma_patch: rep[i] = None comma_patch = True # Make all other patches. The order of these matters. It is assumed that the patch list # has been sorted in reverse order of patch location since replacements will cause the # size of the replacement sequence to expand from the patch point. expanded = { } for ptype, argnum, i in macro.patch: # Concatenation. Argument is left unexpanded if ptype == 'c': rep[i:i+1] = args[argnum] # Normal expansion. Argument is macro expanded first elif ptype == 'e': if argnum not in expanded: expanded[argnum] = self.expand_macros(args[argnum]) rep[i:i+1] = expanded[argnum] # Get rid of removed comma if necessary if comma_patch: rep = [_i for _i in rep if _i] return rep # ---------------------------------------------------------------------- # expand_macros() # # Given a list of tokens, this function performs macro expansion. # The expanded argument is a dictionary that contains macros already # expanded. This is used to prevent infinite recursion. 
# ---------------------------------------------------------------------- def expand_macros(self,tokens,expanded=None): if expanded is None: expanded = {} i = 0 while i < len(tokens): t = tokens[i] if t.type == self.t_ID: if t.value in self.macros and t.value not in expanded: # Yes, we found a macro match expanded[t.value] = True m = self.macros[t.value] if not m.arglist: # A simple macro ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded) for e in ex: e.lineno = t.lineno tokens[i:i+1] = ex i += len(ex) else: # A macro with arguments j = i + 1 while j < len(tokens) and tokens[j].type in self.t_WS: j += 1 if tokens[j].value == '(': tokcount,args,positions = self.collect_args(tokens[j:]) if not m.variadic and len(args) != len(m.arglist): self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist))) i = j + tokcount elif m.variadic and len(args) < len(m.arglist)-1: if len(m.arglist) > 2: self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1)) else: self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1)) i = j + tokcount else: if m.variadic: if len(args) == len(m.arglist)-1: args.append([]) else: args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1] del args[len(m.arglist):] # Get macro replacement text rep = self.macro_expand_args(m,args) rep = self.expand_macros(rep,expanded) for r in rep: r.lineno = t.lineno tokens[i:j+tokcount] = rep i += len(rep) del expanded[t.value] continue elif t.value == '__LINE__': t.type = self.t_INTEGER t.value = self.t_INTEGER_TYPE(t.lineno) i += 1 return tokens # ---------------------------------------------------------------------- # evalexpr() # # Evaluate an expression token sequence for the purposes of evaluating # integral expressions. 
# ---------------------------------------------------------------------- def evalexpr(self,tokens): # tokens = tokenize(line) # Search for defined macros i = 0 while i < len(tokens): if tokens[i].type == self.t_ID and tokens[i].value == 'defined': j = i + 1 needparen = False result = "0L" while j < len(tokens): if tokens[j].type in self.t_WS: j += 1 continue elif tokens[j].type == self.t_ID: if tokens[j].value in self.macros: result = "1L" else: result = "0L" if not needparen: break elif tokens[j].value == '(': needparen = True elif tokens[j].value == ')': break else: self.error(self.source,tokens[i].lineno,"Malformed defined()") j += 1 tokens[i].type = self.t_INTEGER tokens[i].value = self.t_INTEGER_TYPE(result) del tokens[i+1:j+1] i += 1 tokens = self.expand_macros(tokens) for i,t in enumerate(tokens): if t.type == self.t_ID: tokens[i] = copy.copy(t) tokens[i].type = self.t_INTEGER tokens[i].value = self.t_INTEGER_TYPE("0L") elif t.type == self.t_INTEGER: tokens[i] = copy.copy(t) # Strip off any trailing suffixes tokens[i].value = str(tokens[i].value) while tokens[i].value[-1] not in "0123456789abcdefABCDEF": tokens[i].value = tokens[i].value[:-1] expr = "".join([str(x.value) for x in tokens]) expr = expr.replace("&&"," and ") expr = expr.replace("||"," or ") expr = expr.replace("!"," not ") try: result = eval(expr) except StandardError: self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression") result = 0 return result # ---------------------------------------------------------------------- # parsegen() # # Parse an input string/ # ---------------------------------------------------------------------- def parsegen(self,input,source=None): # Replace trigraph sequences t = trigraph(input) lines = self.group_lines(t) if not source: source = "" self.define("__FILE__ \"%s\"" % source) self.source = source chunk = [] enable = True iftrigger = False ifstack = [] for x in lines: for i,tok in enumerate(x): if tok.type not in self.t_WS: break if tok.value == 
'#': # Preprocessor directive for tok in x: if tok in self.t_WS and '\n' in tok.value: chunk.append(tok) dirtokens = self.tokenstrip(x[i+1:]) if dirtokens: name = dirtokens[0].value args = self.tokenstrip(dirtokens[1:]) else: name = "" args = [] if name == 'define': if enable: for tok in self.expand_macros(chunk): yield tok chunk = [] self.define(args) elif name == 'include': if enable: for tok in self.expand_macros(chunk): yield tok chunk = [] oldfile = self.macros['__FILE__'] for tok in self.include(args): yield tok self.macros['__FILE__'] = oldfile self.source = source elif name == 'undef': if enable: for tok in self.expand_macros(chunk): yield tok chunk = [] self.undef(args) elif name == 'ifdef': ifstack.append((enable,iftrigger)) if enable: if not args[0].value in self.macros: enable = False iftrigger = False else: iftrigger = True elif name == 'ifndef': ifstack.append((enable,iftrigger)) if enable: if args[0].value in self.macros: enable = False iftrigger = False else: iftrigger = True elif name == 'if': ifstack.append((enable,iftrigger)) if enable: result = self.evalexpr(args) if not result: enable = False iftrigger = False else: iftrigger = True elif name == 'elif': if ifstack: if ifstack[-1][0]: # We only pay attention if outer "if" allows this if enable: # If already true, we flip enable False enable = False elif not iftrigger: # If False, but not triggered yet, we'll check expression result = self.evalexpr(args) if result: enable = True iftrigger = True else: self.error(self.source,dirtokens[0].lineno,"Misplaced #elif") elif name == 'else': if ifstack: if ifstack[-1][0]: if enable: enable = False elif not iftrigger: enable = True iftrigger = True else: self.error(self.source,dirtokens[0].lineno,"Misplaced #else") elif name == 'endif': if ifstack: enable,iftrigger = ifstack.pop() else: self.error(self.source,dirtokens[0].lineno,"Misplaced #endif") else: # Unknown preprocessor directive pass else: # Normal text if enable: chunk.extend(x) for tok in 
self.expand_macros(chunk): yield tok chunk = [] # ---------------------------------------------------------------------- # include() # # Implementation of file-inclusion # ---------------------------------------------------------------------- def include(self,tokens): # Try to extract the filename and then process an include file if not tokens: return if tokens: if tokens[0].value != '<' and tokens[0].type != self.t_STRING: tokens = self.expand_macros(tokens) if tokens[0].value == '<': # Include <...> i = 1 while i < len(tokens): if tokens[i].value == '>': break i += 1 else: print("Malformed #include <...>") return filename = "".join([x.value for x in tokens[1:i]]) path = self.path + [""] + self.temp_path elif tokens[0].type == self.t_STRING: filename = tokens[0].value[1:-1] path = self.temp_path + [""] + self.path else: print("Malformed #include statement") return for p in path: iname = os.path.join(p,filename) try: data = open(iname,"r").read() dname = os.path.dirname(iname) if dname: self.temp_path.insert(0,dname) for tok in self.parsegen(data,filename): yield tok if dname: del self.temp_path[0] break except IOError: pass else: print("Couldn't find '%s'" % filename) # ---------------------------------------------------------------------- # define() # # Define a new macro # ---------------------------------------------------------------------- def define(self,tokens): if isinstance(tokens,(str,unicode)): tokens = self.tokenize(tokens) linetok = tokens try: name = linetok[0] if len(linetok) > 1: mtype = linetok[1] else: mtype = None if not mtype: m = Macro(name.value,[]) self.macros[name.value] = m elif mtype.type in self.t_WS: # A normal macro m = Macro(name.value,self.tokenstrip(linetok[2:])) self.macros[name.value] = m elif mtype.value == '(': # A macro with arguments tokcount, args, positions = self.collect_args(linetok[1:]) variadic = False for a in args: if variadic: print("No more arguments may follow a variadic argument") break astr = 
"".join([str(_i.value) for _i in a]) if astr == "...": variadic = True a[0].type = self.t_ID a[0].value = '__VA_ARGS__' variadic = True del a[1:] continue elif astr[-3:] == "..." and a[0].type == self.t_ID: variadic = True del a[1:] # If, for some reason, "." is part of the identifier, strip off the name for the purposes # of macro expansion if a[0].value[-3:] == '...': a[0].value = a[0].value[:-3] continue if len(a) > 1 or a[0].type != self.t_ID: print("Invalid macro argument") break else: mvalue = self.tokenstrip(linetok[1+tokcount:]) i = 0 while i < len(mvalue): if i+1 < len(mvalue): if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##': del mvalue[i] continue elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS: del mvalue[i+1] i += 1 m = Macro(name.value,mvalue,[x[0].value for x in args],variadic) self.macro_prescan(m) self.macros[name.value] = m else: print("Bad macro definition") except LookupError: print("Bad macro definition") # ---------------------------------------------------------------------- # undef() # # Undefine a macro # ---------------------------------------------------------------------- def undef(self,tokens): id = tokens[0].value try: del self.macros[id] except LookupError: pass # ---------------------------------------------------------------------- # parse() # # Parse input text. 
# ---------------------------------------------------------------------- def parse(self,input,source=None,ignore={}): self.ignore = ignore self.parser = self.parsegen(input,source) # ---------------------------------------------------------------------- # token() # # Method to return individual tokens # ---------------------------------------------------------------------- def token(self): try: while True: tok = next(self.parser) if tok.type not in self.ignore: return tok except StopIteration: self.parser = None return None if __name__ == '__main__': import ply.lex as lex lexer = lex.lex() # Run a preprocessor import sys f = open(sys.argv[1]) input = f.read() p = Preprocessor(lexer) p.parse(input,sys.argv[1]) while True: tok = p.token() if not tok: break print(p.source, tok)
bsd-2-clause
gammalib/gammalib
inst/cta/test/dev/test_gauss.py
1
3354
#! /usr/bin/env python # ========================================================================== # This script tests the Gaussian gradient. # # Requires: # - matplotlib (optional) # # Copyright (C) 2012 Juergen Knoedlseder # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # ========================================================================== from gammalib import * from math import * # ====================== # # Test Gaussian gradient # # ====================== # def gauss(dir, sigma): """ Test Gaussian gradient. """ # Compute derivative dh = 0.0001 s = GSkyDir() e = GEnergy() t = GTime() f1 = GModelSpatialRadialGauss(s, sigma) f2 = GModelSpatialRadialGauss(s, sigma + dh) v1 = f1.eval(GPhoton(dir, e, t), True) g1 = f1[2].gradient() v2 = f2.eval(GPhoton(dir, e, t), True) g2 = f2[2].gradient() g = (v2 - v1) / dh # Print result #print v1, v2 #print g1, g2, g # Return return g # ============= # # Show Gaussian # # ============= # def show_gaussian(sigma): """ Show Gaussian using matplotlib (if available). 
""" # Only proceed if matplotlib is available try: # Import matplotlib import matplotlib.pyplot as plt # Create figure plt.figure(1) plt.title("Gaussian (sigma=" + str(sigma) + " deg)") # Setup gaussian skydir = GSkyDir() gauss = GModelSpatialRadialGauss(skydir, sigma) # Create angular axis theta = [i * sigma * 0.05 for i in range(50)] # Extract function f_gauss = [] f_expected = [] sigma_rad = sigma * (pi / 180.0) norm = 1.0 / (2.0 * pi * sigma_rad * sigma_rad) eng = GEnergy() time = GTime() for t in theta: s = GSkyDir() s.radec_deg(0.0, t) f = gauss.eval(GPhoton(s, eng, time)) e = norm * exp(-0.5 * t * t / sigma / sigma) f_gauss.append(f) f_expected.append(e) # Plot data plt.plot(theta, f_gauss, 'r-') plt.plot(theta, f_expected, 'b.') # Set axes plt.xlabel("Separation (deg)") plt.ylabel("Function value") # Show plot plt.show() except ImportError: print("Matplotlib is not (correctly) installed on your system.") # Return return #==========================# # Main routine entry point # #==========================# if __name__ == '__main__': """ Test Gaussian gradient. """ # Dump header print("") print("**************************") print("* Test Gaussian gradient *") print("**************************") # Show Gaussian show_gaussian(3.0)
gpl-3.0
sushramesh/lwc
lib/python2.7/site-packages/django/db/backends/sqlite3/base.py
103
17462
""" SQLite3 backend for django. Works with either the pysqlite2 module or the sqlite3 module in the standard library. """ from __future__ import unicode_literals import datetime import decimal import re import warnings from django.conf import settings from django.db import utils from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.base.validation import BaseDatabaseValidation from django.utils import six, timezone from django.utils.dateparse import parse_date, parse_duration, parse_time from django.utils.encoding import force_text from django.utils.safestring import SafeBytes try: import pytz except ImportError: pytz = None try: try: from pysqlite2 import dbapi2 as Database except ImportError: from sqlite3 import dbapi2 as Database except ImportError as exc: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc) # Some of these import sqlite3, so import them after checking if it's installed. from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip from .utils import parse_datetime_with_timezone_support # isort:skip DatabaseError = Database.DatabaseError IntegrityError = Database.IntegrityError def adapt_datetime_with_timezone_support(value): # Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL. if settings.USE_TZ: if timezone.is_naive(value): warnings.warn("SQLite received a naive datetime (%s)" " while time zone support is active." 
% value, RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) value = value.astimezone(timezone.utc).replace(tzinfo=None) return value.isoformat(str(" ")) def decoder(conv_func): """ The Python sqlite3 interface returns always byte strings. This function converts the received value to a regular string before passing it to the receiver function. """ return lambda s: conv_func(s.decode('utf-8')) Database.register_converter(str("bool"), decoder(lambda s: s == '1')) Database.register_converter(str("time"), decoder(parse_time)) Database.register_converter(str("date"), decoder(parse_date)) Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support)) Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support)) Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support)) Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal)) Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support) Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal) if six.PY2: Database.register_adapter(str, lambda s: s.decode('utf-8')) Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8')) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'sqlite' # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. 
data_types = { 'AutoField': 'integer', 'BinaryField': 'BLOB', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'CommaSeparatedIntegerField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'DecimalField': 'decimal', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'real', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveIntegerField': 'integer unsigned', 'PositiveSmallIntegerField': 'smallint unsigned', 'SlugField': 'varchar(%(max_length)s)', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'char(32)', } data_types_suffix = { 'AutoField': 'AUTOINCREMENT', } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See http://www.sqlite.org/lang_expr.html for an explanation. operators = { 'exact': '= %s', 'iexact': "LIKE %s ESCAPE '\\'", 'contains': "LIKE %s ESCAPE '\\'", 'icontains': "LIKE %s ESCAPE '\\'", 'regex': 'REGEXP %s', 'iregex': "REGEXP '(?i)' || %s", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE %s ESCAPE '\\'", 'endswith': "LIKE %s ESCAPE '\\'", 'istartswith': "LIKE %s ESCAPE '\\'", 'iendswith': "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. 
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", 'startswith': r"LIKE {} || '%%' ESCAPE '\'", 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", 'endswith': r"LIKE '%%' || {} ESCAPE '\'", 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.features = DatabaseFeatures(self) self.ops = DatabaseOperations(self) self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = BaseDatabaseValidation(self) def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict['NAME']: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") kwargs = { 'database': settings_dict['NAME'], 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, } kwargs.update(settings_dict['OPTIONS']) # Always allow the underlying SQLite connection to be shareable # between multiple threads. The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in pysqlite and it cannot be changed once a connection is # opened. if 'check_same_thread' in kwargs and kwargs['check_same_thread']: warnings.warn( 'The `check_same_thread` option was provided and set to ' 'True. It will be overridden with False. 
Use the ' '`DatabaseWrapper.allow_thread_sharing` property instead ' 'for controlling thread shareability.', RuntimeWarning ) kwargs.update({'check_same_thread': False}) if self.features.can_share_in_memory_db: kwargs.update({'uri': True}) return kwargs def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) conn.create_function("django_date_extract", 2, _sqlite_date_extract) conn.create_function("django_date_trunc", 2, _sqlite_date_trunc) conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract) conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc) conn.create_function("regexp", 2, _sqlite_regexp) conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) conn.create_function("django_power", 2, _sqlite_power) return conn def init_connection_state(self): pass def create_cursor(self): return self.connection.cursor(factory=SQLiteCursorWrapper) def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. if not self.is_in_memory_db(self.settings_dict['NAME']): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # Two conditions are required here: # - A sufficiently recent version of SQLite to support savepoints, # - Being in a transaction, which can only happen inside 'atomic'. # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. return self.features.uses_savepoints and self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. 
level = '' # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def check_constraints(self, table_names=None): """ Checks each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides detailed information about the invalid reference in the error message. Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE") """ cursor = self.cursor() if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute(""" SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL""" % (primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name)) for bad_row in cursor.fetchall(): raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid " "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s." 
% (table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name)) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self, name): return name == ":memory:" or "mode=memory" in force_text(name) FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s') class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". """ def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%') def _sqlite_date_extract(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (ValueError, TypeError): return None if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 else: return getattr(dt, lookup_type) def _sqlite_date_trunc(lookup_type, dt): try: dt = backend_utils.typecast_timestamp(dt) except (ValueError, TypeError): return None if lookup_type == 'year': return "%i-01-01" % dt.year elif lookup_type == 'month': return "%i-%02i-01" % (dt.year, dt.month) elif lookup_type == 'day': return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) def _sqlite_datetime_extract(lookup_type, dt, tzname): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (ValueError, TypeError): return None if tzname is not None: dt = 
timezone.localtime(dt, pytz.timezone(tzname)) if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 else: return getattr(dt, lookup_type) def _sqlite_datetime_trunc(lookup_type, dt, tzname): try: dt = backend_utils.typecast_timestamp(dt) except (ValueError, TypeError): return None if tzname is not None: dt = timezone.localtime(dt, pytz.timezone(tzname)) if lookup_type == 'year': return "%i-01-01 00:00:00" % dt.year elif lookup_type == 'month': return "%i-%02i-01 00:00:00" % (dt.year, dt.month) elif lookup_type == 'day': return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'hour': return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour) elif lookup_type == 'minute': return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) elif lookup_type == 'second': return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) def _sqlite_format_dtdelta(conn, lhs, rhs): """ LHS and RHS can be either: - An integer number of microseconds - A string representing a timedelta object - A string representing a datetime """ try: if isinstance(lhs, six.integer_types): lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000)) real_lhs = parse_duration(lhs) if real_lhs is None: real_lhs = backend_utils.typecast_timestamp(lhs) if isinstance(rhs, six.integer_types): rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000)) real_rhs = parse_duration(rhs) if real_rhs is None: real_rhs = backend_utils.typecast_timestamp(rhs) if conn.strip() == '+': out = real_lhs + real_rhs else: out = real_lhs - real_rhs except (ValueError, TypeError): return None # typecast_timestamp returns a date or a datetime without timezone. # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]" return str(out) def _sqlite_regexp(re_pattern, re_string): return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False def _sqlite_power(x, y): return x ** y
mit
ninotoshi/tensorflow
tensorflow/python/framework/errors_test.py
14
3484
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.framework.errors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings import tensorflow as tf from tensorflow.core.lib.core import error_codes_pb2 class ErrorsTest(tf.test.TestCase): def testUniqueClassForEachErrorCode(self): for error_code, exc_type in [ (tf.errors.CANCELLED, tf.errors.CancelledError), (tf.errors.UNKNOWN, tf.errors.UnknownError), (tf.errors.INVALID_ARGUMENT, tf.errors.InvalidArgumentError), (tf.errors.DEADLINE_EXCEEDED, tf.errors.DeadlineExceededError), (tf.errors.NOT_FOUND, tf.errors.NotFoundError), (tf.errors.ALREADY_EXISTS, tf.errors.AlreadyExistsError), (tf.errors.PERMISSION_DENIED, tf.errors.PermissionDeniedError), (tf.errors.UNAUTHENTICATED, tf.errors.UnauthenticatedError), (tf.errors.RESOURCE_EXHAUSTED, tf.errors.ResourceExhaustedError), (tf.errors.FAILED_PRECONDITION, tf.errors.FailedPreconditionError), (tf.errors.ABORTED, tf.errors.AbortedError), (tf.errors.OUT_OF_RANGE, tf.errors.OutOfRangeError), (tf.errors.UNIMPLEMENTED, tf.errors.UnimplementedError), (tf.errors.INTERNAL, tf.errors.InternalError), (tf.errors.UNAVAILABLE, tf.errors.UnavailableError), (tf.errors.DATA_LOSS, tf.errors.DataLossError), ]: # pylint: disable=protected-access self.assertTrue(isinstance( 
tf.errors._make_specific_exception(None, None, None, error_code), exc_type)) # pylint: enable=protected-access def testKnownErrorClassForEachErrorCodeInProto(self): for error_code in error_codes_pb2.Code.values(): # pylint: disable=line-too-long if error_code in (error_codes_pb2.OK, error_codes_pb2.DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_): continue # pylint: enable=line-too-long with warnings.catch_warnings(record=True) as w: # pylint: disable=protected-access exc = tf.errors._make_specific_exception(None, None, None, error_code) # pylint: enable=protected-access self.assertEqual(0, len(w)) # No warning is raised. self.assertTrue(isinstance(exc, tf.OpError)) self.assertTrue(tf.OpError in exc.__class__.__bases__) def testUnknownErrorCodeCausesWarning(self): with warnings.catch_warnings(record=True) as w: # pylint: disable=protected-access exc = tf.errors._make_specific_exception(None, None, None, 37) # pylint: enable=protected-access self.assertEqual(1, len(w)) self.assertTrue("Unknown error code: 37" in str(w[0].message)) self.assertTrue(isinstance(exc, tf.OpError)) if __name__ == "__main__": tf.test.main()
apache-2.0
Southpaw-TACTIC/TACTIC
src/tactic/ui/table/explorer_wdg.py
2
3695
########################################################### # # Copyright (c) 2005, Southpaw Technology # All Rights Reserved # # PROPRIETARY INFORMATION. This software is proprietary to # Southpaw Technology, and is not to be reproduced, transmitted, # or disclosed in any way without written permission. # # # __all__ = ['ExplorerTableElementWdg', 'ExplorerElementWdg'] from pyasm.common import TacticException, Common from pyasm.biz import Project from pyasm.web import Widget from pyasm.widget import IconWdg from tactic.ui.common import BaseTableElementWdg from tactic.ui.widget import IconButtonWdg class ExplorerElementWdg(BaseTableElementWdg): ARGS_KEYS = { 'mode': { 'description': 'Determines which directory to go to when the explorer button is pressed.', 'type': 'SelectWdg', 'values': 'sandbox|repository', 'category': 'options' } } def get_decrement(self): decrement = self.get_option('decrement') if not decrement: decrement = 0 else: decrement = int(decrement) return decrement def get_base_dir( self, sobject): decrement = self.get_decrement() base_dir = Project.get_sandbox_base_dir(sobject, decrement=decrement) return base_dir def get_lib_dir(self, sobject): snapshot = None base_dir = Project.get_project_lib_dir(sobject, snapshot) return base_dir def get_client_repo_dir(self, sobject): snapshot = None base_dir = Project.get_project_client_lib_dir(sobject, snapshot) #TODO: u could decrement the client repo dir here, if really needed return base_dir def get_title(self): widget = Widget() title = super(ExplorerElementWdg, self).get_title() widget.add( title ) return widget def get_display(self): sobject = self.get_current_sobject() mode = self.get_option('mode') if not mode: mode = 'sandbox' widget = Widget() sobject_dir = '' sobject_lib_dir = '' # find the path to open explorer if sobject.is_insert(): button = IconWdg("No Path Found", IconWdg.CROSS, long=False) else: try: if mode == 'sandbox': sobject_dir = self.get_base_dir(sobject) elif mode in ['client_repo', 
'repository']: sobject_dir = self.get_client_repo_dir(sobject) sobject_lib_dir = self.get_lib_dir(sobject) sobject_dir = sobject_dir.strip() sobject_dir = Common.process_unicode_string(sobject_dir) except TacticException as e: print("WARNING: ", str(e)) button = IconWdg("No Path Found", IconWdg.CROSS, long=False) else: button = IconButtonWdg(title="Explore: %s" % sobject_dir, icon=IconWdg.LOAD) if sobject_dir == sobject_lib_dir: button.add_behavior({'type':'click_up', 'cbjs_action':"spt.alert('You are not allowed to browse directories on a web server.');"}) else: button.add_behavior({'type':'click_up', 'cbjs_action':'''var applet = spt.Applet.get(); applet.makedirs('%s'); applet.open_explorer('%s');''' % (sobject_dir, sobject_dir)} ) widget.add(button) return widget def is_editable(cls): '''to avoid all those CellEditWdg''' return False is_editable = classmethod(is_editable) # DEPRECATED use ExplorerElementWdg class ExplorerTableElementWdg(ExplorerElementWdg): pass
epl-1.0
qedi-r/home-assistant
homeassistant/components/arduino/sensor.py
4
1767
"""Support for getting information from Arduino pins.""" import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.components import arduino from homeassistant.const import CONF_NAME from homeassistant.helpers.entity import Entity import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_PINS = "pins" CONF_TYPE = "analog" PIN_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string}) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_PINS): vol.Schema({cv.positive_int: PIN_SCHEMA})} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Arduino platform.""" if arduino.BOARD is None: _LOGGER.error("A connection has not been made to the Arduino board") return False pins = config.get(CONF_PINS) sensors = [] for pinnum, pin in pins.items(): sensors.append(ArduinoSensor(pin.get(CONF_NAME), pinnum, CONF_TYPE)) add_entities(sensors) class ArduinoSensor(Entity): """Representation of an Arduino Sensor.""" def __init__(self, name, pin, pin_type): """Initialize the sensor.""" self._pin = pin self._name = name self.pin_type = pin_type self.direction = "in" self._value = None arduino.BOARD.set_mode(self._pin, self.direction, self.pin_type) @property def state(self): """Return the state of the sensor.""" return self._value @property def name(self): """Get the name of the sensor.""" return self._name def update(self): """Get the latest value from the pin.""" self._value = arduino.BOARD.get_analog_inputs()[self._pin][1]
apache-2.0
Softmotions/edx-platform
lms/djangoapps/courseware/grades.py
10
33625
# Compute grades using real division, with no integer truncation from __future__ import division from collections import defaultdict from functools import partial import json import random import logging from contextlib import contextmanager from django.conf import settings from django.db import transaction from django.test.client import RequestFactory from django.core.cache import cache import dogstats_wrapper as dog_stats_api from courseware import courses from courseware.model_data import FieldDataCache, ScoresClient from student.models import anonymous_id_for_user from util.module_utils import yield_dynamic_descriptor_descendants from xmodule import graders from xmodule.graders import Score from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError from .models import StudentModule from .module_render import get_module_for_descriptor from submissions import api as sub_api # installed from the edx-submissions repository from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from openedx.core.djangoapps.signals.signals import GRADES_UPDATED log = logging.getLogger("edx.courseware") class MaxScoresCache(object): """ A cache for unweighted max scores for problems. The key assumption here is that any problem that has not yet recorded a score for a user is worth the same number of points. An XBlock is free to score one student at 2/5 and another at 1/3. But a problem that has never issued a score -- say a problem two students have only seen mentioned in their progress pages and never interacted with -- should be worth the same number of points for everyone. 
""" def __init__(self, cache_prefix): self.cache_prefix = cache_prefix self._max_scores_cache = {} self._max_scores_updates = {} @classmethod def create_for_course(cls, course): """ Given a CourseDescriptor, return a correctly configured `MaxScoresCache` This method will base the `MaxScoresCache` cache prefix value on the last time something was published to the live version of the course. This is so that we don't have to worry about stale cached values for max scores -- any time a content change occurs, we change our cache keys. """ if course.subtree_edited_on is None: # check for subtree_edited_on because old XML courses doesn't have this attribute cache_key = u"{}".format(course.id) else: cache_key = u"{}.{}".format(course.id, course.subtree_edited_on.isoformat()) return cls(cache_key) def fetch_from_remote(self, locations): """ Populate the local cache with values from django's cache """ remote_dict = cache.get_many([self._remote_cache_key(loc) for loc in locations]) self._max_scores_cache = { self._local_cache_key(remote_key): value for remote_key, value in remote_dict.items() if value is not None } def push_to_remote(self): """ Update the remote cache """ if self._max_scores_updates: cache.set_many( { self._remote_cache_key(key): value for key, value in self._max_scores_updates.items() }, 60 * 60 * 24 # 1 day ) def _remote_cache_key(self, location): """Convert a location to a remote cache key (add our prefixing).""" return u"grades.MaxScores.{}___{}".format(self.cache_prefix, unicode(location)) def _local_cache_key(self, remote_key): """Convert a remote cache key to a local cache key (i.e. 
location str).""" return remote_key.split(u"___", 1)[1] def num_cached_from_remote(self): """How many items did we pull down from the remote cache?""" return len(self._max_scores_cache) def num_cached_updates(self): """How many local updates are we waiting to push to the remote cache?""" return len(self._max_scores_updates) def set(self, location, max_score): """ Adds a max score to the max_score_cache """ loc_str = unicode(location) if self._max_scores_cache.get(loc_str) != max_score: self._max_scores_updates[loc_str] = max_score def get(self, location): """ Retrieve a max score from the cache """ loc_str = unicode(location) max_score = self._max_scores_updates.get(loc_str) if max_score is None: max_score = self._max_scores_cache.get(loc_str) return max_score class ProgressSummary(object): """ Wrapper class for the computation of a user's scores across a course. Attributes chapters: a summary of all sections with problems in the course. It is organized as an array of chapters, each containing an array of sections, each containing an array of scores. This contains information for graded and ungraded problems, and is good for displaying a course summary with due dates, etc. weighted_scores: a dictionary mapping module locations to weighted Score objects. locations_to_children: a dictionary mapping module locations to their direct descendants. """ def __init__(self, chapters, weighted_scores, locations_to_children): self.chapters = chapters self.weighted_scores = weighted_scores self.locations_to_children = locations_to_children def score_for_module(self, location): """ Calculate the aggregate weighted score for any location in the course. This method returns a tuple containing (earned_score, possible_score). If the location is of 'problem' type, this method will return the possible and earned scores for that problem. 
If the location refers to a composite module (a vertical or section ) the scores will be the sums of all scored problems that are children of the chosen location. """ if location in self.weighted_scores: score = self.weighted_scores[location] return score.earned, score.possible children = self.locations_to_children[location] earned = 0.0 possible = 0.0 for child in children: child_earned, child_possible = self.score_for_module(child) earned += child_earned possible += child_possible return earned, possible def descriptor_affects_grading(block_types_affecting_grading, descriptor): """ Returns True if the descriptor could have any impact on grading, else False. Something might be a scored item if it is capable of storing a score (has_score=True). We also have to include anything that can have children, since those children might have scores. We can avoid things like Videos, which have state but cannot ever impact someone's grade. """ return descriptor.location.block_type in block_types_affecting_grading def field_data_cache_for_grading(course, user): """ Given a CourseDescriptor and User, create the FieldDataCache for grading. This will generate a FieldDataCache that only loads state for those things that might possibly affect the grading process, and will ignore things like Videos. """ descriptor_filter = partial(descriptor_affects_grading, course.block_types_affecting_grading) return FieldDataCache.cache_for_descriptor_descendents( course.id, user, course, depth=None, descriptor_filter=descriptor_filter ) def answer_distributions(course_key): """ Given a course_key, return answer distributions in the form of a dictionary mapping: (problem url_name, problem display_name, problem_id) -> {dict: answer -> count} Answer distributions are found by iterating through all StudentModule entries for a given course with type="problem" and a grade that is not null. This means that we only count LoncapaProblems that people have submitted. 
Other types of items like ORA or sequences will not be collected. Empty Loncapa problem state that gets created from runnig the progress page is also not counted. This method accesses the StudentModule table directly instead of using the CapaModule abstraction. The main reason for this is so that we can generate the report without any side-effects -- we don't have to worry about answer distribution potentially causing re-evaluation of the student answer. This also allows us to use the read-replica database, which reduces risk of bad locking behavior. And quite frankly, it makes this a lot less confusing. Also, we're pulling all available records from the database for this course rather than crawling through a student's course-tree -- the latter could potentially cause us trouble with A/B testing. The distribution report may not be aware of problems that are not visible to the user being used to generate the report. This method will try to use a read-replica database if one is available. """ # dict: { module.module_state_key : (url_name, display_name) } state_keys_to_problem_info = {} # For caching, used by url_and_display_name def url_and_display_name(usage_key): """ For a given usage_key, return the problem's url and display_name. Handle modulestore access and caching. This method ignores permissions. Raises: InvalidKeyError: if the usage_key does not parse ItemNotFoundError: if there is no content that corresponds to this usage_key. 
""" problem_store = modulestore() if usage_key not in state_keys_to_problem_info: problem = problem_store.get_item(usage_key) problem_info = (problem.url_name, problem.display_name_with_default) state_keys_to_problem_info[usage_key] = problem_info return state_keys_to_problem_info[usage_key] # Iterate through all problems submitted for this course in no particular # order, and build up our answer_counts dict that we will eventually return answer_counts = defaultdict(lambda: defaultdict(int)) for module in StudentModule.all_submitted_problems_read_only(course_key): try: state_dict = json.loads(module.state) if module.state else {} raw_answers = state_dict.get("student_answers", {}) except ValueError: log.error( u"Answer Distribution: Could not parse module state for StudentModule id=%s, course=%s", module.id, course_key, ) continue try: url, display_name = url_and_display_name(module.module_state_key.map_into_course(course_key)) # Each problem part has an ID that is derived from the # module.module_state_key (with some suffix appended) for problem_part_id, raw_answer in raw_answers.items(): # Convert whatever raw answers we have (numbers, unicode, None, etc.) # to be unicode values. Note that if we get a string, it's always # unicode and not str -- state comes from the json decoder, and that # always returns unicode for strings. answer = unicode(raw_answer) answer_counts[(url, display_name, problem_part_id)][answer] += 1 except (ItemNotFoundError, InvalidKeyError): msg = ( "Answer Distribution: Item {} referenced in StudentModule {} " + "for user {} in course {} not found; " + "This can happen if a student answered a question that " + "was later deleted from the course. This answer will be " + "omitted from the answer distribution CSV." 
            ).format(
                module.module_state_key,
                module.id,
                module.student_id,
                course_key
            )
            log.warning(msg)
            continue

    return answer_counts


# NOTE: manual transaction management -- the actual commit/rollback happens
# inside the manual_transaction() context manager used in the body.
@transaction.commit_manually
def grade(student, request, course, keep_raw_scores=False, field_data_cache=None, scores_client=None):
    """
    Wraps "_grade" with the manual_transaction context manager just in case
    there are unanticipated errors.
    Send a signal to update the minimum grade requirement status.
    """
    with manual_transaction():
        grade_summary = _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client)
        # send_robust returns (receiver, response) pairs instead of raising,
        # so one failing listener cannot abort grading.
        responses = GRADES_UPDATED.send_robust(
            sender=None,
            username=student.username,
            grade_summary=grade_summary,
            course_key=course.id,
            deadline=course.end
        )

        for receiver, response in responses:
            log.info('Signal fired when student grade is calculated. Receiver: %s. Response: %s', receiver, response)

    return grade_summary


def _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client):
    """
    Unwrapped version of "grade"

    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
      up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
      make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
      for every graded module

    More information on the format is in the docstring for CourseGrader.
    """
    # Both caches are optional so callers can share them across repeated
    # grading calls for the same student/course.
    if field_data_cache is None:
        with manual_transaction():
            field_data_cache = field_data_cache_for_grading(course, student)
    if scores_client is None:
        scores_client = ScoresClient.from_field_data_cache(field_data_cache)

    # Dict of item_ids -> (earned, possible) point tuples.
This *only* grabs # scores that were registered with the submissions API, which for the moment # means only openassessment (edx-ora2) submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)) max_scores_cache = MaxScoresCache.create_for_course(course) # For the moment, we have to get scorable_locations from field_data_cache # and not from scores_client, because scores_client is ignorant of things # in the submissions API. As a further refactoring step, submissions should # be hidden behind the ScoresClient. max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations) grading_context = course.grading_context raw_scores = [] totaled_scores = {} # This next complicated loop is just to collect the totaled_scores, which is # passed to the grader for section_format, sections in grading_context['graded_sections'].iteritems(): format_scores = [] for section in sections: section_descriptor = section['section_descriptor'] section_name = section_descriptor.display_name_with_default # some problems have state that is updated independently of interaction # with the LMS, so they need to always be scored. (E.g. foldit., # combinedopenended) should_grade_section = any( descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors'] ) # If there are no problems that always have to be regraded, check to # see if any of our locations are in the scores from the submissions # API. If scores exist, we have to calculate grades for this section. if not should_grade_section: should_grade_section = any( descriptor.location.to_deprecated_string() in submissions_scores for descriptor in section['xmoduledescriptors'] ) if not should_grade_section: should_grade_section = any( descriptor.location in scores_client for descriptor in section['xmoduledescriptors'] ) # If we haven't seen a single problem in the section, we don't have # to grade it at all! 
We can assume 0% if should_grade_section: scores = [] def create_module(descriptor): '''creates an XModule instance given a descriptor''' # TODO: We need the request to pass into here. If we could forego that, our arguments # would be simpler return get_module_for_descriptor( student, request, descriptor, field_data_cache, course.id, course=course ) descendants = yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module) for module_descriptor in descendants: (correct, total) = get_score( student, module_descriptor, create_module, scores_client, submissions_scores, max_scores_cache, ) if correct is None and total is None: continue if settings.GENERATE_PROFILE_SCORES: # for debugging! if total > 1: correct = random.randrange(max(total - 2, 1), total + 1) else: correct = total graded = module_descriptor.graded if not total > 0: # We simply cannot grade a problem that is 12/0, because we might need it as a percentage graded = False scores.append( Score( correct, total, graded, module_descriptor.display_name_with_default, module_descriptor.location ) ) __, graded_total = graders.aggregate_scores(scores, section_name) if keep_raw_scores: raw_scores += scores else: graded_total = Score(0.0, 1.0, True, section_name, None) #Add the graded total to totaled_scores if graded_total.possible > 0: format_scores.append(graded_total) else: log.info( "Unable to grade a section with a total possible score of zero. 
" + str(section_descriptor.location) ) totaled_scores[section_format] = format_scores # Grading policy might be overriden by a CCX, need to reset it course.set_grading_policy(course.grading_policy) grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES) # We round the grade here, to make sure that the grade is an whole percentage and # doesn't get displayed differently than it gets grades grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100 letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent']) grade_summary['grade'] = letter_grade grade_summary['totaled_scores'] = totaled_scores # make this available, eg for instructor download & debugging if keep_raw_scores: # way to get all RAW scores out to instructor # so grader can be double-checked grade_summary['raw_scores'] = raw_scores max_scores_cache.push_to_remote() return grade_summary def grade_for_percentage(grade_cutoffs, percentage): """ Returns a letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None. Arguments - grade_cutoffs is a dictionary mapping a grade to the lowest possible percentage to earn that grade. - percentage is the final percent across all problems in a course """ letter_grade = None # Possible grades, sorted in descending order of score descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True) for possible_grade in descending_grades: if percentage >= grade_cutoffs[possible_grade]: letter_grade = possible_grade break return letter_grade @transaction.commit_manually def progress_summary(student, request, course, field_data_cache=None, scores_client=None): """ Wraps "_progress_summary" with the manual_transaction context manager just in case there are unanticipated errors. 
""" with manual_transaction(): progress = _progress_summary(student, request, course, field_data_cache, scores_client) if progress: return progress.chapters else: return None @transaction.commit_manually def get_weighted_scores(student, course, field_data_cache=None, scores_client=None): """ Uses the _progress_summary method to return a ProgressSummmary object containing details of a students weighted scores for the course. """ with manual_transaction(): request = _get_mock_request(student) return _progress_summary(student, request, course, field_data_cache, scores_client) # TODO: This method is not very good. It was written in the old course style and # then converted over and performance is not good. Once the progress page is redesigned # to not have the progress summary this method should be deleted (so it won't be copied). def _progress_summary(student, request, course, field_data_cache=None, scores_client=None): """ Unwrapped version of "progress_summary". This pulls a summary of all problems in the course. Returns - courseware_summary is a summary of all sections with problems in the course. It is organized as an array of chapters, each containing an array of sections, each containing an array of scores. This contains information for graded and ungraded problems, and is good for displaying a course summary with due dates, etc. Arguments: student: A User object for the student to grade course: A Descriptor containing the course to grade If the student does not have access to load the course module, this function will return None. 
""" with manual_transaction(): if field_data_cache is None: field_data_cache = field_data_cache_for_grading(course, student) if scores_client is None: scores_client = ScoresClient.from_field_data_cache(field_data_cache) course_module = get_module_for_descriptor( student, request, course, field_data_cache, course.id, course=course ) if not course_module: return None course_module = getattr(course_module, '_x_module', course_module) submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)) max_scores_cache = MaxScoresCache.create_for_course(course) # For the moment, we have to get scorable_locations from field_data_cache # and not from scores_client, because scores_client is ignorant of things # in the submissions API. As a further refactoring step, submissions should # be hidden behind the ScoresClient. max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations) chapters = [] locations_to_children = defaultdict(list) locations_to_weighted_scores = {} # Don't include chapters that aren't displayable (e.g. 
due to error) for chapter_module in course_module.get_display_items(): # Skip if the chapter is hidden if chapter_module.hide_from_toc: continue sections = [] for section_module in chapter_module.get_display_items(): # Skip if the section is hidden with manual_transaction(): if section_module.hide_from_toc: continue graded = section_module.graded scores = [] module_creator = section_module.xmodule_runtime.get_module for module_descriptor in yield_dynamic_descriptor_descendants( section_module, student.id, module_creator ): locations_to_children[module_descriptor.parent].append(module_descriptor.location) (correct, total) = get_score( student, module_descriptor, module_creator, scores_client, submissions_scores, max_scores_cache, ) if correct is None and total is None: continue weighted_location_score = Score( correct, total, graded, module_descriptor.display_name_with_default, module_descriptor.location ) scores.append(weighted_location_score) locations_to_weighted_scores[module_descriptor.location] = weighted_location_score scores.reverse() section_total, _ = graders.aggregate_scores( scores, section_module.display_name_with_default) module_format = section_module.format if section_module.format is not None else '' sections.append({ 'display_name': section_module.display_name_with_default, 'url_name': section_module.url_name, 'scores': scores, 'section_total': section_total, 'format': module_format, 'due': section_module.due, 'graded': graded, }) chapters.append({ 'course': course.display_name_with_default, 'display_name': chapter_module.display_name_with_default, 'url_name': chapter_module.url_name, 'sections': sections }) max_scores_cache.push_to_remote() return ProgressSummary(chapters, locations_to_weighted_scores, locations_to_children) def weighted_score(raw_correct, raw_total, weight): """Return a tuple that represents the weighted (correct, total) score.""" # If there is no weighting, or weighting can't be applied, return input. 
if weight is None or raw_total == 0: return (raw_correct, raw_total) return (float(raw_correct) * weight / raw_total, float(weight)) def get_score(user, problem_descriptor, module_creator, scores_client, submissions_scores_cache, max_scores_cache): """ Return the score for a user on a problem, as a tuple (correct, total). e.g. (5,7) if you got 5 out of 7 points. If this problem doesn't have a score, or we couldn't load it, returns (None, None). user: a Student object problem_descriptor: an XModuleDescriptor scores_client: an initialized ScoresClient module_creator: a function that takes a descriptor, and returns the corresponding XModule for this user. Can return None if user doesn't have access, or if something else went wrong. submissions_scores_cache: A dict of location names to (earned, possible) point tuples. If an entry is found in this cache, it takes precedence. max_scores_cache: a MaxScoresCache """ submissions_scores_cache = submissions_scores_cache or {} if not user.is_authenticated(): return (None, None) location_url = problem_descriptor.location.to_deprecated_string() if location_url in submissions_scores_cache: return submissions_scores_cache[location_url] # some problems have state that is updated independently of interaction # with the LMS, so they need to always be scored. (E.g. foldit.) if problem_descriptor.always_recalculate_grades: problem = module_creator(problem_descriptor) if problem is None: return (None, None) score = problem.get_score() if score is not None: return (score['score'], score['total']) else: return (None, None) if not problem_descriptor.has_score: # These are not problems, and do not have a score return (None, None) # Check the score that comes from the ScoresClient (out of CSM). # If an entry exists and has a total associated with it, we trust that # value. 
This is important for cases where a student might have seen an # older version of the problem -- they're still graded on what was possible # when they tried the problem, not what it's worth now. score = scores_client.get(problem_descriptor.location) cached_max_score = max_scores_cache.get(problem_descriptor.location) if score and score.total is not None: # We have a valid score, just use it. correct = score.correct if score.correct is not None else 0.0 total = score.total elif cached_max_score is not None and settings.FEATURES.get("ENABLE_MAX_SCORE_CACHE"): # We don't have a valid score entry but we know from our cache what the # max possible score is, so they've earned 0.0 / cached_max_score correct = 0.0 total = cached_max_score else: # This means we don't have a valid score entry and we don't have a # cached_max_score on hand. We know they've earned 0.0 points on this, # but we need to instantiate the module (i.e. load student state) in # order to find out how much it was worth. problem = module_creator(problem_descriptor) if problem is None: return (None, None) correct = 0.0 total = problem.max_score() # Problem may be an error module (if something in the problem builder failed) # In which case total might be None if total is None: return (None, None) else: # add location to the max score cache max_scores_cache.set(problem_descriptor.location, total) return weighted_score(correct, total, problem_descriptor.weight) @contextmanager def manual_transaction(): """A context manager for managing manual transactions""" try: yield except Exception: transaction.rollback() log.exception('Due to an error, this transaction has been rolled back') raise else: transaction.commit() def iterate_grades_for(course_or_id, students, keep_raw_scores=False): """Given a course_id and an iterable of students (User), yield a tuple of: (student, gradeset, err_msg) for every student enrolled in the course. 
If an error occurred, gradeset will be an empty dict and err_msg will be an exception message. If there was no error, err_msg is an empty string. The gradeset is a dictionary with the following fields: - grade : A final letter grade. - percent : The final percent for the class (rounded up). - section_breakdown : A breakdown of each section that makes up the grade. (For display) - grade_breakdown : A breakdown of the major components that make up the final grade. (For display) - raw_scores: contains scores for every graded module """ if isinstance(course_or_id, (basestring, CourseKey)): course = courses.get_course_by_id(course_or_id) else: course = course_or_id for student in students: with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]): try: request = _get_mock_request(student) # Grading calls problem rendering, which calls masquerading, # which checks session vars -- thus the empty session dict below. # It's not pretty, but untangling that is currently beyond the # scope of this feature. request.session = {} gradeset = grade(student, request, course, keep_raw_scores) yield student, gradeset, "" except Exception as exc: # pylint: disable=broad-except # Keep marching on even if this student couldn't be graded for # some reason, but log it for future reference. log.exception( 'Cannot grade student %s (%s) in course %s because of exception: %s', student.username, student.id, course.id, exc.message ) yield student, {}, exc.message def _get_mock_request(student): """ Make a fake request because grading code expects to be able to look at the request. We have to attach the correct user to the request before grading that student. """ request = RequestFactory().get('/') request.user = student return request
agpl-3.0
hammerlab/pyensembl
setup.py
1
2738
# Copyright (c) 2014-2018. Mount Sinai School of Medicine # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import re from setuptools import setup package_name = "pyensembl" current_directory = os.path.dirname(__file__) readme_filename = 'README.md' readme_path = os.path.join(current_directory, readme_filename) github_url = "https://github.com/openvax/%s" % package_name try: with open(readme_path, 'r') as f: readme_markdown = f.read() except IOError as e: print(e) print("Failed to open %s" % readme_path) readme_markdown = "" with open('%s/__init__.py' % package_name, 'r') as f: version = re.search( r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1) if not version: raise RuntimeError('Cannot find version information') if __name__ == '__main__': setup( name=package_name, version=version, description="Python interface to ensembl reference genome metadata", author="Alex Rubinsteyn", author_email="alex.rubinsteyn@mssm.edu", url=github_url, license="http://www.apache.org/licenses/LICENSE-2.0.html", entry_points={ 'console_scripts': [ 'pyensembl = %s.shell:run' % package_name ], }, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Operating System :: OS Independent', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', ], install_requires=[ "typechecks>=0.0.2", 
"numpy>=1.7", "pandas>=0.15", "datacache>=1.1.4", "memoized-property>=1.0.2", "six>=1.9.0", "gtfparse>=1.1.0", "serializable", "tinytimer", ], long_description=readme_markdown, long_description_content_type='text/markdown', packages=[package_name], package_data={package_name: ['logging.conf']}, )
apache-2.0
xsynergy510x/android_external_chromium_org
tools/perf/benchmarks/dromaeo.py
27
6772
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import math
import os

from metrics import power
from telemetry import benchmark
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import scalar


class _DromaeoMeasurement(page_test.PageTest):
  """Runs one Dromaeo page, captures its results via an injected jQuery.ajax
  spy, and records per-test and geometric-mean scores plus power metrics."""

  def __init__(self):
    super(_DromaeoMeasurement, self).__init__()
    self._power_metric = None

  def CustomizeBrowserOptions(self, options):
    power.PowerMetric.CustomizeBrowserOptions(options)

  def WillStartBrowser(self, platform):
    self._power_metric = power.PowerMetric(platform)

  def DidNavigateToPage(self, page, tab):
    self._power_metric.Start(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    # Wait (up to 2 minutes) for the page's Run button to be ready.
    tab.WaitForJavaScriptExpression(
        'window.document.getElementById("pause") &&' +
        'window.document.getElementById("pause").value == "Run"',
        120)

    # Start spying on POST request that will report benchmark results, and
    # intercept result data.
    # NOTE(review): the spy saves window.jQuery (not window.jQuery.ajax) as
    # real_jquery_ajax_ before calling it -- results are still captured via
    # window.results_, but the forwarded call looks suspect; confirm intent.
    tab.ExecuteJavaScript('(function() {' +
                          ' var real_jquery_ajax_ = window.jQuery;' +
                          ' window.results_ = "";' +
                          ' window.jQuery.ajax = function(request) {' +
                          ' if (request.url == "store.php") {' +
                          ' window.results_ =' +
                          ' decodeURIComponent(request.data);' +
                          ' window.results_ = window.results_.substring(' +
                          ' window.results_.indexOf("=") + 1, ' +
                          ' window.results_.lastIndexOf("&"));' +
                          ' real_jquery_ajax_(request);' +
                          ' }' +
                          ' };' +
                          '})();')
    # Starts benchmark.
    tab.ExecuteJavaScript('window.document.getElementById("pause").click();')

    tab.WaitForJavaScriptExpression('!!window.results_', 600)

    self._power_metric.Stop(page, tab)
    self._power_metric.AddResults(tab, results)

    # NOTE(review): eval() on data produced by the benchmark page. This trusts
    # the (local, recorded) Dromaeo page; if the payload is strict JSON,
    # json.loads would be safer -- TODO confirm payload format.
    score = eval(tab.EvaluateJavaScript('window.results_ || "[]"'))

    def Escape(k):
      # Replace characters that are not valid in a metric name.
      chars = [' ', '.', '-', '/', '(', ')', '*']
      for c in chars:
        k = k.replace(c, '_')
      return k

    def AggregateData(container, key, value):
      # Accumulate log-values so the final aggregate is a geometric mean.
      if key not in container:
        container[key] = {'count': 0, 'sum': 0}
      container[key]['count'] += 1
      container[key]['sum'] += math.log(value)

    # The query string (e.g. 'dom-attr') identifies this benchmark variant;
    # the matching aggregate is marked as the "important" result.
    suffix = page.url[page.url.index('?') + 1 :]

    def AddResult(name, value):
      important = False
      if name == suffix:
        important = True
      results.AddValue(scalar.ScalarValue(
          results.current_page, Escape(name), 'runs/s', value, important))

    aggregated = {}
    for data in score:
      AddResult('%s/%s' % (data['collection'], data['name']), data['mean'])

      top_name = data['collection'].split('-', 1)[0]
      AggregateData(aggregated, top_name, data['mean'])

      collection_name = data['collection']
      AggregateData(aggregated, collection_name, data['mean'])

    # Geometric mean: exp(mean(log(values))).
    for key, value in aggregated.iteritems():
      AddResult(key, math.exp(value['sum'] / value['count']))


class _DromaeoBenchmark(benchmark.Benchmark):
  """A base class for Dromaeo benchmarks."""
  test = _DromaeoMeasurement

  def CreatePageSet(self, options):
    """Makes a PageSet for Dromaeo benchmarks."""
    # Subclasses are expected to define class members called query_param and
    # tag.
    if not hasattr(self, 'query_param') or not hasattr(self, 'tag'):
      raise NotImplementedError('query_param or tag not in Dromaeo benchmark.')
    archive_data_file = '../page_sets/data/dromaeo.%s.json' % self.tag
    ps = page_set.PageSet(
        make_javascript_deterministic=False,
        archive_data_file=archive_data_file,
        file_path=os.path.abspath(__file__))
    url = 'http://dromaeo.com?%s' % self.query_param
    ps.AddPageWithDefaultRunNavigate(url)
    return ps


class DromaeoDomCoreAttr(_DromaeoBenchmark):
  """Dromaeo DOMCore attr JavaScript benchmark."""
  tag = 'domcoreattr'
  query_param = 'dom-attr'


class DromaeoDomCoreModify(_DromaeoBenchmark):
  """Dromaeo DOMCore modify JavaScript benchmark."""
  tag = 'domcoremodify'
  query_param = 'dom-modify'


class DromaeoDomCoreQuery(_DromaeoBenchmark):
  """Dromaeo DOMCore query JavaScript benchmark."""
  tag = 'domcorequery'
  query_param = 'dom-query'


class DromaeoDomCoreTraverse(_DromaeoBenchmark):
  """Dromaeo DOMCore traverse JavaScript benchmark."""
  tag = 'domcoretraverse'
  query_param = 'dom-traverse'


class DromaeoJslibAttrJquery(_DromaeoBenchmark):
  """Dromaeo JSLib attr jquery JavaScript benchmark"""
  tag = 'jslibattrjquery'
  query_param = 'jslib-attr-jquery'


class DromaeoJslibAttrPrototype(_DromaeoBenchmark):
  """Dromaeo JSLib attr prototype JavaScript benchmark"""
  tag = 'jslibattrprototype'
  query_param = 'jslib-attr-prototype'


class DromaeoJslibEventJquery(_DromaeoBenchmark):
  """Dromaeo JSLib event jquery JavaScript benchmark"""
  tag = 'jslibeventjquery'
  query_param = 'jslib-event-jquery'


class DromaeoJslibEventPrototype(_DromaeoBenchmark):
  """Dromaeo JSLib event prototype JavaScript benchmark"""
  tag = 'jslibeventprototype'
  query_param = 'jslib-event-prototype'


@benchmark.Disabled('xp')  # crbug.com/389731
class DromaeoJslibModifyJquery(_DromaeoBenchmark):
  """Dromaeo JSLib modify jquery JavaScript benchmark"""
  tag = 'jslibmodifyjquery'
  query_param = 'jslib-modify-jquery'


class DromaeoJslibModifyPrototype(_DromaeoBenchmark):
  """Dromaeo JSLib modify prototype JavaScript benchmark"""
  tag = 'jslibmodifyprototype'
  query_param = 'jslib-modify-prototype'


class DromaeoJslibStyleJquery(_DromaeoBenchmark):
  """Dromaeo JSLib style jquery JavaScript benchmark"""
  tag = 'jslibstylejquery'
  query_param = 'jslib-style-jquery'


class DromaeoJslibStylePrototype(_DromaeoBenchmark):
  """Dromaeo JSLib style prototype JavaScript benchmark"""
  tag = 'jslibstyleprototype'
  query_param = 'jslib-style-prototype'


class DromaeoJslibTraverseJquery(_DromaeoBenchmark):
  """Dromaeo JSLib traverse jquery JavaScript benchmark"""
  tag = 'jslibtraversejquery'
  query_param = 'jslib-traverse-jquery'


class DromaeoJslibTraversePrototype(_DromaeoBenchmark):
  """Dromaeo JSLib traverse prototype JavaScript benchmark"""
  tag = 'jslibtraverseprototype'
  query_param = 'jslib-traverse-prototype'


class DromaeoCSSQueryJquery(_DromaeoBenchmark):
  """Dromaeo CSS Query jquery JavaScript benchmark"""
  tag = 'cssqueryjquery'
  query_param = 'cssquery-jquery'
bsd-3-clause
viki9698/jizhanggroup
django/utils/unittest/result.py
570
6105
"""Test result object""" import sys import traceback import unittest from StringIO import StringIO from django.utils.unittest import util from django.utils.unittest.compatibility import wraps __unittest = True def failfast(method): @wraps(method) def inner(self, *args, **kw): if getattr(self, 'failfast', False): self.stop() return method(self, *args, **kw) return inner STDOUT_LINE = '\nStdout:\n%s' STDERR_LINE = '\nStderr:\n%s' class TestResult(unittest.TestResult): """Holder for test result information. Test results are automatically managed by the TestCase and TestSuite classes, and do not need to be explicitly manipulated by writers of tests. Each instance holds the total number of tests run, and collections of failures and errors that occurred among those test runs. The collections contain tuples of (testcase, exceptioninfo), where exceptioninfo is the formatted traceback of the error that occurred. """ _previousTestClass = None _moduleSetUpFailed = False def __init__(self): self.failfast = False self.failures = [] self.errors = [] self.testsRun = 0 self.skipped = [] self.expectedFailures = [] self.unexpectedSuccesses = [] self.shouldStop = False self.buffer = False self._stdout_buffer = None self._stderr_buffer = None self._original_stdout = sys.stdout self._original_stderr = sys.stderr self._mirrorOutput = False def startTest(self, test): "Called when the given test is about to be run" self.testsRun += 1 self._mirrorOutput = False if self.buffer: if self._stderr_buffer is None: self._stderr_buffer = StringIO() self._stdout_buffer = StringIO() sys.stdout = self._stdout_buffer sys.stderr = self._stderr_buffer def startTestRun(self): """Called once before any tests are executed. See startTest for a method called before each test. 
""" def stopTest(self, test): """Called when the given test has been run""" if self.buffer: if self._mirrorOutput: output = sys.stdout.getvalue() error = sys.stderr.getvalue() if output: if not output.endswith('\n'): output += '\n' self._original_stdout.write(STDOUT_LINE % output) if error: if not error.endswith('\n'): error += '\n' self._original_stderr.write(STDERR_LINE % error) sys.stdout = self._original_stdout sys.stderr = self._original_stderr self._stdout_buffer.seek(0) self._stdout_buffer.truncate() self._stderr_buffer.seek(0) self._stderr_buffer.truncate() self._mirrorOutput = False def stopTestRun(self): """Called once after all tests are executed. See stopTest for a method called after each test. """ @failfast def addError(self, test, err): """Called when an error has occurred. 'err' is a tuple of values as returned by sys.exc_info(). """ self.errors.append((test, self._exc_info_to_string(err, test))) self._mirrorOutput = True @failfast def addFailure(self, test, err): """Called when an error has occurred. 
'err' is a tuple of values as returned by sys.exc_info().""" self.failures.append((test, self._exc_info_to_string(err, test))) self._mirrorOutput = True def addSuccess(self, test): "Called when a test has completed successfully" pass def addSkip(self, test, reason): """Called when a test is skipped.""" self.skipped.append((test, reason)) def addExpectedFailure(self, test, err): """Called when an expected failure/error occured.""" self.expectedFailures.append( (test, self._exc_info_to_string(err, test))) @failfast def addUnexpectedSuccess(self, test): """Called when a test was expected to fail, but succeed.""" self.unexpectedSuccesses.append(test) def wasSuccessful(self): "Tells whether or not this result was a success" return (len(self.failures) + len(self.errors) == 0) def stop(self): "Indicates that the tests should be aborted" self.shouldStop = True def _exc_info_to_string(self, err, test): """Converts a sys.exc_info()-style tuple of values into a string.""" exctype, value, tb = err # Skip test runner traceback levels while tb and self._is_relevant_tb_level(tb): tb = tb.tb_next if exctype is test.failureException: # Skip assert*() traceback levels length = self._count_relevant_tb_levels(tb) msgLines = traceback.format_exception(exctype, value, tb, length) else: msgLines = traceback.format_exception(exctype, value, tb) if self.buffer: output = sys.stdout.getvalue() error = sys.stderr.getvalue() if output: if not output.endswith('\n'): output += '\n' msgLines.append(STDOUT_LINE % output) if error: if not error.endswith('\n'): error += '\n' msgLines.append(STDERR_LINE % error) return ''.join(msgLines) def _is_relevant_tb_level(self, tb): return '__unittest' in tb.tb_frame.f_globals def _count_relevant_tb_levels(self, tb): length = 0 while tb and not self._is_relevant_tb_level(tb): length += 1 tb = tb.tb_next return length def __repr__(self): return "<%s run=%i errors=%i failures=%i>" % \ (util.strclass(self.__class__), self.testsRun, len(self.errors), 
len(self.failures))
bsd-3-clause
ganeshrn/ansible
lib/ansible/plugins/callback/__init__.py
9
17661
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import difflib import json import sys from copy import deepcopy from ansible import constants as C from ansible.module_utils.common._collections_compat import MutableMapping from ansible.module_utils.six import PY3 from ansible.module_utils._text import to_text from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins import AnsiblePlugin, get_plugin_class from ansible.utils.color import stringc from ansible.utils.display import Display from ansible.vars.clean import strip_internal_keys, module_response_deepcopy if PY3: # OrderedDict is needed for a backwards compat shim on Python3.x only # https://github.com/ansible/ansible/pull/49512 from collections import OrderedDict else: OrderedDict = None global_display = Display() __all__ = ["CallbackBase"] _DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations')) class CallbackBase(AnsiblePlugin): ''' This is a base ansible callback class that does nothing. New callbacks should use this class as a base and override any callback methods they wish to execute custom actions. 
''' def __init__(self, display=None, options=None): if display: self._display = display else: self._display = global_display if self._display.verbosity >= 4: name = getattr(self, 'CALLBACK_NAME', 'unnamed') ctype = getattr(self, 'CALLBACK_TYPE', 'old') version = getattr(self, 'CALLBACK_VERSION', '1.0') self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__)) self.disabled = False self.wants_implicit_tasks = False self._plugin_options = {} if options is not None: self.set_options(options) self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason') ''' helper for callbacks, so they don't all have to include deepcopy ''' _copy_result = deepcopy def set_option(self, k, v): self._plugin_options[k] = v def get_option(self, k): return self._plugin_options[k] def set_options(self, task_keys=None, var_options=None, direct=None): ''' This is different than the normal plugin method as callbacks get called early and really don't accept keywords. Also _options was already taken for CLI args and callbacks use _plugin_options instead. ''' # load from config self._plugin_options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct) @staticmethod def host_label(result): """Return label for the hostname (& delegated hostname) of a task result. 
""" label = "%s" % result._host.get_name() if result._task.delegate_to and result._task.delegate_to != result._host.get_name(): # show delegated host label += " -> %s" % result._task.delegate_to # in case we have 'extra resolution' ahost = result._result.get('_ansible_delegated_vars', {}).get('ansible_host', result._task.delegate_to) if result._task.delegate_to != ahost: label += "(%s)" % ahost return label def _run_is_verbose(self, result, verbosity=0): return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True) and result._result.get('_ansible_verbose_override', False) is False) def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2): indent = 4 # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything. abridged_result = strip_internal_keys(module_response_deepcopy(result)) # remove invocation unless specifically wanting it if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: del abridged_result['invocation'] # remove diff information from screen output if self._display.verbosity < 3 and 'diff' in result: del abridged_result['diff'] # remove exception from screen output if 'exception' in abridged_result: del abridged_result['exception'] try: jsonified_results = json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys) except TypeError: # Python3 bug: throws an exception when keys are non-homogenous types: # https://bugs.python.org/issue25457 # sort into an OrderedDict and then json.dumps() that instead if not OrderedDict: raise jsonified_results = json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)), cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=False) return jsonified_results def _handle_warnings(self, res): ''' display 
warnings, if enabled and any exist in the result ''' if C.ACTION_WARNINGS: if 'warnings' in res and res['warnings']: for warning in res['warnings']: self._display.warning(warning) del res['warnings'] if 'deprecations' in res and res['deprecations']: for warning in res['deprecations']: self._display.deprecated(**warning) del res['deprecations'] def _handle_exception(self, result, use_stderr=False): if 'exception' in result: msg = "An exception occurred during task execution. " if self._display.verbosity < 3: # extract just the actual error message from the exception text error = result['exception'].strip().split('\n')[-1] msg += "To see the full traceback, use -vvv. The error was: %s" % error else: msg = "The full traceback is:\n" + result['exception'] del result['exception'] self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr) def _serialize_diff(self, diff): return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n' def _get_diff(self, difflist): if not isinstance(difflist, list): difflist = [difflist] ret = [] for diff in difflist: if 'dst_binary' in diff: ret.append(u"diff skipped: destination file appears to be binary\n") if 'src_binary' in diff: ret.append(u"diff skipped: source file appears to be binary\n") if 'dst_larger' in diff: ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger']) if 'src_larger' in diff: ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger']) if 'before' in diff and 'after' in diff: # format complex structures into 'files' for x in ['before', 'after']: if isinstance(diff[x], MutableMapping): diff[x] = self._serialize_diff(diff[x]) elif diff[x] is None: diff[x] = '' if 'before_header' in diff: before_header = u"before: %s" % diff['before_header'] else: before_header = u'before' if 'after_header' in diff: after_header = u"after: %s" % diff['after_header'] else: after_header = u'after' before_lines = diff['before'].splitlines(True) 
after_lines = diff['after'].splitlines(True) if before_lines and not before_lines[-1].endswith(u'\n'): before_lines[-1] += u'\n\\ No newline at end of file\n' if after_lines and not after_lines[-1].endswith('\n'): after_lines[-1] += u'\n\\ No newline at end of file\n' differ = difflib.unified_diff(before_lines, after_lines, fromfile=before_header, tofile=after_header, fromfiledate=u'', tofiledate=u'', n=C.DIFF_CONTEXT) difflines = list(differ) if len(difflines) >= 3 and sys.version_info[:2] == (2, 6): # difflib in Python 2.6 adds trailing spaces after # filenames in the -- before/++ after headers. difflines[0] = difflines[0].replace(u' \n', u'\n') difflines[1] = difflines[1].replace(u' \n', u'\n') # it also treats empty files differently difflines[2] = difflines[2].replace(u'-1,0', u'-0,0').replace(u'+1,0', u'+0,0') has_diff = False for line in difflines: has_diff = True if line.startswith(u'+'): line = stringc(line, C.COLOR_DIFF_ADD) elif line.startswith(u'-'): line = stringc(line, C.COLOR_DIFF_REMOVE) elif line.startswith(u'@@'): line = stringc(line, C.COLOR_DIFF_LINES) ret.append(line) if has_diff: ret.append('\n') if 'prepared' in diff: ret.append(diff['prepared']) return u''.join(ret) def _get_item_label(self, result): ''' retrieves the value to be displayed as a label for an item entry from a result object''' if result.get('_ansible_no_log', False): item = "(censored due to no_log)" else: item = result.get('_ansible_item_label', result.get('item')) return item def _process_items(self, result): # just remove them as now they get handled by individual callbacks del result._result['results'] def _clean_results(self, result, task_name): ''' removes data from results for display ''' # mostly controls that debug only outputs what it was meant to if task_name in C._ACTION_DEBUG: if 'msg' in result: # msg should be alone for key in list(result.keys()): if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'): result.pop(key) else: # 'var' value as field, so 
eliminate others and what is left should be varname for hidme in self._hide_in_debug: result.pop(hidme, None) def _print_task_path(self, task, color=C.COLOR_DEBUG): path = task.get_path() if path: self._display.display(u"task path: %s" % path, color=color) def set_play_context(self, play_context): pass def on_any(self, *args, **kwargs): pass def runner_on_failed(self, host, res, ignore_errors=False): pass def runner_on_ok(self, host, res): pass def runner_on_skipped(self, host, item=None): pass def runner_on_unreachable(self, host, res): pass def runner_on_no_hosts(self): pass def runner_on_async_poll(self, host, res, jid, clock): pass def runner_on_async_ok(self, host, res, jid): pass def runner_on_async_failed(self, host, res, jid): pass def playbook_on_start(self): pass def playbook_on_notify(self, host, handler): pass def playbook_on_no_hosts_matched(self): pass def playbook_on_no_hosts_remaining(self): pass def playbook_on_task_start(self, name, is_conditional): pass def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None): pass def playbook_on_setup(self): pass def playbook_on_import_for_host(self, host, imported_file): pass def playbook_on_not_import_for_host(self, host, missing_file): pass def playbook_on_play_start(self, name): pass def playbook_on_stats(self, stats): pass def on_file_diff(self, host, diff): pass # V2 METHODS, by default they call v1 counterparts if possible def v2_on_any(self, *args, **kwargs): self.on_any(args, kwargs) def v2_runner_on_failed(self, result, ignore_errors=False): host = result._host.get_name() self.runner_on_failed(host, result._result, ignore_errors) def v2_runner_on_ok(self, result): host = result._host.get_name() self.runner_on_ok(host, result._result) def v2_runner_on_skipped(self, result): if C.DISPLAY_SKIPPED_HOSTS: host = result._host.get_name() self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 
'results', {}))) def v2_runner_on_unreachable(self, result): host = result._host.get_name() self.runner_on_unreachable(host, result._result) def v2_runner_on_async_poll(self, result): host = result._host.get_name() jid = result._result.get('ansible_job_id') # FIXME, get real clock clock = 0 self.runner_on_async_poll(host, result._result, jid, clock) def v2_runner_on_async_ok(self, result): host = result._host.get_name() jid = result._result.get('ansible_job_id') self.runner_on_async_ok(host, result._result, jid) def v2_runner_on_async_failed(self, result): host = result._host.get_name() # Attempt to get the async job ID. If the job does not finish before the # async timeout value, the ID may be within the unparsed 'async_result' dict. jid = result._result.get('ansible_job_id') if not jid and 'async_result' in result._result: jid = result._result['async_result'].get('ansible_job_id') self.runner_on_async_failed(host, result._result, jid) def v2_playbook_on_start(self, playbook): self.playbook_on_start() def v2_playbook_on_notify(self, handler, host): self.playbook_on_notify(host, handler) def v2_playbook_on_no_hosts_matched(self): self.playbook_on_no_hosts_matched() def v2_playbook_on_no_hosts_remaining(self): self.playbook_on_no_hosts_remaining() def v2_playbook_on_task_start(self, task, is_conditional): self.playbook_on_task_start(task.name, is_conditional) # FIXME: not called def v2_playbook_on_cleanup_task_start(self, task): pass # no v1 correspondence def v2_playbook_on_handler_task_start(self, task): pass # no v1 correspondence def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None): self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe) # FIXME: not called def v2_playbook_on_import_for_host(self, result, imported_file): host = result._host.get_name() self.playbook_on_import_for_host(host, imported_file) # 
FIXME: not called def v2_playbook_on_not_import_for_host(self, result, missing_file): host = result._host.get_name() self.playbook_on_not_import_for_host(host, missing_file) def v2_playbook_on_play_start(self, play): self.playbook_on_play_start(play.name) def v2_playbook_on_stats(self, stats): self.playbook_on_stats(stats) def v2_on_file_diff(self, result): if 'diff' in result._result: host = result._host.get_name() self.on_file_diff(host, result._result['diff']) def v2_playbook_on_include(self, included_file): pass # no v1 correspondence def v2_runner_item_on_ok(self, result): pass def v2_runner_item_on_failed(self, result): pass def v2_runner_item_on_skipped(self, result): pass def v2_runner_retry(self, result): pass def v2_runner_on_start(self, host, task): """Event used when host begins execution of a task .. versionadded:: 2.8 """ pass
gpl-3.0
google/google-ctf
third_party/edk2/BaseTools/Source/Python/GenFds/GuidSection.py
1
12965
## @file # process GUIDed section generation # # Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR> # Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR> # # This program and the accompanying materials # are licensed and made available under the terms and conditions of the BSD License # which accompanies this distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## # Import Modules # from __future__ import absolute_import from . import Section import subprocess from .Ffs import SectionSuffix import Common.LongFilePathOs as os from .GenFdsGlobalVariable import GenFdsGlobalVariable from .GenFdsGlobalVariable import FindExtendTool from CommonDataClass.FdfClass import GuidSectionClassObject import sys from Common import EdkLogger from Common.BuildToolError import * from .FvImageSection import FvImageSection from Common.LongFilePathSupport import OpenLongFilePath as open from Common.DataType import * ## generate GUIDed section # # class GuidSection(GuidSectionClassObject) : ## The constructor # # @param self The object pointer # def __init__(self): GuidSectionClassObject.__init__(self) ## GenSection() method # # Generate GUIDed section # # @param self The object pointer # @param OutputPath Where to place output file # @param ModuleName Which module this section belongs to # @param SecNum Index of section # @param KeyStringList Filter for inputs of section generation # @param FfsInf FfsInfStatement object that contains this section data # @param Dict dictionary contains macro and its value # @retval tuple (Generated file name, section alignment) # def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict={}, IsMakefile=False): # # Generate all section # self.KeyStringList = KeyStringList 
self.CurrentArchList = GenFdsGlobalVariable.ArchList if FfsInf is not None: self.Alignment = FfsInf.__ExtendMacro__(self.Alignment) self.NameGuid = FfsInf.__ExtendMacro__(self.NameGuid) self.SectionType = FfsInf.__ExtendMacro__(self.SectionType) self.CurrentArchList = [FfsInf.CurrentArch] SectFile = tuple() SectAlign = [] Index = 0 MaxAlign = None if self.FvAddr != []: FvAddrIsSet = True else: FvAddrIsSet = False if self.ProcessRequired in ("TRUE", "1"): if self.FvAddr != []: #no use FvAddr when the image is processed. self.FvAddr = [] if self.FvParentAddr is not None: #no use Parent Addr when the image is processed. self.FvParentAddr = None for Sect in self.SectionList: Index = Index + 1 SecIndex = '%s.%d' % (SecNum, Index) # set base address for inside FvImage if isinstance(Sect, FvImageSection): if self.FvAddr != []: Sect.FvAddr = self.FvAddr.pop(0) self.IncludeFvSection = True elif isinstance(Sect, GuidSection): Sect.FvAddr = self.FvAddr Sect.FvParentAddr = self.FvParentAddr ReturnSectList, align = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile) if isinstance(Sect, GuidSection): if Sect.IncludeFvSection: self.IncludeFvSection = Sect.IncludeFvSection if align is not None: if MaxAlign is None: MaxAlign = align if GenFdsGlobalVariable.GetAlignment (align) > GenFdsGlobalVariable.GetAlignment (MaxAlign): MaxAlign = align if ReturnSectList != []: if align is None: align = "1" for file in ReturnSectList: SectFile += (file,) SectAlign.append(align) if MaxAlign is not None: if self.Alignment is None: self.Alignment = MaxAlign else: if GenFdsGlobalVariable.GetAlignment (MaxAlign) > GenFdsGlobalVariable.GetAlignment (self.Alignment): self.Alignment = MaxAlign OutputFile = OutputPath + \ os.sep + \ ModuleName + \ SUP_MODULE_SEC + \ SecNum + \ SectionSuffix['GUIDED'] OutputFile = os.path.normpath(OutputFile) ExternalTool = None ExternalOption = None if self.NameGuid is not None: ExternalTool, ExternalOption = 
FindExtendTool(self.KeyStringList, self.CurrentArchList, self.NameGuid) # # If not have GUID , call default # GENCRC32 section # if self.NameGuid is None : GenFdsGlobalVariable.VerboseLogger("Use GenSection function Generate CRC32 Section") GenFdsGlobalVariable.GenerateSection(OutputFile, SectFile, Section.Section.SectionType[self.SectionType], InputAlign=SectAlign, IsMakefile=IsMakefile) OutputFileList = [] OutputFileList.append(OutputFile) return OutputFileList, self.Alignment #or GUID not in External Tool List elif ExternalTool is None: EdkLogger.error("GenFds", GENFDS_ERROR, "No tool found with GUID %s" % self.NameGuid) else: DummyFile = OutputFile + ".dummy" # # Call GenSection with DUMMY section type. # GenFdsGlobalVariable.GenerateSection(DummyFile, SectFile, InputAlign=SectAlign, IsMakefile=IsMakefile) # # Use external tool process the Output # TempFile = OutputPath + \ os.sep + \ ModuleName + \ SUP_MODULE_SEC + \ SecNum + \ '.tmp' TempFile = os.path.normpath(TempFile) # # Remove temp file if its time stamp is older than dummy file # Just in case the external tool fails at this time but succeeded before # Error should be reported if the external tool does not generate a new output based on new input # if os.path.exists(TempFile) and os.path.exists(DummyFile) and os.path.getmtime(TempFile) < os.path.getmtime(DummyFile): os.remove(TempFile) FirstCall = False CmdOption = '-e' if ExternalOption is not None: CmdOption = CmdOption + ' ' + ExternalOption if not GenFdsGlobalVariable.EnableGenfdsMultiThread: if self.ProcessRequired not in ("TRUE", "1") and self.IncludeFvSection and not FvAddrIsSet and self.FvParentAddr is not None: #FirstCall is only set for the encapsulated flash FV image without process required attribute. FirstCall = True # # Call external tool # ReturnValue = [1] if FirstCall: #first try to call the guided tool with -z option and CmdOption for the no process required guided tool. 
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, '-z' + ' ' + CmdOption, ReturnValue) # # when no call or first call failed, ReturnValue are not 1. # Call the guided tool with CmdOption # if ReturnValue[0] != 0: FirstCall = False ReturnValue[0] = 0 GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption) # # There is external tool which does not follow standard rule which return nonzero if tool fails # The output file has to be checked # if not os.path.exists(TempFile) : EdkLogger.error("GenFds", COMMAND_FAILURE, 'Fail to call %s, no output file was generated' % ExternalTool) FileHandleIn = open(DummyFile, 'rb') FileHandleIn.seek(0, 2) InputFileSize = FileHandleIn.tell() FileHandleOut = open(TempFile, 'rb') FileHandleOut.seek(0, 2) TempFileSize = FileHandleOut.tell() Attribute = [] HeaderLength = None if self.ExtraHeaderSize != -1: HeaderLength = str(self.ExtraHeaderSize) if self.ProcessRequired == "NONE" and HeaderLength is None: if TempFileSize > InputFileSize: FileHandleIn.seek(0) BufferIn = FileHandleIn.read() FileHandleOut.seek(0) BufferOut = FileHandleOut.read() if BufferIn == BufferOut[TempFileSize - InputFileSize:]: HeaderLength = str(TempFileSize - InputFileSize) #auto sec guided attribute with process required if HeaderLength is None: Attribute.append('PROCESSING_REQUIRED') FileHandleIn.close() FileHandleOut.close() if FirstCall and 'PROCESSING_REQUIRED' in Attribute: # Guided data by -z option on first call is the process required data. Call the guided tool with the real option. 
GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption) # # Call Gensection Add Section Header # if self.ProcessRequired in ("TRUE", "1"): if 'PROCESSING_REQUIRED' not in Attribute: Attribute.append('PROCESSING_REQUIRED') if self.AuthStatusValid in ("TRUE", "1"): Attribute.append('AUTH_STATUS_VALID') GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'], Guid=self.NameGuid, GuidAttr=Attribute, GuidHdrLen=HeaderLength) else: #add input file for GenSec get PROCESSING_REQUIRED GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption, IsMakefile=IsMakefile) Attribute = [] HeaderLength = None if self.ExtraHeaderSize != -1: HeaderLength = str(self.ExtraHeaderSize) if self.AuthStatusValid in ("TRUE", "1"): Attribute.append('AUTH_STATUS_VALID') if self.ProcessRequired == "NONE" and HeaderLength is None: GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'], Guid=self.NameGuid, GuidAttr=Attribute, GuidHdrLen=HeaderLength, DummyFile=DummyFile, IsMakefile=IsMakefile) else: if self.ProcessRequired in ("TRUE", "1"): if 'PROCESSING_REQUIRED' not in Attribute: Attribute.append('PROCESSING_REQUIRED') GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'], Guid=self.NameGuid, GuidAttr=Attribute, GuidHdrLen=HeaderLength, IsMakefile=IsMakefile) OutputFileList = [] OutputFileList.append(OutputFile) if 'PROCESSING_REQUIRED' in Attribute: # reset guided section alignment to none for the processed required guided data self.Alignment = None self.IncludeFvSection = False self.ProcessRequired = "TRUE" if IsMakefile and self.Alignment is not None and self.Alignment.strip() == '0': self.Alignment = '1' return OutputFileList, self.Alignment
apache-2.0
todaychi/hue
desktop/core/ext-py/guppy-0.1.10/guppy/sets/__init__.py
37
2124
#._cv_part guppy.sets from setsc import BitSet # base bitset type from setsc import ImmBitSet # immutable bitset type from setsc import immbit # immutable bitset singleton constructor from setsc import immbitrange # immutable bitset range constructor from setsc import immbitset # immutable bitset constructor from setsc import MutBitSet # mutable bitset from setsc import NodeSet # base nodeset type from setsc import ImmNodeSet # immmutable nodeset type from setsc import MutNodeSet # mutable nodeset type import copy_reg from setsc import _bs if hasattr(copy_reg, 'safe_constructors'): # < 2.3 version copy_reg.safe_constructors[_bs] = 1 else: # In at least Python 2.3.3, we have to set __module__; # it didn't find it otherwise. _bs.__module__ # Due to bug in Python version 2.3.3, we have to read it first.. _bs.__module__= 'guppy.sets' # ..to be able to set it. del copy_reg # Define some constructors. # Constructor names are lower case. # Some constructors are equal to types. # But this connection depends on the implementation. # So one may wish the user to not depend on this. mutbitset = MutBitSet immnodeset = ImmNodeSet mutnodeset = MutNodeSet def mutnodeset_union(iterable): "Return a mutable nodeset which is the union of all nodesets in iterable." set = mutnodeset() for it in iterable: set |= it return set def immnodeset_union(iterable, *args): "Return an immmutable nodeset which is the union of all nodesets in iterable." set = mutnodeset_union(iterable) return immnodeset(set, *args) def laxnodeset(v): """\ Return a nodeset with elements from the argument. If the argument is already a nodeset, it self will be returned. Otherwise it will be converted to a nodeset, that can be mutable or immutable depending on what happens to be most effectively implemented.""" if not isinstance(v, NodeSet): v = immnodeset(v) return v # Make attributes assignable by reading one; # this is getting around a bug in Python 2.3.3 # and should be harmless in any version. 
try: mutnodeset()._hiding_tag_ except AttributeError: pass
apache-2.0
sambitgaan/nupic
src/nupic/math/proposal.py
49
10882
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- raise Exception("XERROR proposal not available") from nupic.math.dist import * class Proposal(object): # Must support the following methods: # sample, logForward, logBackward = propose(currentValue, randomNumberGenerator) # None = adapt(accepted) def adapt(self, accepted): pass class RangeWrapper(Proposal): def __init__(self, prop, min=None, max=None, offset=0, maxIterations=1000): self.prop = prop self.offset = offset self.min = min self.max = max self.maxIterations = 1000 def propose(self, inCurrent, r): iterations = 0 maxIterations = 10 done = False prop = None value = None while not done: if self.offset is not None: current = inCurrent - self.offset else: current = inCurrent prop = self.prop.propose(current, r) value = prop[0] if self.offset is not None: value = value + self.offset done = True if (self.min is not None) and (value < self.min): done = False if (self.max is not None) and (value > self.max): done = False if iterations >= self.maxIterations: raise RuntimeError("Failed to sample in %d 
iterations." % self.maxIterations) prop = tuple([value] + list(prop[1:])) return prop def toOdds(p): return p / (1.0 - p) def toProbability(o): return o / (1.0 + o) def logit(p): return numpy.log(p / (1.0 - p)) def invlogit(lo): o = numpy.exp(lo) return o / (1.0 + o) def estimateProportion(x, n, prior=0.5): prop = (x + prior) / (n + 2.0 * prior) return prop class CircularQueue(object): def __init__(self, n): self.queue = [None] * n self.max = n self.n = 0 def add(self, x): self.queue[self.n % self.max] = x self.n += 1 def clear(self): self.n = 0 def getEarliest(self): if self.n > self.max: return self.queue[(self.n+1) % self.max] else: assert self.n return self.queue[0] class MovingAverage(CircularQueue): def __init__(self, n, prior=None): CircularQueue.__init__(self, n) if prior is not None: self.pn = prior self.pd = 1.0 else: self.pn = 0.0 self.pd = 0.0 self.sum = 0.0 def add(self, x): rem = 0 if self.n >= self.max: rem = self.queue[(self.n+1) % self.max] CircularQueue.add(self, x) self.sum += (x - rem) def clear(self): self.sum = 0 CircularQueue.clear(self) def get(self): return (self.sum + self.pn) / (min(self.n, self.max) + self.pd) def getSum(self): return (self.sum + self.pn), (min(self.n, self.max) + self.pd) class TransitionKernel(Proposal): def __init__(self, proposal, kernel, adaptiveWindow=None, target=0.5): self.prop = proposal if hasattr(kernel, "__iter__"): assert len(kernel) in [2, 3] if len(kernel) == 2: self.minKernel, self.maxKernel = kernel self.kernel = (kernel[0] + kernel[1]) / 2.0 elif len(kernel) == 3: self.minKernel, self.kernel, self.maxKernel = kernel else: raise RuntimeError("Specify kernel as 1 number, a range, or a 3-tuple " "with (min, start, max)") else: self.kernel = kernel self.minKernel = min(0.01, kernel) # Assume a default bounds. self.maxKernel = max(0.99, kernel) # Assume a default bounds. 
self.target = target self.accepted = None if adaptiveWindow: self.accepted = MovingAverage(adaptiveWindow, prior=0.5) def propose(self, current, r): # Adapt if have sufficient data. willAdaptForward = False willAdaptBackward = False if self.accepted: minAdapt = self.accepted.max willAdaptForward = ((self.accepted.n+0) >= minAdapt) willAdaptBackward = ((self.accepted.n+1) >= minAdapt) kernelForward = self.kernel kernelBackward = kernelForward targetOdds = logit(self.target) if willAdaptForward: obsOdds = logit(self.accepted.get()) weight = 0.1 logOR = weight * (targetOdds - obsOdds) kernelForward = invlogit(logit(self.kernel) + logOR) kernelForward = max(self.minKernel, min(self.maxKernel, kernelForward)) if willAdaptBackward: # Now consider the backward direction. a, b = self.accepted.getSum() if (self.accepted.n >= self.accepted.max): nextOdds = logit(estimateProportion(a + 1 - self.accepted.getEarliest(), b)) else: nextOdds = logit(estimateProportion(a + 1, b + 1)) nextWeight = 0.1 nextLogOR = nextWeight * (targetOdds - nextOdds) kernelBackward = invlogit(logit(kernelForward) + nextLogOR) kernelBackward = max(self.minKernel, min(self.maxKernel, kernelBackward)) if willAdaptForward or willAdaptBackward: if not "obsOdds" in locals(): obsOdds = logit(self.accepted.get()) print " Adapting:", obsOdds, "->", targetOdds print " Adapted:", self.kernel, kernelForward, kernelBackward self.kernel = kernelForward # Perform the proposal. stay = (r.uniform(0, 1) < kernelForward) if stay: # print "stay" return (current, numpy.log(kernelForward), numpy.log(kernelBackward)) else: # print "propose" while True: # Rejection sample to avoid the case of staying in one place. # Otherwise, our 'stay' probability will be wrong. 
            # Tail of an enclosing propose-until-different loop (the method
            # header is above this chunk); re-draw until the proposal moves.
            proposed, logForward, logBackward = self.prop.propose(current, r)
            if proposed != current:
                break
        # Fold the probability of *not* staying (1 - kernel) into both the
        # forward and backward log proposal probabilities.
        return (proposed,
                (logForward + numpy.log(1.0 - kernelForward)),
                (logBackward + numpy.log(1.0 - kernelBackward)))

    def adapt(self, accepted):
        # Record the acceptance outcome (as 0/1) when an accumulator is
        # attached; no-op otherwise.
        if self.accepted:
            self.accepted.add(int(accepted))


class DiscreteProposal(Proposal):
    """Proposal over a finite set of keys: with probability ``kernel`` stay
    at the current key, otherwise move uniformly to one of the others."""

    def __init__(self, keys, kernel):
        # Materialize keys and build a key -> index map for O(1) lookup.
        self.keys = [i for i in keys]
        self.keyMap = dict((key, i) for (i, key) in enumerate(self.keys))
        nKeys = len(keys)
        assert nKeys > 0
        self.nKeys = nKeys
        if nKeys == 1:
            # Only one key: the chain can never move, so force "stay".
            kernel = 1.0
            self.logp = 0
        else:
            # Log probability of each of the (nKeys - 1) "other" keys.
            self.logp = -numpy.log(nKeys-1)
        assert (kernel >= 0) and (kernel <= 1.0)
        self.kernel = kernel

    def propose(self, current, r):
        """Generates a random sample from the discrete probability
        distribution and returns its value, the log of the probability of
        sampling that value and the log of the probability of sampling the
        current value (passed in).
        """
        stay = (r.uniform(0, 1) < self.kernel)
        if stay:
            # Symmetric: staying has the same probability both ways.
            logKernel = numpy.log(self.kernel)
            return current, logKernel, logKernel
        else:
            # Choose uniformly, not according to the pmf.
            curIndex = self.keyMap[current]
            # Draw from nKeys-1 slots, then skip over the current index so
            # the current key is never re-proposed.
            ri = r.randint(0, self.nKeys-1)
            logKernel = numpy.log(1.0 - self.kernel)
            lp = logKernel + self.logp
            if ri < curIndex:
                return self.keys[ri], lp, lp
            else:
                return self.keys[ri+1], lp, lp


class TwoFishProposal(Proposal):
    """Integer random-walk proposal: a Poisson-distributed step taken either
    up or down with equal probability, truncated to [minVal, maxVal]."""

    def __init__(self, scale, minVal=-numpy.inf, maxVal=numpy.inf):
        self.dist = PoissonDistribution(scale)
        self.minVal = minVal
        self.maxVal = maxVal

    @staticmethod
    def proposePositive(dist, minVal, maxVal, r):
        # Sample a non-negative step no larger than maxVal by rejection,
        # then correct both directions' log probabilities for truncation.
        if minVal > 0:
            raise RuntimeError("Current value is outside legal range.")
        elif maxVal < 0:
            raise RuntimeError("Current value is outside legal range.")
        cdf = dist.cdf(maxVal)
        while 1:
            diff, logProb = dist.sample(r)
            if diff <= maxVal:
                break
        # Truncation bound for the reverse move (from the proposed point).
        rrange = diff - minVal
        rcdf = dist.cdf(rrange)
        rlp = dist.logProbability(diff)
        log2 = numpy.log(2)
        # The half chance we went up, rather than down.
        logForward = logProb - numpy.log(cdf) - log2
        logBackward = rlp - numpy.log(rcdf) - log2
        return diff, logForward, logBackward

    def propose(self, current, r):
        # Flip a fair coin for direction, then mirror the bounds so the
        # shared positive-step helper can be reused for both directions.
        up = r.randint(2)
        if up:
            diff, logForward, logBackward = TwoFishProposal.proposePositive(
                self.dist, self.minVal - current, self.maxVal - current, r)
            return current + diff, logForward, logBackward
        else:
            diff, logForward, logBackward = TwoFishProposal.proposePositive(
                self.dist, current - self.maxVal, current - self.minVal, r)
            return current - diff, logForward, logBackward


class PoissonProposal(Proposal):
    """Independence-style proposal drawing from a Poisson centered near the
    current value (offset keeps the rate strictly positive)."""

    def __init__(self, offset=0.1):
        self.offset = offset

    def propose(self, current, r):
        """Generates a random sample from the Poisson probability
        distribution with with location and scale parameter equal to the
        current value (passed in).

        Returns the value of the random sample, the log of the probability
        of sampling that value, and the log of the probability of sampling
        the current value if the roles of the new sample and the current
        sample were reversed (the log of the backward proposal probability).
        """
        curLambda = current + self.offset
        x, logProb = PoissonDistribution(curLambda).sample(r)
        logBackward = PoissonDistribution(x+self.offset).logDensity(current)
        return x, logProb, logBackward


class NormalProposal(Proposal):
    """Symmetric Gaussian random-walk proposal with fixed standard
    deviation; forward and backward densities are identical."""

    def __init__(self, sd):
        self.dist = NormalDistribution(0, sd)

    def propose(self, current, r):
        x, logProb = self.dist.sample(r)
        return x + current, logProb, logProb


class LogNormalProposal(Proposal):
    """Multiplicative random-walk proposal for positive quantities using a
    log-normal centered on the current value."""

    def __init__(self, normalSD):
        self.sd = normalSD

    def propose(self, current, r):
        dist = LogNormalDistribution(numpy.log(current), self.sd)
        x, logDensity = dist.sample(r)
        # Switch to new center, look at backward density.
        dist.setNormalMean(numpy.log(x))
        return x, logDensity, dist.logDensity(current)


class GammaProposal(Proposal):
    """Gamma proposal whose mean tracks the current value (shape fixed,
    scale = (current + offset) / shape)."""

    def __init__(self, shape, offset=0.001):
        self.shape = float(shape)
        self.offset = offset

    def propose(self, current, r):
        # NOTE(review): max(..., 0) can yield a *zero* scale when
        # current <= -offset, which a Gamma distribution cannot take; the
        # commented-out backward line below lacks the max, suggesting this
        # clamp is suspect — confirm intended domain of ``current``.
        forwardScale = max((current + self.offset) / self.shape, 0)
        fdist = GammaDistribution(self.shape, forwardScale)
        x, logForward = fdist.sample(r)
        # backwardScale = max((x + self.offset) / self.shape, 0)
        backwardScale = (x + self.offset) / self.shape
        bdist = GammaDistribution(self.shape, backwardScale)
        return x, logForward, bdist.logDensity(current)
agpl-3.0
jskew/gnuradio
gr-wxgui/python/wxgui/plotter/bar_plotter.py
59
4281
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

import wx
from grid_plotter_base import grid_plotter_base
from OpenGL import GL
import common
import numpy

LEGEND_TEXT_FONT_SIZE = 8
LEGEND_BOX_PADDING = 3
MIN_PADDING = 0, 0, 0, 70 #top, right, bottom, left

#constants for the waveform storage
SAMPLES_KEY = 'samples'
COLOR_SPEC_KEY = 'color_spec'
MARKERY_KEY = 'marker'
TRIG_OFF_KEY = 'trig_off'

##################################################
# Bar Plotter for histogram waveforms
##################################################
class bar_plotter(grid_plotter_base):

    def __init__(self, parent):
        """
        Create a new bar plotter.
        """
        #init
        grid_plotter_base.__init__(self, parent, MIN_PADDING)
        self._bars = list()
        self._bar_width = .5
        self._color_spec = (0, 0, 0)
        #setup bar cache
        self._bar_cache = self.new_gl_cache(self._draw_bars)
        #setup bar plotter
        self.register_init(self._init_bar_plotter)

    def _init_bar_plotter(self):
        """
        Run gl initialization tasks.
        """
        GL.glEnableClientState(GL.GL_VERTEX_ARRAY)

    def _draw_bars(self):
        """
        Draw the vertical bars.
        """
        bars = self._bars
        num_bars = len(bars)
        if num_bars == 0: return
        #use scissor to prevent drawing outside grid
        GL.glEnable(GL.GL_SCISSOR_TEST)
        GL.glScissor(
            self.padding_left,
            self.padding_bottom+1,
            self.width-self.padding_left-self.padding_right-1,
            self.height-self.padding_top-self.padding_bottom-1,
        )
        #load the points: one quad (4 vertices) per bar
        points = list()
        width = self._bar_width/2
        for i, bar in enumerate(bars):
            points.extend([
                    (i-width, 0),
                    (i+width, 0),
                    (i+width, bar),
                    (i-width, bar),
                ]
            )
        GL.glColor3f(*self._color_spec)
        #matrix transforms
        GL.glPushMatrix()
        GL.glTranslatef(self.padding_left, self.padding_top, 0)
        GL.glScalef(
            (self.width-self.padding_left-self.padding_right),
            (self.height-self.padding_top-self.padding_bottom),
            1,
        )
        GL.glTranslatef(0, 1, 0)
        #BUG FIX: guard against a single bar; 1.0/(num_bars-1) raised
        #ZeroDivisionError when exactly one bar was set
        GL.glScalef(1.0/max(num_bars-1, 1), -1.0/(self.y_max-self.y_min), 1)
        GL.glTranslatef(0, -self.y_min, 0)
        #draw the bars
        GL.glVertexPointerf(points)
        GL.glDrawArrays(GL.GL_QUADS, 0, len(points))
        GL.glPopMatrix()
        GL.glDisable(GL.GL_SCISSOR_TEST)

    def _populate_point_label(self, x_val, y_val):
        """
        Get the text the will populate the point label.
        Give X and Y values for the current point.
        Give values for the channel at the X coordinate.

        Args:
            x_val: the current x value
            y_val: the current y value

        Returns:
            a string with newlines
        """
        #BUG FIX: with fewer than 2 bars the index->x scalar below is zero
        #and the divisions by it raised ZeroDivisionError; show no label
        if len(self._bars) < 2: return ''
        scalar = float(len(self._bars)-1)/(self.x_max - self.x_min)
        #convert x val to bar #
        bar_index = scalar*(x_val - self.x_min)
        #if abs(bar_index - round(bar_index)) > self._bar_width/2: return ''
        bar_index = int(round(bar_index))
        bar_start = (bar_index - self._bar_width/2)/scalar + self.x_min
        bar_end = (bar_index + self._bar_width/2)/scalar + self.x_min
        bar_value = self._bars[bar_index]
        return '%s to %s\n%s: %s'%(
            common.eng_format(bar_start, self.x_units),
            common.eng_format(bar_end, self.x_units),
            self.y_label, common.eng_format(bar_value, self.y_units),
        )

    def set_bars(self, bars, bar_width, color_spec):
        """
        Set the bars.

        Args:
            bars: a list of bars
            bar_width: the fractional width of the bar, between 0 and 1
            color_spec: the color tuple
        """
        self.lock()
        self._bars = bars
        self._bar_width = float(bar_width)
        self._color_spec = color_spec
        self._bar_cache.changed(True)
        self.unlock()
gpl-3.0
antgonza/qiita
qiita_pet/handlers/websocket_handlers.py
3
5203
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# adapted from
# https://github.com/leporo/tornado-redis/blob/master/demos/websockets
from json import loads, dumps
from itertools import chain

import toredis
from tornado.web import authenticated
from tornado.websocket import WebSocketHandler
from tornado.gen import engine, Task

from qiita_core.qiita_settings import r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.artifact import Artifact
from qiita_core.util import execute_as_transaction


class MessageHandler(WebSocketHandler):
    """Websocket that relays Redis pub/sub messages for a user channel."""

    def __init__(self, *args, **kwargs):
        super(MessageHandler, self).__init__(*args, **kwargs)
        # The redis server
        self.r_client = r_client
        # The toredis server that allows event-based message handling
        self.toredis = toredis.Client()
        self.toredis.connect()
        # Set once the client tells us which user channel to subscribe to
        self.channel = None
        self.channel_messages = None

    def get_current_user(self):
        user = self.get_secure_cookie("user")
        if user is None:
            raise ValueError("No user associated with the websocket!")
        else:
            return user.strip('" ')

    # Open allows for any number arguments, unlike what pylint thinks.
    # pylint: disable=W0221
    @authenticated
    def open(self):
        self.write_message('hello')

    @authenticated
    def on_message(self, msg):
        # When the websocket receives a message from the javascript client,
        # parse into JSON
        msginfo = loads(msg)
        # Determine which Redis communication channel the server needs to
        # listen on
        self.channel = msginfo.get('user', None)
        if self.channel is not None:
            self.channel_messages = '%s:messages' % self.channel
            self.listen()

    def listen(self):
        # Attach a callback on the channel to listen too. This callback is
        # executed when anything is placed onto the channel.
        self.toredis.subscribe(self.channel, callback=self.callback)

        # Potential race-condition where a separate process may have placed
        # messages into the queue before we've been able to attach listen.
        oldmessages = self.r_client.lrange(self.channel_messages, 0, -1)
        if oldmessages is not None:
            for message in oldmessages:
                self.write_message(message)

    def callback(self, msg):
        message_type, channel, payload = msg
        # if a compute process wrote to the Redis channel that we are
        # listening too, and if it is actually a message, send the payload to
        # the javascript client via the websocket
        if channel == self.channel and message_type == 'message':
            self.write_message(payload)

    @engine
    def on_close(self):
        yield Task(self.toredis.unsubscribe, self.channel)
        self.r_client.delete('%s:messages' % self.channel)
        # BUG FIX: there is no ``self.redis`` attribute on this handler; the
        # pub/sub client created in __init__ is ``self.toredis``, so the old
        # ``self.redis.disconnect()`` raised AttributeError on every close
        # and leaked the toredis connection.
        self.toredis.disconnect()


class SelectedSocketHandler(WebSocketHandler, BaseHandler):
    """Websocket for removing samples on default analysis display page"""

    @authenticated
    @execute_as_transaction
    def on_message(self, msg):
        # When the websocket receives a message from the javascript client,
        # parse into JSON
        msginfo = loads(msg)
        default = self.current_user.default_analysis
        if 'remove_sample' in msginfo:
            data = msginfo['remove_sample']
            artifact = Artifact(data['proc_data'])
            default.remove_samples([artifact], data['samples'])
        elif 'remove_pd' in msginfo:
            data = msginfo['remove_pd']
            default.remove_samples([Artifact(data['proc_data'])])
        elif 'clear' in msginfo:
            data = msginfo['clear']
            artifacts = [Artifact(_id) for _id in data['pids']]
            default.remove_samples(artifacts)
        # Echo the request back so the client can update its view
        self.write_message(msg)

    # Open allows for any number arguments, unlike what pylint thinks.
    # pylint: disable=W0221
    @authenticated
    @execute_as_transaction
    def open(self):
        self.write_message('hello')


class SelectSamplesHandler(WebSocketHandler, BaseHandler):
    """Websocket for selecting and deselecting samples on list studies page"""

    @authenticated
    @execute_as_transaction
    def on_message(self, msg):
        """Selects samples on a message from the user

        Parameters
        ----------
        msg : JSON str
            Message containing sample and prc_data information, in the form
            {proc_data_id': [s1, s2, ...], ...]}
        """
        msginfo = loads(msg)
        default = self.current_user.default_analysis
        default.add_samples(msginfo['sel'])
        # Count total number of unique samples selected and return
        self.write_message(dumps({
            'sel': len(set(
                chain.from_iterable(s for s in msginfo['sel'].values())))
        }))
bsd-3-clause
catapult-project/catapult-csm
third_party/google-endpoints/Crypto/Cipher/AES.py
126
4218
# -*- coding: utf-8 -*-
#
#  Cipher/AES.py : AES
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""AES symmetric cipher

AES `(Advanced Encryption Standard)`__ is a symmetric block cipher standardized
by NIST_ . It has a fixed data block size of 16 bytes.
Its keys can be 128, 192, or 256 bits long.

AES is very fast and secure, and it is the de facto standard for symmetric
encryption.

As an example, encryption can be done as follows:

    >>> from Crypto.Cipher import AES
    >>> from Crypto import Random
    >>>
    >>> key = b'Sixteen byte key'
    >>> iv = Random.new().read(AES.block_size)
    >>> cipher = AES.new(key, AES.MODE_CFB, iv)
    >>> msg = iv + cipher.encrypt(b'Attack at dawn')

.. __: http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
.. _NIST: http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf

:undocumented: __revision__, __package__
"""

__revision__ = "$Id$"

from Crypto.Cipher import blockalgo
from Crypto.Cipher import _AES


class AESCipher (blockalgo.BlockAlgo):
    """AES cipher object"""

    def __init__(self, key, *args, **kwargs):
        """Initialize an AES cipher object

        See also `new()` at the module level."""
        # Delegate all chaining-mode logic to BlockAlgo; _AES is the
        # C implementation of the raw block transform.
        blockalgo.BlockAlgo.__init__(self, _AES, key, *args, **kwargs)


def new(key, *args, **kwargs):
    """Create a new AES cipher

    :Parameters:
      key : byte string
        The secret key to use in the symmetric cipher.
        It must be 16 (*AES-128*), 24 (*AES-192*), or 32 (*AES-256*) bytes long.
    :Keywords:
      mode : a *MODE_** constant
        The chaining mode to use for encryption or decryption.
        Default is `MODE_ECB`.
      IV : byte string
        The initialization vector to use for encryption or decryption.

        It is ignored for `MODE_ECB` and `MODE_CTR`.

        For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption
        and `block_size` +2 bytes for decryption (in the latter case, it is
        actually the *encrypted* IV which was prefixed to the ciphertext).
        It is mandatory.

        For all other modes, it must be `block_size` bytes longs. It is optional and
        when not present it will be given a default value of all zeroes.
      counter : callable
        (*Only* `MODE_CTR`). A stateful function that returns the next
        *counter block*, which is a byte string of `block_size` bytes.
        For better performance, use `Crypto.Util.Counter`.
      segment_size : integer
        (*Only* `MODE_CFB`).The number of bits the plaintext and ciphertext
        are segmented in.
        It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.

    :Return: an `AESCipher` object
    """
    # Thin factory: positional/keyword arguments are validated downstream
    # by AESCipher / BlockAlgo.
    return AESCipher(key, *args, **kwargs)

#: Electronic Code Book (ECB). See `blockalgo.MODE_ECB`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `blockalgo.MODE_CBC`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `blockalgo.MODE_CFB`.
MODE_CFB = 3
#: This mode should not be used.
MODE_PGP = 4
#: Output FeedBack (OFB). See `blockalgo.MODE_OFB`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `blockalgo.MODE_CTR`.
MODE_CTR = 6
#: OpenPGP Mode. See `blockalgo.MODE_OPENPGP`.
MODE_OPENPGP = 7
#: Size of a data block (in bytes)
block_size = 16
#: Size of a key (in bytes)
key_size = ( 16, 24, 32 )
bsd-3-clause
TeddyDesTodes/aubio
python/tests/test_midi2note.py
14
1250
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from aubio import midi2note
import unittest

# (midi number, expected note name) pairs covering the full 0..127 range
list_of_known_midis = (
        ( 0, 'C-1' ),
        ( 1, 'C#-1' ),
        ( 38, 'D2' ),
        ( 48, 'C3' ),
        ( 59, 'B3' ),
        ( 60, 'C4' ),
        ( 127, 'G9' ),
        )

class midi2note_good_values(unittest.TestCase):

    def test_midi2note_known_values(self):
        " known values are correctly converted "
        for midi, note in list_of_known_midis:
            self.assertEqual ( midi2note(midi), note )

class midi2note_wrong_values(unittest.TestCase):

    def test_midi2note_negative_value(self):
        " fails when passed a negative value "
        self.assertRaises(ValueError, midi2note, -2)

    # BUG FIX: this test was also named test_midi2note_negative_value,
    # which silently shadowed the negative-value test above so only one
    # of the two ever ran.
    def test_midi2note_large_value(self):
        " fails when passed a value greater than 127 "
        self.assertRaises(ValueError, midi2note, 128)

    def test_midi2note_floating_value(self):
        " fails when passed a floating point "
        self.assertRaises(TypeError, midi2note, 69.2)

    def test_midi2note_character_value(self):
        " fails when passed a value that can not be transformed to integer "
        self.assertRaises(TypeError, midi2note, "a")

if __name__ == '__main__':
    unittest.main()
gpl-3.0
gimoh/ansible-modules-core
cloud/google/gce_net.py
1
9195
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
    - This module can create and destroy Google Compute Engine networks
      and firewall rules U(https://developers.google.com/compute/docs/networking).
      The I(name) parameter is reserved for referencing a network while the
      I(fwname) parameter is used to reference firewall rules.
      IPv4 Address ranges must be specified using the CIDR
      U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
      Full install/configuration instructions for the gce* modules can
      be found in the comments of ansible/test/gce_tests.py.
options:
  allowed:
    description:
      - the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800;udp:1-25')
    required: false
    default: null
    aliases: []
  ipv4_range:
    description:
      - the IPv4 address range in CIDR notation for the network
    required: false
    aliases: ['cidr']
  fwname:
    description:
      - name of the firewall rule
    required: false
    default: null
    aliases: ['fwrule']
  name:
    description:
      - name of the network
    required: false
    default: null
    aliases: []
  src_range:
    description:
      - the source IPv4 address range in CIDR notation
    required: false
    default: null
    aliases: ['src_cidr']
  src_tags:
    description:
      - the source instance tags for creating a firewall rule
    required: false
    default: null
    aliases: []
  target_tags:
    version_added: "1.9"
    description:
      - the target instance tags for creating a firewall rule
    required: false
    default: null
    aliases: []
  state:
    description:
      - desired state of the persistent disk
    required: false
    default: "present"
    choices: ["active", "present", "absent", "deleted"]
    aliases: []
  service_account_email:
    version_added: "1.6"
    description:
      - service account email
    required: false
    default: null
    aliases: []
  pem_file:
    version_added: "1.6"
    description:
      - path to the pem file associated with the service account email
    required: false
    default: null
    aliases: []
  project_id:
    version_added: "1.6"
    description:
      - your GCE project ID
    required: false
    default: null
    aliases: []

requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''

EXAMPLES = '''
# Simple example of creating a new network
- local_action:
    module: gce_net
    name: privatenet
    ipv4_range: '10.240.16.0/24'

# Simple example of creating a new firewall rule
- local_action:
    module: gce_net
    name: privatenet
    fwname: all-web-webproxy
    allowed: tcp:80,8080
    src_tags: ["web", "proxy"]

'''

import sys

try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
            ResourceExistsError, ResourceNotFoundError
    _ = Provider.GCE
except ImportError:
    print("failed=True " + \
            "msg='libcloud with GCE support required for this module.'")
    sys.exit(1)


def format_allowed_section(allowed):
    """Format each section of the allowed list"""
    # 'tcp' -> protocol only; 'tcp:80,443' -> protocol plus a port list
    if allowed.count(":") == 0:
        protocol = allowed
        ports = []
    elif allowed.count(":") == 1:
        protocol, ports = allowed.split(":")
    else:
        return []
    if ports.count(","):
        ports = ports.split(",")
    else:
        ports = [ports]
    return_val = {"IPProtocol": protocol}
    if ports:
        return_val["ports"] = ports
    return return_val

def format_allowed(allowed):
    """Format the 'allowed' value so that it is GCE compatible."""
    # Sections are separated by ';', e.g. 'tcp:80-800;udp:1-25'
    return_value = []
    if allowed.count(";") == 0:
        return [format_allowed_section(allowed)]
    else:
        sections = allowed.split(";")
        for section in sections:
            return_value.append(format_allowed_section(section))
    return return_value


def main():
    # Module entry point: creates/destroys a network and/or a firewall rule
    # depending on which of 'name' / 'fwname' are supplied.
    module = AnsibleModule(
        argument_spec = dict(
            allowed = dict(),
            ipv4_range = dict(),
            fwname = dict(),
            name = dict(),
            src_range = dict(type='list'),
            src_tags = dict(type='list'),
            target_tags = dict(type='list'),
            state = dict(default='present'),
            service_account_email = dict(),
            pem_file = dict(),
            project_id = dict(),
        )
    )

    gce = gce_connect(module)

    allowed = module.params.get('allowed')
    ipv4_range = module.params.get('ipv4_range')
    fwname = module.params.get('fwname')
    name = module.params.get('name')
    src_range = module.params.get('src_range')
    src_tags = module.params.get('src_tags')
    target_tags = module.params.get('target_tags')
    state = module.params.get('state')

    changed = False
    json_output = {'state': state}

    if state in ['active', 'present']:
        network = None
        try:
            network = gce.ex_get_network(name)
            json_output['name'] = name
            json_output['ipv4_range'] = network.cidr
        except ResourceNotFoundError:
            pass
        except Exception, e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)

        # user wants to create a new network that doesn't yet exist
        if name and not network:
            if not ipv4_range:
                module.fail_json(msg="Missing required 'ipv4_range' parameter",
                    changed=False)

            try:
                network = gce.ex_create_network(name, ipv4_range)
                json_output['name'] = name
                json_output['ipv4_range'] = ipv4_range
                changed = True
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

        if fwname:
            # user creating a firewall rule
            if not allowed and not src_range and not src_tags:
                if changed and network:
                    module.fail_json(
                        msg="Network created, but missing required " + \
                        "firewall rule parameter(s)", changed=True)
                module.fail_json(
                    msg="Missing required firewall rule parameter(s)",
                    changed=False)

            # NOTE(review): format_allowed() is called unconditionally, but
            # the check above only requires *one* of allowed/src_range/
            # src_tags; if only src_range or src_tags is given, allowed is
            # None and allowed.count() will raise — confirm intended usage.
            allowed_list = format_allowed(allowed)

            try:
                gce.ex_create_firewall(fwname, allowed_list, network=name,
                    source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
                changed = True
            except ResourceExistsError:
                pass
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

            json_output['fwname'] = fwname
            json_output['allowed'] = allowed
            json_output['src_range'] = src_range
            json_output['src_tags'] = src_tags
            json_output['target_tags'] = target_tags

    if state in ['absent', 'deleted']:
        if fwname:
            json_output['fwname'] = fwname
            fw = None
            try:
                fw = gce.ex_get_firewall(fwname)
            except ResourceNotFoundError:
                pass
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if fw:
                gce.ex_destroy_firewall(fw)
                changed = True
        if name:
            json_output['name'] = name
            network = None
            try:
                network = gce.ex_get_network(name)
#               json_output['d1'] = 'found network name %s' % name
            except ResourceNotFoundError:
#               json_output['d2'] = 'not found network name %s' % name
                pass
            except Exception, e:
#               json_output['d3'] = 'error with %s' % name
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if network:
#               json_output['d4'] = 'deleting %s' % name
                gce.ex_destroy_network(network)
#               json_output['d5'] = 'deleted %s' % name
                changed = True

    json_output['changed'] = changed
    print json.dumps(json_output)
    sys.exit(0)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *

main()
gpl-3.0
hrjn/scikit-learn
examples/covariance/plot_lw_vs_oas.py
159
2951
""" ============================= Ledoit-Wolf vs OAS estimation ============================= The usual covariance maximum likelihood estimate can be regularized using shrinkage. Ledoit and Wolf proposed a close formula to compute the asymptotically optimal shrinkage parameter (minimizing a MSE criterion), yielding the Ledoit-Wolf covariance estimate. Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage parameter, the OAS coefficient, whose convergence is significantly better under the assumption that the data are Gaussian. This example, inspired from Chen's publication [1], shows a comparison of the estimated MSE of the LW and OAS methods, using Gaussian distributed data. [1] "Shrinkage Algorithms for MMSE Covariance Estimation" Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy.linalg import toeplitz, cholesky from sklearn.covariance import LedoitWolf, OAS np.random.seed(0) ############################################################################### n_features = 100 # simulation covariance matrix (AR(1) process) r = 0.1 real_cov = toeplitz(r ** np.arange(n_features)) coloring_matrix = cholesky(real_cov) n_samples_range = np.arange(6, 31, 1) repeat = 100 lw_mse = np.zeros((n_samples_range.size, repeat)) oa_mse = np.zeros((n_samples_range.size, repeat)) lw_shrinkage = np.zeros((n_samples_range.size, repeat)) oa_shrinkage = np.zeros((n_samples_range.size, repeat)) for i, n_samples in enumerate(n_samples_range): for j in range(repeat): X = np.dot( np.random.normal(size=(n_samples, n_features)), coloring_matrix.T) lw = LedoitWolf(store_precision=False, assume_centered=True) lw.fit(X) lw_mse[i, j] = lw.error_norm(real_cov, scaling=False) lw_shrinkage[i, j] = lw.shrinkage_ oa = OAS(store_precision=False, assume_centered=True) oa.fit(X) oa_mse[i, j] = oa.error_norm(real_cov, scaling=False) oa_shrinkage[i, j] = oa.shrinkage_ # plot MSE plt.subplot(2, 1, 
1) plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1), label='Ledoit-Wolf', color='navy', lw=2) plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1), label='OAS', color='darkorange', lw=2) plt.ylabel("Squared error") plt.legend(loc="upper right") plt.title("Comparison of covariance estimators") plt.xlim(5, 31) # plot shrinkage coefficient plt.subplot(2, 1, 2) plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1), label='Ledoit-Wolf', color='navy', lw=2) plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1), label='OAS', color='darkorange', lw=2) plt.xlabel("n_samples") plt.ylabel("Shrinkage") plt.legend(loc="lower right") plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.) plt.xlim(5, 31) plt.show()
bsd-3-clause
hpicgs/cmake-init
source/tests/googletest/googletest/test/googletest-json-outfiles-test.py
80
5663
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""

import json
import os

import gtest_json_test_utils
import gtest_test_utils

GTEST_OUTPUT_SUBDIR = 'json_outfiles'
GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'

# Expected normalized JSON report for outfile test 1; '*' matches any value.
EXPECTED_1 = {
    u'tests': 1,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'testsuites': [{
        u'name': u'PropertyOne',
        u'tests': 1,
        u'failures': 0,
        u'disabled': 0,
        u'errors': 0,
        u'time': u'*',
        u'timestamp': u'*',
        u'testsuite': [{
            u'name': u'TestSomeProperties',
            u'status': u'RUN',
            u'result': u'COMPLETED',
            u'time': u'*',
            u'timestamp': u'*',
            u'classname': u'PropertyOne',
            u'SetUpProp': u'1',
            u'TestSomeProperty': u'1',
            u'TearDownProp': u'1',
        }],
    }],
}

# Expected normalized JSON report for outfile test 2.
EXPECTED_2 = {
    u'tests': 1,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'testsuites': [{
        u'name': u'PropertyTwo',
        u'tests': 1,
        u'failures': 0,
        u'disabled': 0,
        u'errors': 0,
        u'time': u'*',
        u'timestamp': u'*',
        u'testsuite': [{
            u'name': u'TestSomeProperties',
            u'status': u'RUN',
            u'result': u'COMPLETED',
            u'timestamp': u'*',
            u'time': u'*',
            u'classname': u'PropertyTwo',
            u'SetUpProp': u'2',
            u'TestSomeProperty': u'2',
            u'TearDownProp': u'2',
        }],
    }],
}


class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
  """Unit test for Google Test's JSON output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single file
    # for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, '')
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    # Best-effort cleanup of both possible output files and the directory.
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + '.json'))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + '.json'))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_2)

  def _TestOutFile(self, test_name, expected):
    # Run the gtest binary with JSON output directed at output_dir_ and
    # compare the normalized report against `expected`.
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    # MODERNIZED: assert_/assertEquals are deprecated unittest aliases
    # (removed in Python 3.12); use assertTrue/assertEqual.
    self.assertTrue(p.exited)
    self.assertEqual(0, p.exit_code)

    # Libtool-built binaries may be prefixed with 'lt-'; accept either name.
    output_file_name1 = test_name + '.json'
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assertTrue(os.path.isfile(output_file1) or
                    os.path.isfile(output_file2), output_file1)

    if os.path.isfile(output_file1):
      with open(output_file1) as f:
        actual = json.load(f)
    else:
      with open(output_file2) as f:
        actual = json.load(f)
    self.assertEqual(expected, gtest_json_test_utils.normalize(actual))


if __name__ == '__main__':
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '0'
  gtest_test_utils.Main()
mit
lessaworld/SQLpie
scripts/shell.py
1
2395
# -*- coding: utf-8 -*- """ SQLpie License (MIT License) Copyright (c) 2011-2016 André Lessa, http://sqlpie.com See LICENSE file. """ import os, time import sys sys.path.append(os.getcwd()) from flask import Flask, current_app, g from flask import jsonify from flaskext.mysql import MySQL import sqlpie # load global vars import application def handle_shell(app, mysql, protocol, hostname, port): with application.app_context(): print "Launching SQLpie Shell..." time.sleep(2) shell = sqlpie.Shell(protocol, hostname, port) pong = shell.handle_command("/ping") if pong.startswith("URLError"): print pong shell.handle_command("exit") else: print "type a command, or type help for a list of commands.\n" while(True): try: g.conn = mysql.connect() g.cursor = g.conn.cursor() g.conn.begin() cmd = raw_input("SQLpie # ") response = shell.handle_command(cmd) if len(response) > 1: print response + "\n" g.conn.commit() except Exception as e: if sqlpie.Util.is_debug(): traceback.print_tb(sys.exc_info()[2]) try: g.conn.rollback() except: pass finally: g.cursor.close() g.conn.close() if __name__ == '__main__': protocol, hostname, port = "http", "localhost", sqlpie.Config.get(sqlpie.Config.SERVER_PORT) try: if "--protocol" in sys.argv[1:]: protocol = sys.argv[int(sys.argv[sys.argv.index("--protocol") + 1])] if "--hostname" in sys.argv[1:]: hostname = sys.argv[int(sys.argv.index("--hostname") + 1)] if "--port" in sys.argv[1:]: port = sys.argv[int(sys.argv.index("--port") + 1)] os.environ['sqlpie_debug'] = "False" application = Flask(__name__) sqlpie_config = sqlpie.Config().load() setup = sqlpie.DBSetup(sqlpie_config) setup.init(application) mysql = setup.db() with application.app_context(): handle_shell(application, mysql, protocol, hostname, port) except: print "Error. Invalid Parameters." print "Exiting now.\n\n"
mit
FusionSP/android_external_chromium_org
third_party/tlslite/tlslite/utils/aes.py
149
1064
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.

"""Abstract class for AES."""

class AES(object):
    """Abstract base for AES implementations (CBC mode only).

    Validates key/mode/IV sizes on construction and records the cipher
    name derived from the key length. Concrete subclasses override
    encrypt()/decrypt().
    """

    # Cipher name keyed by key length in bytes.
    _NAME_BY_KEY_LEN = {16: "aes128", 24: "aes192", 32: "aes256"}

    def __init__(self, key, mode, IV, implementation):
        # Guard clauses: only 128/192/256-bit keys, CBC mode (2), 16-byte IV.
        if len(key) not in (16, 24, 32):
            raise AssertionError()
        if mode != 2:
            raise AssertionError()
        if len(IV) != 16:
            raise AssertionError()
        self.isBlockCipher = True
        self.block_size = 16
        self.implementation = implementation
        try:
            self.name = self._NAME_BY_KEY_LEN[len(key)]
        except KeyError:
            # Unreachable after the length check above, kept for parity
            # with the original defensive else-branch.
            raise AssertionError()

    #CBC-Mode encryption, returns ciphertext
    #WARNING: *MAY* modify the input as well
    def encrypt(self, plaintext):
        assert len(plaintext) % 16 == 0

    #CBC-Mode decryption, returns plaintext
    #WARNING: *MAY* modify the input as well
    def decrypt(self, ciphertext):
        assert len(ciphertext) % 16 == 0
bsd-3-clause