repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
ravindrapanda/tensorflow
tensorflow/python/keras/applications/mobilenet/__init__.py
73
1147
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MobileNet Keras application.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras._impl.keras.applications.mobilenet import decode_predictions from tensorflow.python.keras._impl.keras.applications.mobilenet import MobileNet from tensorflow.python.keras._impl.keras.applications.mobilenet import preprocess_input del absolute_import del division del print_function
apache-2.0
gnuhub/intellij-community
python/lib/Lib/site-packages/django/contrib/messages/tests/fallback.py
311
6978
from django.contrib.messages import constants from django.contrib.messages.storage.fallback import FallbackStorage, \ CookieStorage from django.contrib.messages.tests.base import BaseTest from django.contrib.messages.tests.cookie import set_cookie_data, \ stored_cookie_messages_count from django.contrib.messages.tests.session import set_session_data, \ stored_session_messages_count class FallbackTest(BaseTest): storage_class = FallbackStorage def get_request(self): self.session = {} request = super(FallbackTest, self).get_request() request.session = self.session return request def get_cookie_storage(self, storage): return storage.storages[-2] def get_session_storage(self, storage): return storage.storages[-1] def stored_cookie_messages_count(self, storage, response): return stored_cookie_messages_count(self.get_cookie_storage(storage), response) def stored_session_messages_count(self, storage, response): return stored_session_messages_count(self.get_session_storage(storage)) def stored_messages_count(self, storage, response): """ Return the storage totals from both cookie and session backends. """ total = (self.stored_cookie_messages_count(storage, response) + self.stored_session_messages_count(storage, response)) return total def test_get(self): request = self.get_request() storage = self.storage_class(request) cookie_storage = self.get_cookie_storage(storage) # Set initial cookie data. example_messages = [str(i) for i in range(5)] set_cookie_data(cookie_storage, example_messages) # Overwrite the _get method of the fallback storage to prove it is not # used (it would cause a TypeError: 'NoneType' object is not callable). self.get_session_storage(storage)._get = None # Test that the message actually contains what we expect. 
self.assertEqual(list(storage), example_messages) def test_get_empty(self): request = self.get_request() storage = self.storage_class(request) # Overwrite the _get method of the fallback storage to prove it is not # used (it would cause a TypeError: 'NoneType' object is not callable). self.get_session_storage(storage)._get = None # Test that the message actually contains what we expect. self.assertEqual(list(storage), []) def test_get_fallback(self): request = self.get_request() storage = self.storage_class(request) cookie_storage = self.get_cookie_storage(storage) session_storage = self.get_session_storage(storage) # Set initial cookie and session data. example_messages = [str(i) for i in range(5)] set_cookie_data(cookie_storage, example_messages[:4] + [CookieStorage.not_finished]) set_session_data(session_storage, example_messages[4:]) # Test that the message actually contains what we expect. self.assertEqual(list(storage), example_messages) def test_get_fallback_only(self): request = self.get_request() storage = self.storage_class(request) cookie_storage = self.get_cookie_storage(storage) session_storage = self.get_session_storage(storage) # Set initial cookie and session data. example_messages = [str(i) for i in range(5)] set_cookie_data(cookie_storage, [CookieStorage.not_finished], encode_empty=True) set_session_data(session_storage, example_messages) # Test that the message actually contains what we expect. self.assertEqual(list(storage), example_messages) def test_flush_used_backends(self): request = self.get_request() storage = self.storage_class(request) cookie_storage = self.get_cookie_storage(storage) session_storage = self.get_session_storage(storage) # Set initial cookie and session data. set_cookie_data(cookie_storage, ['cookie', CookieStorage.not_finished]) set_session_data(session_storage, ['session']) # When updating, previously used but no longer needed backends are # flushed. 
response = self.get_response() list(storage) storage.update(response) session_storing = self.stored_session_messages_count(storage, response) self.assertEqual(session_storing, 0) def test_no_fallback(self): """ Confirms that: (1) A short number of messages whose data size doesn't exceed what is allowed in a cookie will all be stored in the CookieBackend. (2) If the CookieBackend can store all messages, the SessionBackend won't be written to at all. """ storage = self.get_storage() response = self.get_response() # Overwrite the _store method of the fallback storage to prove it isn't # used (it would cause a TypeError: 'NoneType' object is not callable). self.get_session_storage(storage)._store = None for i in range(5): storage.add(constants.INFO, str(i) * 100) storage.update(response) cookie_storing = self.stored_cookie_messages_count(storage, response) self.assertEqual(cookie_storing, 5) session_storing = self.stored_session_messages_count(storage, response) self.assertEqual(session_storing, 0) def test_session_fallback(self): """ Confirms that, if the data exceeds what is allowed in a cookie, messages which did not fit are stored in the SessionBackend. """ storage = self.get_storage() response = self.get_response() # see comment in CookieText.test_cookie_max_length msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37) for i in range(5): storage.add(constants.INFO, str(i) * msg_size) storage.update(response) cookie_storing = self.stored_cookie_messages_count(storage, response) self.assertEqual(cookie_storing, 4) session_storing = self.stored_session_messages_count(storage, response) self.assertEqual(session_storing, 1) def test_session_fallback_only(self): """ Confirms that large messages, none of which fit in a cookie, are stored in the SessionBackend (and nothing is stored in the CookieBackend). 
""" storage = self.get_storage() response = self.get_response() storage.add(constants.INFO, 'x' * 5000) storage.update(response) cookie_storing = self.stored_cookie_messages_count(storage, response) self.assertEqual(cookie_storing, 0) session_storing = self.stored_session_messages_count(storage, response) self.assertEqual(session_storing, 1)
apache-2.0
jaimeantena4040/MiSitioWeb
lib/toaster/orm/migrations/0008_auto__chg_field_variablehistory_operation__chg_field_recipe_descriptio.py
6
16612
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'VariableHistory.operation' db.alter_column(u'orm_variablehistory', 'operation', self.gf('django.db.models.fields.CharField')(max_length=64)) # Changing field 'Recipe.description' db.alter_column(u'orm_recipe', 'description', self.gf('django.db.models.fields.TextField')()) # Changing field 'Target_Image_File.file_name' db.alter_column(u'orm_target_image_file', 'file_name', self.gf('django.db.models.fields.FilePathField')(max_length=254)) # Changing field 'Package.description' db.alter_column(u'orm_package', 'description', self.gf('django.db.models.fields.TextField')()) def backwards(self, orm): # Changing field 'VariableHistory.operation' db.alter_column(u'orm_variablehistory', 'operation', self.gf('django.db.models.fields.CharField')(max_length=16)) # Changing field 'Recipe.description' db.alter_column(u'orm_recipe', 'description', self.gf('django.db.models.fields.CharField')(max_length=100)) # Changing field 'Target_Image_File.file_name' db.alter_column(u'orm_target_image_file', 'file_name', self.gf('django.db.models.fields.FilePathField')(max_length=100)) # Changing field 'Package.description' db.alter_column(u'orm_package', 'description', self.gf('django.db.models.fields.CharField')(max_length=200)) models = { u'orm.build': { 'Meta': {'object_name': 'Build'}, 'bitbake_version': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'build_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'completed_on': ('django.db.models.fields.DateTimeField', [], {}), 'cooker_log_path': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'distro': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'distro_version': ('django.db.models.fields.CharField', [], {'max_length': 
'100'}), 'errors_no': ('django.db.models.fields.IntegerField', [], {'default': '0'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'machine': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'outcome': ('django.db.models.fields.IntegerField', [], {'default': '2'}), 'started_on': ('django.db.models.fields.DateTimeField', [], {}), 'timespent': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'warnings_no': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, u'orm.helptext': { 'Meta': {'object_name': 'HelpText'}, 'area': ('django.db.models.fields.IntegerField', [], {}), 'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'helptext_build'", 'to': u"orm['orm.Build']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'text': ('django.db.models.fields.TextField', [], {}) }, u'orm.layer': { 'Meta': {'object_name': 'Layer'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layer_index_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'local_path': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'orm.layer_version': { 'Meta': {'object_name': 'Layer_Version'}, 'branch': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'layer_version_build'", 'to': u"orm['orm.Build']"}), 'commit': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'layer_version_layer'", 'to': u"orm['orm.Layer']"}), 'priority': ('django.db.models.fields.IntegerField', [], {}) }, u'orm.logmessage': { 'Meta': 
{'object_name': 'LogMessage'}, 'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'lineno': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}), 'pathname': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Task']", 'null': 'True', 'blank': 'True'}) }, u'orm.package': { 'Meta': {'object_name': 'Package'}, 'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'installed_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}), 'installed_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'license': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Recipe']", 'null': 'True'}), 'revision': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'section': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, u'orm.package_dependency': { 'Meta': {'object_name': 'Package_Dependency'}, 'dep_type': ('django.db.models.fields.IntegerField', [], {}), 'depends_on': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_target'", 'to': u"orm['orm.Package']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_source'", 'to': u"orm['orm.Package']"}), 'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']", 'null': 'True'}) }, u'orm.package_file': { 'Meta': {'object_name': 'Package_File'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildfilelist_package'", 'to': u"orm['orm.Package']"}), 'path': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, u'orm.recipe': { 'Meta': {'object_name': 'Recipe'}, 'bugtracker': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'file_path': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}), 'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'layer_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recipe_layer_version'", 'to': u"orm['orm.Layer_Version']"}), 'license': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, 
u'orm.recipe_dependency': { 'Meta': {'object_name': 'Recipe_Dependency'}, 'dep_type': ('django.db.models.fields.IntegerField', [], {}), 'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_depends'", 'to': u"orm['orm.Recipe']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_recipe'", 'to': u"orm['orm.Recipe']"}) }, u'orm.target': { 'Meta': {'object_name': 'Target'}, 'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'is_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'license_manifest_path': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}), 'target': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'orm.target_file': { 'Meta': {'object_name': 'Target_File'}, 'directory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'directory_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}), 'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inodetype': ('django.db.models.fields.IntegerField', [], {}), 'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}), 'permission': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'size': ('django.db.models.fields.IntegerField', [], {}), 'sym_target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'symlink_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}), 'target': ('django.db.models.fields.related.ForeignKey', [], {'to': 
u"orm['orm.Target']"}) }, u'orm.target_image_file': { 'Meta': {'object_name': 'Target_Image_File'}, 'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '254'}), 'file_size': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"}) }, u'orm.target_installed_package': { 'Meta': {'object_name': 'Target_Installed_Package'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildtargetlist_package'", 'to': u"orm['orm.Package']"}), 'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"}) }, u'orm.task': { 'Meta': {'ordering': "('order', 'recipe')", 'unique_together': "(('build', 'recipe', 'task_name'),)", 'object_name': 'Task'}, 'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_build'", 'to': u"orm['orm.Build']"}), 'cpu_usage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}), 'disk_io': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'elapsed_time': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'line_number': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'logfile': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}), 'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'outcome': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'path_to_sstate_obj': ('django.db.models.fields.FilePathField', [], {'max_length': '500', 'blank': 'True'}), 'recipe': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'build_recipe'", 'to': u"orm['orm.Recipe']"}), 'script_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'source_url': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}), 'sstate_checksum': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'sstate_result': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'task_executed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'task_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'work_directory': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}) }, u'orm.task_dependency': { 'Meta': {'object_name': 'Task_Dependency'}, 'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_depends'", 'to': u"orm['orm.Task']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_task'", 'to': u"orm['orm.Task']"}) }, u'orm.variable': { 'Meta': {'object_name': 'Variable'}, 'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variable_build'", 'to': u"orm['orm.Build']"}), 'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'human_readable_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'variable_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'variable_value': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, u'orm.variablehistory': { 'Meta': {'object_name': 'VariableHistory'}, 'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'line_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'operation': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'variable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vhistory'", 'to': u"orm['orm.Variable']"}) } } complete_apps = ['orm']
gpl-2.0
tinchoss/Python_Android
python/gdata/src/gdata/tlslite/integration/SMTP_TLS.py
319
4739
"""TLS Lite + smtplib.""" from smtplib import SMTP from gdata.tlslite.TLSConnection import TLSConnection from gdata.tlslite.integration.ClientHelper import ClientHelper class SMTP_TLS(SMTP): """This class extends L{smtplib.SMTP} with TLS support.""" def starttls(self, username=None, password=None, sharedKey=None, certChain=None, privateKey=None, cryptoID=None, protocol=None, x509Fingerprint=None, x509TrustList=None, x509CommonName=None, settings=None): """Puts the connection to the SMTP server into TLS mode. If the server supports TLS, this will encrypt the rest of the SMTP session. For client authentication, use one of these argument combinations: - username, password (SRP) - username, sharedKey (shared-key) - certChain, privateKey (certificate) For server authentication, you can either rely on the implicit mutual authentication performed by SRP or shared-keys, or you can do certificate-based server authentication with one of these argument combinations: - cryptoID[, protocol] (requires cryptoIDlib) - x509Fingerprint - x509TrustList[, x509CommonName] (requires cryptlib_py) Certificate-based server authentication is compatible with SRP or certificate-based client authentication. It is not compatible with shared-keys. The caller should be prepared to handle TLS-specific exceptions. See the client handshake functions in L{tlslite.TLSConnection.TLSConnection} for details on which exceptions might be raised. @type username: str @param username: SRP or shared-key username. Requires the 'password' or 'sharedKey' argument. @type password: str @param password: SRP password for mutual authentication. Requires the 'username' argument. @type sharedKey: str @param sharedKey: Shared key for mutual authentication. Requires the 'username' argument. @type certChain: L{tlslite.X509CertChain.X509CertChain} or L{cryptoIDlib.CertChain.CertChain} @param certChain: Certificate chain for client authentication. Requires the 'privateKey' argument. 
Excludes the SRP or shared-key related arguments. @type privateKey: L{tlslite.utils.RSAKey.RSAKey} @param privateKey: Private key for client authentication. Requires the 'certChain' argument. Excludes the SRP or shared-key related arguments. @type cryptoID: str @param cryptoID: cryptoID for server authentication. Mutually exclusive with the 'x509...' arguments. @type protocol: str @param protocol: cryptoID protocol URI for server authentication. Requires the 'cryptoID' argument. @type x509Fingerprint: str @param x509Fingerprint: Hex-encoded X.509 fingerprint for server authentication. Mutually exclusive with the 'cryptoID' and 'x509TrustList' arguments. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The other party must present a certificate chain which extends to one of these root certificates. The cryptlib_py module must be installed to use this parameter. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. @type x509CommonName: str @param x509CommonName: The end-entity certificate's 'CN' field must match this value. For a web server, this is typically a server name such as 'www.amazon.com'. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. Requires the 'x509TrustList' argument. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. """ (resp, reply) = self.docmd("STARTTLS") if resp == 220: helper = ClientHelper( username, password, sharedKey, certChain, privateKey, cryptoID, protocol, x509Fingerprint, x509TrustList, x509CommonName, settings) conn = TLSConnection(self.sock) conn.closeSocket = True helper._handshake(conn) self.sock = conn self.file = conn.makefile('rb') return (resp, reply)
apache-2.0
JamesMura/sentry
src/sentry/south_migrations/0008_auto__chg_field_message_view__add_field_groupedmessage_data__chg_field.py
36
5375
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'Message.view' db.alter_column('sentry_message', 'view', self.gf('django.db.models.fields.CharField')(max_length=200, null=True)) # Adding field 'GroupedMessage.data' db.add_column('sentry_groupedmessage', 'data', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False) # Changing field 'GroupedMessage.view' db.alter_column('sentry_groupedmessage', 'view', self.gf('django.db.models.fields.CharField')(max_length=200, null=True)) # Changing field 'FilterValue.value' db.alter_column('sentry_filtervalue', 'value', self.gf('django.db.models.fields.CharField')(max_length=200)) def backwards(self, orm): # Changing field 'Message.view' db.alter_column('sentry_message', 'view', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)) # Deleting field 'GroupedMessage.data' db.delete_column('sentry_groupedmessage', 'data') # Changing field 'GroupedMessage.view' db.alter_column('sentry_groupedmessage', 'view', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)) # Changing field 'FilterValue.value' db.alter_column('sentry_filtervalue', 'value', self.gf('django.db.models.fields.CharField')(max_length=255)) models = { 'sentry.filtervalue': { 'Meta': {'unique_together': "(('key', 'value'),)", 'object_name': 'FilterValue'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.groupedmessage': { 'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'class_name': ('django.db.models.fields.CharField', [], 
{'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, 'sentry.message': { 'Meta': {'object_name': 'Message'}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 
'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['sentry']
bsd-3-clause
ArchiDroid/android_kernel_sony_msm8x27
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
11088
3246
# Core.py - Python extension for perf script, core functions # # Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. from collections import defaultdict def autodict(): return defaultdict(autodict) flag_fields = autodict() symbolic_fields = autodict() def define_flag_field(event_name, field_name, delim): flag_fields[event_name][field_name]['delim'] = delim def define_flag_value(event_name, field_name, value, field_str): flag_fields[event_name][field_name]['values'][value] = field_str def define_symbolic_field(event_name, field_name): # nothing to do, really pass def define_symbolic_value(event_name, field_name, value, field_str): symbolic_fields[event_name][field_name]['values'][value] = field_str def flag_str(event_name, field_name, value): string = "" if flag_fields[event_name][field_name]: print_delim = 0 keys = flag_fields[event_name][field_name]['values'].keys() keys.sort() for idx in keys: if not value and not idx: string += flag_fields[event_name][field_name]['values'][idx] break if idx and (value & idx) == idx: if print_delim and flag_fields[event_name][field_name]['delim']: string += " " + flag_fields[event_name][field_name]['delim'] + " " string += flag_fields[event_name][field_name]['values'][idx] print_delim = 1 value &= ~idx return string def symbol_str(event_name, field_name, value): string = "" if symbolic_fields[event_name][field_name]: keys = symbolic_fields[event_name][field_name]['values'].keys() keys.sort() for idx in keys: if not value and not idx: string = symbolic_fields[event_name][field_name]['values'][idx] break if (value == idx): string = symbolic_fields[event_name][field_name]['values'][idx] break return string trace_flags = { 0x00: "NONE", \ 0x01: "IRQS_OFF", \ 0x02: "IRQS_NOSUPPORT", \ 0x04: "NEED_RESCHED", \ 0x08: "HARDIRQ", \ 0x10: "SOFTIRQ" } def trace_flag_str(value): string = 
"" print_delim = 0 keys = trace_flags.keys() for idx in keys: if not value and not idx: string += "NONE" break if idx and (value & idx) == idx: if print_delim: string += " | "; string += trace_flags[idx] print_delim = 1 value &= ~idx return string def taskState(state): states = { 0 : "R", 1 : "S", 2 : "D", 64: "DEAD" } if state not in states: return "Unknown" return states[state] class EventHeaders: def __init__(self, common_cpu, common_secs, common_nsecs, common_pid, common_comm): self.cpu = common_cpu self.secs = common_secs self.nsecs = common_nsecs self.pid = common_pid self.comm = common_comm def ts(self): return (self.secs * (10 ** 9)) + self.nsecs def ts_format(self): return "%d.%d" % (self.secs, int(self.nsecs / 1000))
gpl-2.0
TheGurke/Progenitus
sleekxmpp/plugins/xep_0004/stanza/form.py
3
8060
""" SleekXMPP: The Sleek XMPP Library Copyright (C) 2011 Nathanael C. Fritz, Lance J.T. Stout This file is part of SleekXMPP. See the file LICENSE for copying permission. """ import copy import logging from sleekxmpp.thirdparty import OrderedDict from sleekxmpp.xmlstream import ElementBase, ET from sleekxmpp.plugins.xep_0004.stanza import FormField log = logging.getLogger(__name__) class Form(ElementBase): namespace = 'jabber:x:data' name = 'x' plugin_attrib = 'form' interfaces = set(('fields', 'instructions', 'items', 'reported', 'title', 'type', 'values')) sub_interfaces = set(('title',)) form_types = set(('cancel', 'form', 'result', 'submit')) def __init__(self, *args, **kwargs): title = None if 'title' in kwargs: title = kwargs['title'] del kwargs['title'] ElementBase.__init__(self, *args, **kwargs) if title is not None: self['title'] = title def setup(self, xml=None): if ElementBase.setup(self, xml): # If we had to generate xml self['type'] = 'form' @property def field(self): return self['fields'] def set_type(self, ftype): self._set_attr('type', ftype) if ftype == 'submit': fields = self['fields'] for var in fields: field = fields[var] del field['type'] del field['label'] del field['desc'] del field['required'] del field['options'] elif ftype == 'cancel': del self['fields'] def add_field(self, var='', ftype=None, label='', desc='', required=False, value=None, options=None, **kwargs): kwtype = kwargs.get('type', None) if kwtype is None: kwtype = ftype field = FormField(parent=self) field['var'] = var field['type'] = kwtype field['value'] = value if self['type'] in ('form', 'result'): field['label'] = label field['desc'] = desc field['required'] = required if options is not None: field['options'] = options else: del field['type'] return field def getXML(self, type='submit'): self['type'] = type log.warning("Form.getXML() is deprecated API compatibility " + \ "with plugins/old_0004.py") return self.xml def fromXML(self, xml): log.warning("Form.fromXML() is 
deprecated API compatibility " + \ "with plugins/old_0004.py") n = Form(xml=xml) return n def add_item(self, values): itemXML = ET.Element('{%s}item' % self.namespace) self.xml.append(itemXML) reported_vars = self['reported'].keys() for var in reported_vars: field = FormField() field._type = self['reported'][var]['type'] field['var'] = var field['value'] = values.get(var, None) itemXML.append(field.xml) def add_reported(self, var, ftype=None, label='', desc='', **kwargs): kwtype = kwargs.get('type', None) if kwtype is None: kwtype = ftype reported = self.xml.find('{%s}reported' % self.namespace) if reported is None: reported = ET.Element('{%s}reported' % self.namespace) self.xml.append(reported) fieldXML = ET.Element('{%s}field' % FormField.namespace) reported.append(fieldXML) field = FormField(xml=fieldXML) field['var'] = var field['type'] = kwtype field['label'] = label field['desc'] = desc return field def cancel(self): self['type'] = 'cancel' def del_fields(self): fieldsXML = self.xml.findall('{%s}field' % FormField.namespace) for fieldXML in fieldsXML: self.xml.remove(fieldXML) def del_instructions(self): instsXML = self.xml.findall('{%s}instructions') for instXML in instsXML: self.xml.remove(instXML) def del_items(self): itemsXML = self.xml.find('{%s}item' % self.namespace) for itemXML in itemsXML: self.xml.remove(itemXML) def del_reported(self): reportedXML = self.xml.find('{%s}reported' % self.namespace) if reportedXML is not None: self.xml.remove(reportedXML) def get_fields(self, use_dict=False): fields = OrderedDict() fieldsXML = self.xml.findall('{%s}field' % FormField.namespace) for fieldXML in fieldsXML: field = FormField(xml=fieldXML) fields[field['var']] = field return fields def get_instructions(self): instructions = '' instsXML = self.xml.findall('{%s}instructions' % self.namespace) return "\n".join([instXML.text for instXML in instsXML]) def get_items(self): items = [] itemsXML = self.xml.findall('{%s}item' % self.namespace) for itemXML in 
itemsXML: item = OrderedDict() fieldsXML = itemXML.findall('{%s}field' % FormField.namespace) for fieldXML in fieldsXML: field = FormField(xml=fieldXML) item[field['var']] = field['value'] items.append(item) return items def get_reported(self): fields = OrderedDict() xml = self.xml.findall('{%s}reported/{%s}field' % (self.namespace, FormField.namespace)) for field in xml: field = FormField(xml=field) fields[field['var']] = field return fields def get_values(self): values = OrderedDict() fields = self['fields'] for var in fields: values[var] = fields[var]['value'] return values def reply(self): if self['type'] == 'form': self['type'] = 'submit' elif self['type'] == 'submit': self['type'] = 'result' def set_fields(self, fields): del self['fields'] if not isinstance(fields, list): fields = fields.items() for var, field in fields: field['var'] = var self.add_field(**field) def set_instructions(self, instructions): del self['instructions'] if instructions in [None, '']: return instructions = instructions.split('\n') for instruction in instructions: inst = ET.Element('{%s}instructions' % self.namespace) inst.text = instruction self.xml.append(inst) def set_items(self, items): for item in items: self.add_item(item) def set_reported(self, reported): for var in reported: field = reported[var] field['var'] = var self.add_reported(var, **field) def set_values(self, values): fields = self['fields'] for field in values: fields[field]['value'] = values[field] def merge(self, other): new = copy.copy(self) if type(other) == dict: new['values'] = other return new nfields = new['fields'] ofields = other['fields'] nfields.update(ofields) new['fields'] = nfields return new Form.setType = Form.set_type Form.addField = Form.add_field Form.addItem = Form.add_item Form.addReported = Form.add_reported Form.delFields = Form.del_fields Form.delInstructions = Form.del_instructions Form.delItems = Form.del_items Form.delReported = Form.del_reported Form.getFields = Form.get_fields 
Form.getInstructions = Form.get_instructions Form.getItems = Form.get_items Form.getReported = Form.get_reported Form.getValues = Form.get_values Form.setFields = Form.set_fields Form.setInstructions = Form.set_instructions Form.setItems = Form.set_items Form.setReported = Form.set_reported Form.setValues = Form.set_values
gpl-3.0
fragaria/suds
suds/xsd/doctor.py
204
6308
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) """ The I{doctor} module provides classes for fixing broken (sick) schema(s). """ from logging import getLogger from suds.sax import splitPrefix, Namespace from suds.sax.element import Element from suds.plugin import DocumentPlugin, DocumentContext log = getLogger(__name__) class Doctor: """ Schema Doctor. """ def examine(self, root): """ Examine and repair the schema (if necessary). @param root: A schema root element. @type root: L{Element} """ pass class Practice(Doctor): """ A collection of doctors. @ivar doctors: A list of doctors. @type doctors: list """ def __init__(self): self.doctors = [] def add(self, doctor): """ Add a doctor to the practice @param doctor: A doctor to add. @type doctor: L{Doctor} """ self.doctors.append(doctor) def examine(self, root): for d in self.doctors: d.examine(root) return root class TnsFilter: """ Target Namespace filter. @ivar tns: A list of target namespaces. @type tns: [str,...] """ def __init__(self, *tns): """ @param tns: A list of target namespaces. @type tns: [str,...] """ self.tns = [] self.add(*tns) def add(self, *tns): """ Add I{targetNamesapces} to be added. @param tns: A list of target namespaces. 
@type tns: [str,...] """ self.tns += tns def match(self, root, ns): """ Match by I{targetNamespace} excluding those that are equal to the specified namespace to prevent adding an import to itself. @param root: A schema root. @type root: L{Element} """ tns = root.get('targetNamespace') if len(self.tns): matched = ( tns in self.tns ) else: matched = 1 itself = ( ns == tns ) return ( matched and not itself ) class Import: """ An <xs:import/> to be applied. @cvar xsdns: The XSD namespace. @type xsdns: (p,u) @ivar ns: An import namespace. @type ns: str @ivar location: An optional I{schemaLocation}. @type location: str @ivar filter: A filter used to restrict application to a particular schema. @type filter: L{TnsFilter} """ xsdns = Namespace.xsdns def __init__(self, ns, location=None): """ @param ns: An import namespace. @type ns: str @param location: An optional I{schemaLocation}. @type location: str """ self.ns = ns self.location = location self.filter = TnsFilter() def setfilter(self, filter): """ Set the filter. @param filter: A filter to set. @type filter: L{TnsFilter} """ self.filter = filter def apply(self, root): """ Apply the import (rule) to the specified schema. If the schema does not already contain an import for the I{namespace} specified here, it is added. @param root: A schema root. @type root: L{Element} """ if not self.filter.match(root, self.ns): return if self.exists(root): return node = Element('import', ns=self.xsdns) node.set('namespace', self.ns) if self.location is not None: node.set('schemaLocation', self.location) log.debug('inserting: %s', node) root.insert(node) def add(self, root): """ Add an <xs:import/> to the specified schema root. @param root: A schema root. 
@type root: L{Element} """ node = Element('import', ns=self.xsdns) node.set('namespace', self.ns) if self.location is not None: node.set('schemaLocation', self.location) log.debug('%s inserted', node) root.insert(node) def exists(self, root): """ Check to see if the <xs:import/> already exists in the specified schema root by matching I{namesapce}. @param root: A schema root. @type root: L{Element} """ for node in root.children: if node.name != 'import': continue ns = node.get('namespace') if self.ns == ns: return 1 return 0 class ImportDoctor(Doctor, DocumentPlugin): """ Doctor used to fix missing imports. @ivar imports: A list of imports to apply. @type imports: [L{Import},...] """ def __init__(self, *imports): """ """ self.imports = [] self.add(*imports) def add(self, *imports): """ Add a namesapce to be checked. @param imports: A list of L{Import} objects. @type imports: [L{Import},..] """ self.imports += imports def examine(self, node): for imp in self.imports: imp.apply(node) def parsed(self, context): node = context.document # xsd root if node.name == 'schema' and Namespace.xsd(node.namespace()): self.examine(node) return # look deeper context = DocumentContext() for child in node: context.document = child self.parsed(context)
lgpl-3.0
pmelendez-shomi/cb_console
node_modules/couchbase/node_modules/prebuild/node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py
1569
23354
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions to perform Xcode-style build steps. These functions are executed via gyp-mac-tool when using the Makefile generator. """ import fcntl import fnmatch import glob import json import os import plistlib import re import shutil import string import subprocess import sys import tempfile def main(args): executor = MacTool() exit_code = executor.Dispatch(args) if exit_code is not None: sys.exit(exit_code) class MacTool(object): """This class performs all the Mac tooling steps. The methods can either be executed directly, or dispatched from an argument list.""" def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like copy-info-plist to CopyInfoPlist""" return name_string.title().replace('-', '') def ExecCopyBundleResource(self, source, dest, convert_to_binary): """Copies a resource file to the bundle/Resources directory, performing any necessary compilation on each resource.""" extension = os.path.splitext(source)[1].lower() if os.path.isdir(source): # Copy tree. # TODO(thakis): This copies file attributes like mtime, while the # single-file branch below doesn't. This should probably be changed to # be consistent with the single-file branch. 
if os.path.exists(dest): shutil.rmtree(dest) shutil.copytree(source, dest) elif extension == '.xib': return self._CopyXIBFile(source, dest) elif extension == '.storyboard': return self._CopyXIBFile(source, dest) elif extension == '.strings': self._CopyStringsFile(source, dest, convert_to_binary) else: shutil.copy(source, dest) def _CopyXIBFile(self, source, dest): """Compiles a XIB file with ibtool into a binary plist in the bundle.""" # ibtool sometimes crashes with relative paths. See crbug.com/314728. base = os.path.dirname(os.path.realpath(__file__)) if os.path.relpath(source): source = os.path.join(base, source) if os.path.relpath(dest): dest = os.path.join(base, dest) args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices', '--output-format', 'human-readable-text', '--compile', dest, source] ibtool_section_re = re.compile(r'/\*.*\*/') ibtool_re = re.compile(r'.*note:.*is clipping its content') ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE) current_section_header = None for line in ibtoolout.stdout: if ibtool_section_re.match(line): current_section_header = line elif not ibtool_re.match(line): if current_section_header: sys.stdout.write(current_section_header) current_section_header = None sys.stdout.write(line) return ibtoolout.returncode def _ConvertToBinary(self, dest): subprocess.check_call([ 'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest]) def _CopyStringsFile(self, source, dest, convert_to_binary): """Copies a .strings file using iconv to reconvert the input into UTF-16.""" input_code = self._DetectInputEncoding(source) or "UTF-8" # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints # CFPropertyListCreateFromXMLData(): Old-style plist parser: missing # semicolon in dictionary. # on invalid files. Do the same kind of validation. 
import CoreFoundation s = open(source, 'rb').read() d = CoreFoundation.CFDataCreate(None, s, len(s)) _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None) if error: return fp = open(dest, 'wb') fp.write(s.decode(input_code).encode('UTF-16')) fp.close() if convert_to_binary == 'True': self._ConvertToBinary(dest) def _DetectInputEncoding(self, file_name): """Reads the first few bytes from file_name and tries to guess the text encoding. Returns None as a guess if it can't detect it.""" fp = open(file_name, 'rb') try: header = fp.read(3) except e: fp.close() return None fp.close() if header.startswith("\xFE\xFF"): return "UTF-16" elif header.startswith("\xFF\xFE"): return "UTF-16" elif header.startswith("\xEF\xBB\xBF"): return "UTF-8" else: return None def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys): """Copies the |source| Info.plist to the destination directory |dest|.""" # Read the source Info.plist into memory. fd = open(source, 'r') lines = fd.read() fd.close() # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild). plist = plistlib.readPlistFromString(lines) if keys: plist = dict(plist.items() + json.loads(keys[0]).items()) lines = plistlib.writePlistToString(plist) # Go through all the environment variables and replace them as variables in # the file. IDENT_RE = re.compile(r'[/\s]') for key in os.environ: if key.startswith('_'): continue evar = '${%s}' % key evalue = os.environ[key] lines = string.replace(lines, evar, evalue) # Xcode supports various suffices on environment variables, which are # all undocumented. :rfc1034identifier is used in the standard project # template these days, and :identifier was used earlier. They are used to # convert non-url characters into things that look like valid urls -- # except that the replacement character for :identifier, '_' isn't valid # in a URL either -- oops, hence :rfc1034identifier was born. 
evar = '${%s:identifier}' % key evalue = IDENT_RE.sub('_', os.environ[key]) lines = string.replace(lines, evar, evalue) evar = '${%s:rfc1034identifier}' % key evalue = IDENT_RE.sub('-', os.environ[key]) lines = string.replace(lines, evar, evalue) # Remove any keys with values that haven't been replaced. lines = lines.split('\n') for i in range(len(lines)): if lines[i].strip().startswith("<string>${"): lines[i] = None lines[i - 1] = None lines = '\n'.join(filter(lambda x: x is not None, lines)) # Write out the file with variables replaced. fd = open(dest, 'w') fd.write(lines) fd.close() # Now write out PkgInfo file now that the Info.plist file has been # "compiled". self._WritePkgInfo(dest) if convert_to_binary == 'True': self._ConvertToBinary(dest) def _WritePkgInfo(self, info_plist): """This writes the PkgInfo file from the data stored in Info.plist.""" plist = plistlib.readPlist(info_plist) if not plist: return # Only create PkgInfo for executable types. package_type = plist['CFBundlePackageType'] if package_type != 'APPL': return # The format of PkgInfo is eight characters, representing the bundle type # and bundle signature, each four characters. If that is missing, four # '?' characters are used instead. signature_code = plist.get('CFBundleSignature', '????') if len(signature_code) != 4: # Wrong length resets everything, too. signature_code = '?' * 4 dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo') fp = open(dest, 'w') fp.write('%s%s' % (package_type, signature_code)) fp.close() def ExecFlock(self, lockfile, *cmd_list): """Emulates the most basic behavior of Linux's flock(1).""" # Rely on exception handling to report errors. 
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666) fcntl.flock(fd, fcntl.LOCK_EX) return subprocess.call(cmd_list) def ExecFilterLibtool(self, *cmd_list): """Calls libtool and filters out '/path/to/libtool: file: foo.o has no symbols'.""" libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$') libtool_re5 = re.compile( r'^.*libtool: warning for library: ' + r'.* the table of contents is empty ' + r'\(no object file members in the library define global symbols\)$') env = os.environ.copy() # Ref: # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c # The problem with this flag is that it resets the file mtime on the file to # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone. env['ZERO_AR_DATE'] = '1' libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) _, err = libtoolout.communicate() for line in err.splitlines(): if not libtool_re.match(line) and not libtool_re5.match(line): print >>sys.stderr, line # Unconditionally touch the output .a file on the command line if present # and the command succeeded. A bit hacky. if not libtoolout.returncode: for i in range(len(cmd_list) - 1): if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'): os.utime(cmd_list[i+1], None) break return libtoolout.returncode def ExecPackageFramework(self, framework, version): """Takes a path to Something.framework and the Current version of that and sets up all the symlinks.""" # Find the name of the binary based on the part before the ".framework". binary = os.path.basename(framework).split('.')[0] CURRENT = 'Current' RESOURCES = 'Resources' VERSIONS = 'Versions' if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)): # Binary-less frameworks don't seem to contain symlinks (see e.g. # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle). return # Move into the framework directory to set the symlinks correctly. pwd = os.getcwd() os.chdir(framework) # Set up the Current version. 
self._Relink(version, os.path.join(VERSIONS, CURRENT)) # Set up the root symlinks. self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary) self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES) # Back to where we were before! os.chdir(pwd) def _Relink(self, dest, link): """Creates a symlink to |dest| named |link|. If |link| already exists, it is overwritten.""" if os.path.lexists(link): os.remove(link) os.symlink(dest, link) def ExecCompileXcassets(self, keys, *inputs): """Compiles multiple .xcassets files into a single .car file. This invokes 'actool' to compile all the inputs .xcassets files. The |keys| arguments is a json-encoded dictionary of extra arguments to pass to 'actool' when the asset catalogs contains an application icon or a launch image. Note that 'actool' does not create the Assets.car file if the asset catalogs does not contains imageset. """ command_line = [ 'xcrun', 'actool', '--output-format', 'human-readable-text', '--compress-pngs', '--notices', '--warnings', '--errors', ] is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ if is_iphone_target: platform = os.environ['CONFIGURATION'].split('-')[-1] if platform not in ('iphoneos', 'iphonesimulator'): platform = 'iphonesimulator' command_line.extend([ '--platform', platform, '--target-device', 'iphone', '--target-device', 'ipad', '--minimum-deployment-target', os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile', os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']), ]) else: command_line.extend([ '--platform', 'macosx', '--target-device', 'mac', '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'], '--compile', os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']), ]) if keys: keys = json.loads(keys) for key, value in keys.iteritems(): arg_name = '--' + key if isinstance(value, bool): if value: command_line.append(arg_name) elif isinstance(value, list): for v in value: command_line.append(arg_name) command_line.append(str(v)) else: 
command_line.append(arg_name) command_line.append(str(value)) # Note: actool crashes if inputs path are relative, so use os.path.abspath # to get absolute path name for inputs. command_line.extend(map(os.path.abspath, inputs)) subprocess.check_call(command_line) def ExecMergeInfoPlist(self, output, *inputs): """Merge multiple .plist files into a single .plist file.""" merged_plist = {} for path in inputs: plist = self._LoadPlistMaybeBinary(path) self._MergePlist(merged_plist, plist) plistlib.writePlist(merged_plist, output) def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning): """Code sign a bundle. This function tries to code sign an iOS bundle, following the same algorithm as Xcode: 1. copy ResourceRules.plist from the user or the SDK into the bundle, 2. pick the provisioning profile that best match the bundle identifier, and copy it into the bundle as embedded.mobileprovision, 3. copy Entitlements.plist from user or SDK next to the bundle, 4. code sign the bundle. """ resource_rules_path = self._InstallResourceRules(resource_rules) substitutions, overrides = self._InstallProvisioningProfile( provisioning, self._GetCFBundleIdentifier()) entitlements_path = self._InstallEntitlements( entitlements, substitutions, overrides) subprocess.check_call([ 'codesign', '--force', '--sign', key, '--resource-rules', resource_rules_path, '--entitlements', entitlements_path, os.path.join( os.environ['TARGET_BUILD_DIR'], os.environ['FULL_PRODUCT_NAME'])]) def _InstallResourceRules(self, resource_rules): """Installs ResourceRules.plist from user or SDK into the bundle. Args: resource_rules: string, optional, path to the ResourceRules.plist file to use, default to "${SDKROOT}/ResourceRules.plist" Returns: Path to the copy of ResourceRules.plist into the bundle. 
""" source_path = resource_rules target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'ResourceRules.plist') if not source_path: source_path = os.path.join( os.environ['SDKROOT'], 'ResourceRules.plist') shutil.copy2(source_path, target_path) return target_path def _InstallProvisioningProfile(self, profile, bundle_identifier): """Installs embedded.mobileprovision into the bundle. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple containing two dictionary: variables substitutions and values to overrides when generating the entitlements file. """ source_path, provisioning_data, team_id = self._FindProvisioningProfile( profile, bundle_identifier) target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'embedded.mobileprovision') shutil.copy2(source_path, target_path) substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.') return substitutions, provisioning_data['Entitlements'] def _FindProvisioningProfile(self, profile, bundle_identifier): """Finds the .mobileprovision file to use for signing the bundle. Checks all the installed provisioning profiles (or if the user specified the PROVISIONING_PROFILE variable, only consult it) and select the most specific that correspond to the bundle identifier. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple of the path to the selected provisioning profile, the data of the embedded plist in the provisioning profile and the team identifier to use for code signing. Raises: SystemExit: if no .mobileprovision can be used to sign the bundle. 
""" profiles_dir = os.path.join( os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles') if not os.path.isdir(profiles_dir): print >>sys.stderr, ( 'cannot find mobile provisioning for %s' % bundle_identifier) sys.exit(1) provisioning_profiles = None if profile: profile_path = os.path.join(profiles_dir, profile + '.mobileprovision') if os.path.exists(profile_path): provisioning_profiles = [profile_path] if not provisioning_profiles: provisioning_profiles = glob.glob( os.path.join(profiles_dir, '*.mobileprovision')) valid_provisioning_profiles = {} for profile_path in provisioning_profiles: profile_data = self._LoadProvisioningProfile(profile_path) app_id_pattern = profile_data.get( 'Entitlements', {}).get('application-identifier', '') for team_identifier in profile_data.get('TeamIdentifier', []): app_id = '%s.%s' % (team_identifier, bundle_identifier) if fnmatch.fnmatch(app_id, app_id_pattern): valid_provisioning_profiles[app_id_pattern] = ( profile_path, profile_data, team_identifier) if not valid_provisioning_profiles: print >>sys.stderr, ( 'cannot find mobile provisioning for %s' % bundle_identifier) sys.exit(1) # If the user has multiple provisioning profiles installed that can be # used for ${bundle_identifier}, pick the most specific one (ie. the # provisioning profile whose pattern is the longest). selected_key = max(valid_provisioning_profiles, key=lambda v: len(v)) return valid_provisioning_profiles[selected_key] def _LoadProvisioningProfile(self, profile_path): """Extracts the plist embedded in a provisioning profile. Args: profile_path: string, path to the .mobileprovision file Returns: Content of the plist embedded in the provisioning profile as a dictionary. 
""" with tempfile.NamedTemporaryFile() as temp: subprocess.check_call([ 'security', 'cms', '-D', '-i', profile_path, '-o', temp.name]) return self._LoadPlistMaybeBinary(temp.name) def _MergePlist(self, merged_plist, plist): """Merge |plist| into |merged_plist|.""" for key, value in plist.iteritems(): if isinstance(value, dict): merged_value = merged_plist.get(key, {}) if isinstance(merged_value, dict): self._MergePlist(merged_value, value) merged_plist[key] = merged_value else: merged_plist[key] = value else: merged_plist[key] = value def _LoadPlistMaybeBinary(self, plist_path): """Loads into a memory a plist possibly encoded in binary format. This is a wrapper around plistlib.readPlist that tries to convert the plist to the XML format if it can't be parsed (assuming that it is in the binary format). Args: plist_path: string, path to a plist file, in XML or binary format Returns: Content of the plist as a dictionary. """ try: # First, try to read the file using plistlib that only supports XML, # and if an exception is raised, convert a temporary copy to XML and # load that copy. return plistlib.readPlist(plist_path) except: pass with tempfile.NamedTemporaryFile() as temp: shutil.copy2(plist_path, temp.name) subprocess.check_call(['plutil', '-convert', 'xml1', temp.name]) return plistlib.readPlist(temp.name) def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix): """Constructs a dictionary of variable substitutions for Entitlements.plist. Args: bundle_identifier: string, value of CFBundleIdentifier from Info.plist app_identifier_prefix: string, value for AppIdentifierPrefix Returns: Dictionary of substitutions to apply when generating Entitlements.plist. """ return { 'CFBundleIdentifier': bundle_identifier, 'AppIdentifierPrefix': app_identifier_prefix, } def _GetCFBundleIdentifier(self): """Extracts CFBundleIdentifier value from Info.plist in the bundle. Returns: Value of CFBundleIdentifier in the Info.plist located in the bundle. 
""" info_plist_path = os.path.join( os.environ['TARGET_BUILD_DIR'], os.environ['INFOPLIST_PATH']) info_plist_data = self._LoadPlistMaybeBinary(info_plist_path) return info_plist_data['CFBundleIdentifier'] def _InstallEntitlements(self, entitlements, substitutions, overrides): """Generates and install the ${BundleName}.xcent entitlements file. Expands variables "$(variable)" pattern in the source entitlements file, add extra entitlements defined in the .mobileprovision file and the copy the generated plist to "${BundlePath}.xcent". Args: entitlements: string, optional, path to the Entitlements.plist template to use, defaults to "${SDKROOT}/Entitlements.plist" substitutions: dictionary, variable substitutions overrides: dictionary, values to add to the entitlements Returns: Path to the generated entitlements file. """ source_path = entitlements target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['PRODUCT_NAME'] + '.xcent') if not source_path: source_path = os.path.join( os.environ['SDKROOT'], 'Entitlements.plist') shutil.copy2(source_path, target_path) data = self._LoadPlistMaybeBinary(target_path) data = self._ExpandVariables(data, substitutions) if overrides: for key in overrides: if key not in data: data[key] = overrides[key] plistlib.writePlist(data, target_path) return target_path def _ExpandVariables(self, data, substitutions): """Expands variables "$(variable)" in data. Args: data: object, can be either string, list or dictionary substitutions: dictionary, variable substitutions to perform Returns: Copy of data where each references to "$(variable)" has been replaced by the corresponding value found in substitutions, or left intact if the key was not found. 
""" if isinstance(data, str): for key, value in substitutions.iteritems(): data = data.replace('$(%s)' % key, value) return data if isinstance(data, list): return [self._ExpandVariables(v, substitutions) for v in data] if isinstance(data, dict): return {k: self._ExpandVariables(data[k], substitutions) for k in data} return data if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
mit
ionux/linux
scripts/gdb/linux/utils.py
630
4267
# # gdb helper commands and functions for Linux kernel debugging # # common utilities # # Copyright (c) Siemens AG, 2011-2013 # # Authors: # Jan Kiszka <jan.kiszka@siemens.com> # # This work is licensed under the terms of the GNU GPL version 2. # import gdb class CachedType: def __init__(self, name): self._type = None self._name = name def _new_objfile_handler(self, event): self._type = None gdb.events.new_objfile.disconnect(self._new_objfile_handler) def get_type(self): if self._type is None: self._type = gdb.lookup_type(self._name) if self._type is None: raise gdb.GdbError( "cannot resolve type '{0}'".format(self._name)) if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'): gdb.events.new_objfile.connect(self._new_objfile_handler) return self._type long_type = CachedType("long") def get_long_type(): global long_type return long_type.get_type() def offset_of(typeobj, field): element = gdb.Value(0).cast(typeobj) return int(str(element[field].address).split()[0], 16) def container_of(ptr, typeobj, member): return (ptr.cast(get_long_type()) - offset_of(typeobj, member)).cast(typeobj) class ContainerOf(gdb.Function): """Return pointer to containing data structure. $container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the data structure of the type TYPE in which PTR is the address of ELEMENT. 
Note that TYPE and ELEMENT have to be quoted as strings.""" def __init__(self): super(ContainerOf, self).__init__("container_of") def invoke(self, ptr, typename, elementname): return container_of(ptr, gdb.lookup_type(typename.string()).pointer(), elementname.string()) ContainerOf() BIG_ENDIAN = 0 LITTLE_ENDIAN = 1 target_endianness = None def get_target_endianness(): global target_endianness if target_endianness is None: endian = gdb.execute("show endian", to_string=True) if "little endian" in endian: target_endianness = LITTLE_ENDIAN elif "big endian" in endian: target_endianness = BIG_ENDIAN else: raise gdb.GdbError("unknown endianness '{0}'".format(str(endian))) return target_endianness def read_u16(buffer): if get_target_endianness() == LITTLE_ENDIAN: return ord(buffer[0]) + (ord(buffer[1]) << 8) else: return ord(buffer[1]) + (ord(buffer[0]) << 8) def read_u32(buffer): if get_target_endianness() == LITTLE_ENDIAN: return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16) else: return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16) def read_u64(buffer): if get_target_endianness() == LITTLE_ENDIAN: return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32) else: return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32) target_arch = None def is_target_arch(arch): if hasattr(gdb.Frame, 'architecture'): return arch in gdb.newest_frame().architecture().name() else: global target_arch if target_arch is None: target_arch = gdb.execute("show architecture", to_string=True) return arch in target_arch GDBSERVER_QEMU = 0 GDBSERVER_KGDB = 1 gdbserver_type = None def get_gdbserver_type(): def exit_handler(event): global gdbserver_type gdbserver_type = None gdb.events.exited.disconnect(exit_handler) def probe_qemu(): try: return gdb.execute("monitor info version", to_string=True) != "" except: return False def probe_kgdb(): try: thread_info = gdb.execute("info thread 2", to_string=True) return "shadowCPU0" in thread_info except: return False global gdbserver_type 
if gdbserver_type is None: if probe_qemu(): gdbserver_type = GDBSERVER_QEMU elif probe_kgdb(): gdbserver_type = GDBSERVER_KGDB if gdbserver_type is not None and hasattr(gdb, 'events'): gdb.events.exited.connect(exit_handler) return gdbserver_type
gpl-2.0
rsalmaso/django-cms
cms/toolbar/items.py
1
16937
import json from abc import ABCMeta from collections import defaultdict from django.template.loader import render_to_string from django.utils.encoding import force_str from django.utils.functional import Promise from cms.constants import RIGHT, LEFT, REFRESH_PAGE, URL_CHANGE class ItemSearchResult: def __init__(self, item, index): self.item = item self.index = index def __add__(self, other): return ItemSearchResult(self.item, self.index + other) def __sub__(self, other): return ItemSearchResult(self.item, self.index - other) def __int__(self): return self.index def may_be_lazy(thing): if isinstance(thing, Promise): return thing._proxy____args[0] else: return thing class ToolbarAPIMixin(metaclass=ABCMeta): REFRESH_PAGE = REFRESH_PAGE URL_CHANGE = URL_CHANGE LEFT = LEFT RIGHT = RIGHT def __init__(self): self.items = [] self.menus = {} self._memo = defaultdict(list) def _memoize(self, item): self._memo[item.__class__].append(item) def _unmemoize(self, item): self._memo[item.__class__].remove(item) def _item_position(self, item): return self.items.index(item) def _add_item(self, item, position): if position is not None: self.items.insert(position, item) else: self.items.append(item) def _remove_item(self, item): if item in self.items: self.items.remove(item) else: raise KeyError("Item %r not found" % item) def get_item_count(self): return len(self.items) def add_item(self, item, position=None): if not isinstance(item, BaseItem): raise ValueError("Items must be subclasses of cms.toolbar.items.BaseItem, %r isn't" % item) if isinstance(position, ItemSearchResult): position = position.index elif isinstance(position, BaseItem): position = self._item_position(position) elif not (position is None or isinstance(position, (int,))): raise ValueError("Position must be None, an integer, an item or an ItemSearchResult, got %r instead" % position) self._add_item(item, position) self._memoize(item) return item def find_items(self, item_type, **attributes): results = [] attr_items = 
attributes.items() notfound = object() for candidate in self._memo[item_type]: if all(may_be_lazy(getattr(candidate, key, notfound)) == value for key, value in attr_items): results.append(ItemSearchResult(candidate, self._item_position(candidate))) return results def find_first(self, item_type, **attributes): try: return self.find_items(item_type, **attributes)[0] except IndexError: return None # # This will only work if it is used to determine the insert position for # all items in the same menu. # def get_alphabetical_insert_position(self, new_menu_name, item_type, default=0): results = self.find_items(item_type) # No items yet? Use the default value provided if not len(results): return default last_position = 0 for result in sorted(results, key=lambda x: x.item.name): if result.item.name > new_menu_name: return result.index if result.index > last_position: last_position = result.index else: return last_position + 1 def remove_item(self, item): self._remove_item(item) self._unmemoize(item) def add_sideframe_item(self, name, url, active=False, disabled=False, extra_classes=None, on_close=None, side=LEFT, position=None): item = SideframeItem(name, url, active=active, disabled=disabled, extra_classes=extra_classes, on_close=on_close, side=side, ) self.add_item(item, position=position) return item def add_modal_item(self, name, url, active=False, disabled=False, extra_classes=None, on_close=REFRESH_PAGE, side=LEFT, position=None): item = ModalItem(name, url, active=active, disabled=disabled, extra_classes=extra_classes, on_close=on_close, side=side, ) self.add_item(item, position=position) return item def add_link_item(self, name, url, active=False, disabled=False, extra_classes=None, side=LEFT, position=None): item = LinkItem(name, url, active=active, disabled=disabled, extra_classes=extra_classes, side=side ) self.add_item(item, position=position) return item def add_ajax_item(self, name, action, active=False, disabled=False, extra_classes=None, data=None, 
question=None, side=LEFT, position=None, on_success=None, method='POST'): item = AjaxItem(name, action, self.csrf_token, active=active, disabled=disabled, extra_classes=extra_classes, data=data, question=question, side=side, on_success=on_success, method=method, ) self.add_item(item, position=position) return item class BaseItem(metaclass=ABCMeta): toolbar = None template = None def __init__(self, side=LEFT): self.side = side @property def right(self): return self.side is RIGHT def render(self): if self.toolbar: template = self.toolbar.templates.get_cached_template(self.template) return template.render(self.get_context()) # Backwards compatibility return render_to_string(self.template, self.get_context()) def get_context(self): return {} class TemplateItem(BaseItem): def __init__(self, template, extra_context=None, side=LEFT): super().__init__(side) self.template = template self.extra_context = extra_context def get_context(self): if self.extra_context: return self.extra_context return {} class SubMenu(ToolbarAPIMixin, BaseItem): template = "cms/toolbar/items/menu.html" sub_level = True active = False def __init__(self, name, csrf_token, disabled=False, side=LEFT): ToolbarAPIMixin.__init__(self) BaseItem.__init__(self, side) self.name = name self.disabled = disabled self.csrf_token = csrf_token def __repr__(self): return '<Menu:%s>' % force_str(self.name) def add_break(self, identifier=None, position=None): item = Break(identifier) self.add_item(item, position=position) return item def get_items(self): items = self.items for item in items: item.toolbar = self.toolbar if hasattr(item, 'disabled'): item.disabled = self.disabled or item.disabled return items def get_context(self): return { 'active': self.active, 'disabled': self.disabled, 'items': self.get_items(), 'title': self.name, 'sub_level': self.sub_level } class Menu(SubMenu): sub_level = False def get_or_create_menu(self, key, verbose_name, disabled=False, side=LEFT, position=None): if key in self.menus: 
return self.menus[key] menu = SubMenu(verbose_name, self.csrf_token, disabled=disabled, side=side) self.menus[key] = menu self.add_item(menu, position=position) return menu class LinkItem(BaseItem): template = "cms/toolbar/items/item_link.html" def __init__(self, name, url, active=False, disabled=False, extra_classes=None, side=LEFT): super().__init__(side) self.name = name self.url = url self.active = active self.disabled = disabled self.extra_classes = extra_classes or [] def __repr__(self): return '<LinkItem:%s>' % force_str(self.name) def get_context(self): return { 'url': self.url, 'name': self.name, 'active': self.active, 'disabled': self.disabled, 'extra_classes': self.extra_classes, } class FrameItem(BaseItem): # Be sure to define the correct template def __init__(self, name, url, active=False, disabled=False, extra_classes=None, on_close=None, side=LEFT): super().__init__(side) self.name = "%s..." % force_str(name) self.url = url self.active = active self.disabled = disabled self.extra_classes = extra_classes or [] self.on_close = on_close def __repr__(self): # Should be overridden return '<FrameItem:%s>' % force_str(self.name) def get_context(self): return { 'url': self.url, 'name': self.name, 'active': self.active, 'disabled': self.disabled, 'extra_classes': self.extra_classes, 'on_close': self.on_close, } class SideframeItem(FrameItem): template = "cms/toolbar/items/item_sideframe.html" def __repr__(self): return '<SideframeItem:%s>' % force_str(self.name) class ModalItem(FrameItem): template = "cms/toolbar/items/item_modal.html" def __repr__(self): return '<ModalItem:%s>' % force_str(self.name) class AjaxItem(BaseItem): template = "cms/toolbar/items/item_ajax.html" def __init__(self, name, action, csrf_token, data=None, active=False, disabled=False, extra_classes=None, question=None, side=LEFT, on_success=None, method='POST'): super().__init__(side) self.name = name self.action = action self.active = active self.disabled = disabled self.csrf_token = 
csrf_token self.data = data or {} self.extra_classes = extra_classes or [] self.question = question self.on_success = on_success self.method = method def __repr__(self): return '<AjaxItem:%s>' % force_str(self.name) def get_context(self): data = self.data.copy() if self.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): data['csrfmiddlewaretoken'] = self.csrf_token return { 'action': self.action, 'name': self.name, 'active': self.active, 'disabled': self.disabled, 'extra_classes': self.extra_classes, 'data': json.dumps(data), 'question': self.question, 'on_success': self.on_success, 'method': self.method, } class Break(BaseItem): template = "cms/toolbar/items/break.html" def __init__(self, identifier=None): self.identifier = identifier class BaseButton(metaclass=ABCMeta): toolbar = None template = None def render(self): if self.toolbar: template = self.toolbar.templates.get_cached_template(self.template) return template.render(self.get_context()) # Backwards compatibility return render_to_string(self.template, self.get_context()) def get_context(self): return {} class Button(BaseButton): template = "cms/toolbar/items/button.html" def __init__(self, name, url, active=False, disabled=False, extra_classes=None): self.name = name self.url = url self.active = active self.disabled = disabled self.extra_classes = extra_classes or [] def __repr__(self): return '<Button:%s>' % force_str(self.name) def get_context(self): return { 'name': self.name, 'url': self.url, 'active': self.active, 'disabled': self.disabled, 'extra_classes': self.extra_classes, } class ModalButton(Button): template = "cms/toolbar/items/button_modal.html" def __init__(self, name, url, active=False, disabled=False, extra_classes=None, on_close=None): self.name = name self.url = url self.active = active self.disabled = disabled self.extra_classes = extra_classes or [] self.on_close = on_close def __repr__(self): return '<ModalButton:%s>' % force_str(self.name) def get_context(self): return { 'name': 
self.name, 'url': self.url, 'active': self.active, 'disabled': self.disabled, 'extra_classes': self.extra_classes, 'on_close': self.on_close, } class SideframeButton(ModalButton): template = "cms/toolbar/items/button_sideframe.html" def __repr__(self): return '<SideframeButton:%s>' % force_str(self.name) class ButtonList(BaseItem): template = "cms/toolbar/items/button_list.html" def __init__(self, identifier=None, extra_classes=None, side=LEFT): super().__init__(side) self.extra_classes = extra_classes or [] self.buttons = [] self.identifier = identifier def __repr__(self): return '<ButtonList:%s>' % self.identifier def add_item(self, item): if not isinstance(item, Button): raise ValueError("Expected instance of cms.toolbar.items.Button, got %r instead" % item) self.buttons.append(item) def add_button(self, name, url, active=False, disabled=False, extra_classes=None): item = Button(name, url, active=active, disabled=disabled, extra_classes=extra_classes ) self.buttons.append(item) return item def add_modal_button(self, name, url, active=False, disabled=False, extra_classes=None, on_close=REFRESH_PAGE): item = ModalButton(name, url, active=active, disabled=disabled, extra_classes=extra_classes, on_close=on_close, ) self.buttons.append(item) return item def add_sideframe_button(self, name, url, active=False, disabled=False, extra_classes=None, on_close=None): item = SideframeButton(name, url, active=active, disabled=disabled, extra_classes=extra_classes, on_close=on_close, ) self.buttons.append(item) return item def get_buttons(self): for button in self.buttons: button.toolbar = self.toolbar yield button def get_context(self): context = { 'buttons': list(self.get_buttons()), 'extra_classes': self.extra_classes } if self.toolbar: context['cms_structure_on'] = self.toolbar.structure_mode_url_on return context class Dropdown(ButtonList): template = "cms/toolbar/items/dropdown.html" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) 
self.primary_button = None def __repr__(self): return '<Dropdown:%s>' % force_str(self.name) def add_primary_button(self, button): self.primary_button = button def get_buttons(self): for button in self.buttons: button.toolbar = self.toolbar button.is_in_dropdown = True yield button def get_context(self): return { 'primary_button': self.primary_button, 'buttons': list(self.get_buttons()), 'extra_classes': self.extra_classes, } class DropdownToggleButton(BaseButton): template = "cms/toolbar/items/dropdown_button.html" has_no_action = True def __init__(self, name, active=False, disabled=False, extra_classes=None): self.name = name self.active = active self.disabled = disabled self.extra_classes = extra_classes or [] def __repr__(self): return '<DropdownToggleButton:%s>' % force_str(self.name) def get_context(self): return { 'name': self.name, 'active': self.active, 'disabled': self.disabled, 'extra_classes': self.extra_classes, }
bsd-3-clause
NeftaliYagua/gae-init
main/auth/twitter.py
10
1457
# coding: utf-8 import flask import auth import config import model import util from main import app twitter_config = dict( access_token_url='https://api.twitter.com/oauth/access_token', authorize_url='https://api.twitter.com/oauth/authorize', base_url='https://api.twitter.com/1.1/', consumer_key=config.CONFIG_DB.twitter_consumer_key, consumer_secret=config.CONFIG_DB.twitter_consumer_secret, request_token_url='https://api.twitter.com/oauth/request_token', ) twitter = auth.create_oauth_app(twitter_config, 'twitter') @app.route('/api/auth/callback/twitter/') def twitter_authorized(): response = twitter.authorized_response() if response is None: flask.flash('You denied the request to sign in.') return flask.redirect(util.get_next_url()) flask.session['oauth_token'] = ( response['oauth_token'], response['oauth_token_secret'], ) user_db = retrieve_user_from_twitter(response) return auth.signin_user_db(user_db) @twitter.tokengetter def get_twitter_token(): return flask.session.get('oauth_token') @app.route('/signin/twitter/') def signin_twitter(): return auth.signin_oauth(twitter) def retrieve_user_from_twitter(response): auth_id = 'twitter_%s' % response['user_id'] user_db = model.User.get_by('auth_ids', auth_id) return user_db or auth.create_user_db( auth_id=auth_id, name=response['screen_name'], username=response['screen_name'], )
mit
krissrex/python_projects
Projects/Oving10-itgk/main.py
1
6889
# -*- coding: utf-8 -*- """ Created on Sun Nov 9 00:06:24 2014 @author: kristian """ from skumleskogen import * import time ################## OPTIONS ################## debug_on = True write_to_file = True hukommelse = {} sti_totalt = ["inn"] noder_med_lås = set() forrige_retning = [] file = None try: del print except: pass _print = print class Print_To_File(object): def __init__(self, *text): _print(text) string = "" for t in text: string += str(t) if file: file.write("\n" + string) if write_to_file: print = Print_To_File file = open("output.txt", mode="a") class MovementException(Exception): def __init__(self, error): self.error = error def __str__(self): return str(self.error) def start_solving(): print("Er inngang:", er_inngang()) nøkler = 0 while True: debug() husk_node() if er_stank(): if gaa_tilbake(): sti_totalt.append("STANK! tilbake til " + str(nummer())) kom_fra_retning = forrige_retning.pop(len(forrige_retning) - 1) continue if er_nokkel(): if plukk_opp(): nøkler += 1 sti_totalt.append("plukket nøkkel " + str(nøkler)) continue if (not hukommelse[nummer()]["venstre"]) \ or kan_låse_opp(nummer(), nøkler, "venstre"): try: hukommelse[nummer()]["lås"][0] = False hukommelse[nummer()]["superlås"][0] = False besøk_node("venstre") except MovementException as ex: print(ex) else: forrige_retning.append("venstre") sti_totalt.append("venstre " + str(nummer())) continue if (not hukommelse[nummer()]["høyre"]) \ or kan_låse_opp(nummer(), nøkler, "høyre"): try: hukommelse[nummer()]["lås"][1] = False hukommelse[nummer()]["superlås"][1] = False besøk_node("høyre") except MovementException as ex: print(ex) else: forrige_retning.append("høyre") sti_totalt.append("høyre " + str(nummer())) continue if er_laas(): noder_med_lås.add(nummer()) if er_superlaas(): if nøkler >= 2: utfall = laas_opp() if utfall: nøkler -= 2 sti_totalt.append("låste opp sl " + str(nøkler)) if nummer() in noder_med_lås: noder_med_lås.remove(nummer()) continue else: noder_med_lås.add(nummer()) else: if 
nøkler >= 1: utfall = laas_opp() if utfall: nøkler -= 1 sti_totalt.append("låste opp s " + str(nøkler)) if nummer() in noder_med_lås: noder_med_lås.remove(nummer()) continue if er_utgang(): gaa_ut() return # Vi er stuck. Noen noder må være låste. har_lås = er_laas() har_superlås = er_superlaas() if har_lås and har_superlås: # Låsen var ikke en vanlig lås, men superlås. har_lås = False if barn_har_lås(nummer()): har_lås = True if barn_har_superlås(nummer()): har_superlås = True if gaa_tilbake(): sti_totalt.append("tilbake til " + str(nummer())) kom_fra_retning = forrige_retning.pop(len(forrige_retning) - 1) print("kom fra:", kom_fra_retning) if har_lås: print("har lås") if kom_fra_retning == "venstre": hukommelse[nummer()]["lås"][0] = True else: hukommelse[nummer()]["lås"][1] = True if har_superlås: print("har superlås") if kom_fra_retning == "venstre": hukommelse[nummer()]["superlås"][0] = True else: hukommelse[nummer()]["superlås"][1] = True print(hukommelse[nummer()]) else: print("KLARTE IKKE Å GÅ TILBAKE!!!") return def kan_låse_opp(n, nøkler, retning): indeks = 0 if retning == "høyre": indeks = 1 if hukommelse[n]["lås"][indeks] and (nøkler >= 1): return True if hukommelse[n]["superlås"][indeks] and (nøkler >= 2): return True return False def barn_har_lås(n): return hukommelse[n]["lås"][0] or hukommelse[n]["lås"][1] def barn_har_superlås(n): return hukommelse[n]["superlås"][0] or hukommelse[n]["superlås"][1] def husk_node(): n = nummer() if n not in hukommelse: hukommelse[n] = {"venstre": False, "høyre": False, "lås": [False, False], "superlås": [False, False]} def besøk_node(retning): n = nummer() utfall = False if retning == "venstre": utfall = gaa_venstre() elif retning == "høyre": utfall = gaa_hoyre() else: print("Ugyldig retning oppgitt!", n, retning) return if utfall: hukommelse[n][retning] = True else: if er_laas(): raise MovementException("Er låst") else: raise MovementException("Er blindvei") def debug(): if debug_on: print("/"*25 + "DEBUG:" + "/"*25) 
print(("Nummer: {n}\n" + "Type:\n " + "i: {i}, l: {l}, sl: {sl}, st: {st}, nk: {nk}, v: {v}, u: {u}" + "\nLabel: {la}") .format(n=nummer(), i=er_inngang(), l=er_laas(), sl=er_superlaas(), st=er_stank(), u=er_utgang(), v=er_vanlig(), nk=er_nokkel(), la=label(nummer()))) def main(): # Initialisation. def get_hours(): return time.asctime().split(' ')[4] start_time = time.time() print("Starting. Time:", get_hours()) # Start solving the maze. try: start_solving() # In case of failure, e.g. a rabbit ate you. except Exception as e: print("Exception occured:") print(e) print("Exciting. Time:", get_hours()) # Done, do final actions. finally: print("\nRan for {0} seconds.".format( abs( round(start_time - time.time(), 4)))) print("Maze completed.") print(sti_totalt) if __name__ == "__main__": main() if file: file.close()
mit
okffi/booktype
lib/booktype/convert/base.py
3
1698
# This file is part of Booktype. # Copyright (c) 2013 Borko Jandras <borko.jandras@sourcefabric.org> # # Booktype is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Booktype is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Booktype. If not, see <http://www.gnu.org/licenses/>. import os from ebooklib import epub class BaseConverter(object): def __init__(self, config, assets, sandbox_path, callback): self._config = config self._assets = assets self._sandbox_path = sandbox_path self._callback = callback @property def config(self): return self._config @property def assets(self): return self._assets @property def sandbox_path(self): return self._sandbox_path @property def callback(self): return self._callback def validate_config(self): pass def load_book(self, book_path): return epub.read_epub(book_path) def convert(self, book, output_path): pass def get_asset(self, asset_id): return self.assets.get(asset_id) def open_file(self, file_name, mode="wb"): file_path = os.path.join(self.sandbox_path, file_name) return open(file_path, mode)
agpl-3.0
openstack/nova
nova/manager.py
2
5378
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. 
You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. """ from oslo_service import periodic_task import nova.conf import nova.db.api from nova import profiler from nova import rpc CONF = nova.conf.CONF class PeriodicTasks(periodic_task.PeriodicTasks): def __init__(self): super(PeriodicTasks, self).__init__(CONF) class ManagerMeta(profiler.get_traced_meta(), type(PeriodicTasks)): """Metaclass to trace all children of a specific class. This metaclass wraps every public method (not starting with _ or __) of the class using it. All children classes of the class using ManagerMeta will be profiled as well. Adding this metaclass requires that the __trace_args__ attribute be added to the class we want to modify. That attribute is a dictionary with one mandatory key: "name". "name" defines the name of the action to be traced (for example, wsgi, rpc, db). The OSprofiler-based tracing, although, will only happen if profiler instance was initiated somewhere before in the thread, that can only happen if profiling is enabled in nova.conf and the API call to Nova API contained specific headers. 
""" class Manager(PeriodicTasks, metaclass=ManagerMeta): __trace_args__ = {"name": "rpc"} def __init__(self, host=None, service_name='undefined'): if not host: host = CONF.host self.host = host self.backdoor_port = None self.service_name = service_name self.notifier = rpc.get_notifier(self.service_name, self.host) self.additional_endpoints = [] super(Manager, self).__init__() def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): """Hook to do additional manager initialization when one requests the service be started. This is called before any service record is created. Child classes should override this method. """ pass def cleanup_host(self): """Hook to do cleanup work when the service shuts down. Child classes should override this method. """ pass def pre_start_hook(self): """Hook to provide the manager the ability to do additional start-up work before any RPC queues/consumers are created. This is called after other initialization has succeeded and a service record is created. Child classes should override this method. """ pass def post_start_hook(self): """Hook to provide the manager the ability to do additional start-up work immediately after a service creates RPC consumers and starts 'running'. Child classes should override this method. """ pass def reset(self): """Hook called on SIGHUP to signal the manager to re-read any dynamic configuration or do any reconfiguration tasks. """ pass
apache-2.0
vmindru/ansible
lib/ansible/modules/network/avi/avi_gslbapplicationpersistenceprofile.py
31
3686
#!/usr/bin/python # # Created on Aug 25, 2016 # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # Avi Version: 17.1.1 # # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_gslbapplicationpersistenceprofile author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com> short_description: Module for setup of GslbApplicationPersistenceProfile Avi RESTful Object description: - This module is used to configure GslbApplicationPersistenceProfile object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.4" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent","present"] description: description: - Field introduced in 17.1.1. name: description: - A user-friendly name for the persistence profile. - Field introduced in 17.1.1. required: true tenant_ref: description: - It is a reference to an object of type tenant. - Field introduced in 17.1.1. url: description: - Avi controller URL of the object. uuid: description: - Uuid of the persistence profile. - Field introduced in 17.1.1. 
extends_documentation_fragment: - avi ''' EXAMPLES = """ - name: Example to create GslbApplicationPersistenceProfile object avi_gslbapplicationpersistenceprofile: controller: 10.10.25.42 username: admin password: something state: present name: sample_gslbapplicationpersistenceprofile """ RETURN = ''' obj: description: GslbApplicationPersistenceProfile (api/gslbapplicationpersistenceprofile) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.network.avi.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), description=dict(type='str',), name=dict(type='str', required=True), tenant_ref=dict(type='str',), url=dict(type='str',), uuid=dict(type='str',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'gslbapplicationpersistenceprofile', set([])) if __name__ == '__main__': main()
gpl-3.0
alikins/ansible
lib/ansible/modules/network/nxos/nxos_gir_profile_management.py
16
6135
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_gir_profile_management extends_documentation_fragment: nxos version_added: "2.2" short_description: Create a maintenance-mode or normal-mode profile for GIR. description: - Manage a maintenance-mode or normal-mode profile with configuration commands that can be applied during graceful removal or graceful insertion. author: - Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - C(state=absent) removes the whole profile. options: commands: description: - List of commands to be included into the profile. required: false default: null mode: description: - Configure the profile as Maintenance or Normal mode. required: true choices: ['maintenance', 'normal'] state: description: - Specify desired state of the resource. required: false default: present choices: ['present','absent'] ''' EXAMPLES = ''' # Create a maintenance-mode profile - nxos_gir_profile_management: mode: maintenance commands: - router eigrp 11 - isolate # Remove the maintenance-mode profile - nxos_gir_profile_management: mode: maintenance state: absent ''' RETURN = ''' proposed: description: list of commands passed into module. 
    returned: verbose mode
    type: list
    sample: ["router eigrp 11", "isolate"]
existing:
    description: list of existing profile commands.
    returned: verbose mode
    type: list
    sample: ["router bgp 65535","isolate","router eigrp 10","isolate",
            "diagnostic bootup level complete"]
end_state:
    description: list of profile entries after module execution.
    returned: verbose mode
    type: list
    sample: ["router bgp 65535","isolate","router eigrp 10","isolate",
            "diagnostic bootup level complete","router eigrp 11",
            "isolate"]
updates:
    description: commands sent to the device
    returned: always
    type: list
    sample: ["configure maintenance profile maintenance-mode",
            "router eigrp 11","isolate"]
changed:
    description: check to see if a change was made on the device
    returned: always
    type: boolean
    sample: true
'''

import re

from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig


def get_existing(module):
    """Return the commands currently configured under the GIR profile.

    Pulls the running config from the device, extracts the section for the
    maintenance-mode or normal-mode profile (depending on the ``mode``
    parameter), and returns its body as a list of stripped command strings.
    Returns an empty list when the profile does not exist.
    """
    existing = []
    netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))

    if module.params['mode'] == 'maintenance':
        parents = ['configure maintenance profile maintenance-mode']
    else:
        parents = ['configure maintenance profile normal-mode']

    config = netcfg.get_section(parents)
    if config:
        existing = config.splitlines()
        existing = [cmd.strip() for cmd in existing]
        # Drop the section header line itself; only the child commands
        # belong to the profile body.
        existing.pop(0)

    return existing


def state_present(module, existing, commands):
    """Build the command list that creates/updates the profile.

    Returns an empty list when ``commands`` already matches the existing
    profile body (idempotent no-op); otherwise returns the profile-enter
    command followed by the requested commands.
    """
    cmds = list()

    if existing == commands:
        # Idempotent case
        return cmds

    cmds.extend(commands)
    # The profile-mode command must come first so the rest of the list is
    # applied inside the profile context.
    if module.params['mode'] == 'maintenance':
        cmds.insert(0, 'configure maintenance profile maintenance-mode')
    else:
        cmds.insert(0, 'configure maintenance profile normal-mode')

    return cmds


def state_absent(module, existing, commands):
    """Build the single command that removes the whole GIR profile."""
    if module.params['mode'] == 'maintenance':
        cmds = ['no configure maintenance profile maintenance-mode']
    else:
        cmds = ['no configure maintenance profile normal-mode']
    return cmds


def invoke(name, *args, **kwargs):
    """Dispatch to a module-level function by name (e.g. 'state_present')."""
    func = globals().get(name)
    if func:
        return func(*args, **kwargs)


def main():
    """Ansible entry point: apply the requested GIR profile state.

    Validates input (commands are only meaningful with state=present),
    computes the command delta, pushes it to the device unless running in
    check mode, and exits with changed/updates (plus existing/proposed/
    end_state facts when verbosity is raised).
    """
    argument_spec = dict(
        commands=dict(required=False, type='list'),
        mode=dict(required=True, choices=['maintenance', 'normal']),
        state=dict(choices=['absent', 'present'], default='present')
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()

    state = module.params['state']
    commands = module.params['commands'] or []

    if state == 'absent' and commands:
        # 'absent' removes the whole profile; supplying commands with it is
        # contradictory input.
        module.fail_json(msg='when state is absent, no command can be used.')

    existing = invoke('get_existing', module)
    end_state = existing
    changed = False

    result = {}
    cmds = []
    # state=absent with no existing profile is a no-op: nothing to remove.
    if state == 'present' or (state == 'absent' and existing):
        cmds = invoke('state_%s' % state, module, existing, commands)

        if module.check_mode:
            # Report what would be sent without touching the device.
            module.exit_json(changed=True, commands=cmds)
        else:
            if cmds:
                load_config(module, cmds)
                changed = True
                end_state = invoke('get_existing', module)

    result['changed'] = changed
    if module._verbosity > 0:
        # Extra facts are only gathered/returned in verbose mode (-v).
        end_state = invoke('get_existing', module)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = commands
    result['updates'] = cmds
    result['warnings'] = warnings

    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
Nikoala/CouchPotatoServer
couchpotato/core/downloaders/nzbvortex.py
44
8228
from base64 import b64encode import os from uuid import uuid4 import hashlib import traceback from requests import HTTPError from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import tryUrlencode, sp from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog log = CPLog(__name__) autoload = 'NZBVortex' class NZBVortex(DownloaderBase): protocol = ['nzb'] api_level = None session_id = None def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} # Send the nzb try: nzb_filename = self.createFileName(data, filedata, media, unique_tag = True) response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = { 'name': nzb_filename, 'groupname': self.conf('group') }) if response and response.get('result', '').lower() == 'ok': return self.downloadReturnId(nzb_filename) log.error('Something went wrong sending the NZB file. 
Response: %s', response) return False except: log.error('Something went wrong sending the NZB file: %s', traceback.format_exc()) return False def test(self): """ Check if connection works :return: bool """ try: login_result = self.login() except: return False return login_result def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ raw_statuses = self.call('nzb') release_downloads = ReleaseDownloadList(self) for nzb in raw_statuses.get('nzbs', []): nzb_id = os.path.basename(nzb['nzbFileName']) if nzb_id in ids: # Check status status = 'busy' if nzb['state'] == 20: status = 'completed' elif nzb['state'] in [21, 22, 24]: status = 'failed' release_downloads.append({ 'temp_id': nzb['id'], 'id': nzb_id, 'name': nzb['uiTitle'], 'status': status, 'original_status': nzb['state'], 'timeleft': -1, 'folder': sp(nzb['destinationPath']), }) return release_downloads def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) try: self.call('nzb/%s/cancel' % release_download['temp_id']) except: log.error('Failed deleting: %s', traceback.format_exc(0)) return False return True def login(self): nonce = self.call('auth/nonce', auth = False).get('authNonce') cnonce = uuid4().hex hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest()) params = { 'nonce': nonce, 'cnonce': cnonce, 'hash': hashed } login_data = self.call('auth/login', parameters = params, auth = False) # Save for later if login_data.get('loginResult') == 'successful': self.session_id = login_data.get('sessionID') return True log.error('Login failed, please check you api-key') return False def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs): # Login first if not parameters: parameters = {} 
if not self.session_id and auth: self.login() # Always add session id to request if self.session_id: parameters['sessionid'] = self.session_id params = tryUrlencode(parameters) url = cleanHost(self.conf('host')) + 'api/' + call try: data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs) if data: return data except HTTPError as e: sc = e.response.status_code if sc == 403: # Try login and do again if not is_repeat: self.login() return self.call(call, parameters = parameters, is_repeat = True, **kwargs) log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) except: log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) return {} def getApiLevel(self): if not self.api_level: try: data = self.call('app/apilevel', auth = False) self.api_level = float(data.get('apilevel')) except HTTPError as e: sc = e.response.status_code if sc == 403: log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher') else: log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1)) return self.api_level def isEnabled(self, manual = False, data = None): if not data: data = {} return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel() config = [{ 'name': 'nzbvortex', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'nzbvortex', 'label': 'NZBVortex', 'description': 'Use <a href="http://www.nzbvortex.com/landing/" target="_blank">NZBVortex</a> to download NZBs.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'nzb', }, { 'name': 'host', 'default': 'https://localhost:4321', 'description': 'Hostname with port. Usually <strong>https://localhost:4321</strong>', }, { 'name': 'api_key', 'label': 'Api Key', }, { 'name': 'group', 'label': 'Group', 'description': 'The group CP places the nzb in. 
Make sure to create it in NZBVortex.', }, { 'name': 'manual', 'default': False, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'delete_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Delete a release after the download has failed.', }, ], } ], }]
gpl-3.0
farhaadila/django-cms
cms/south_migrations/0022_login_required_added.py
1680
20032
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models try: from django.contrib.auth import get_user_model except ImportError: # django < 1.5 from django.contrib.auth.models import User else: User = get_user_model() user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name) user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name) user_ptr_name = '%s_ptr' % User._meta.object_name.lower() class Migration(SchemaMigration): def forwards(self, orm): # Dummy migration pass def backwards(self, orm): # Dummy migration pass models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': { 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ( 'django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, user_model_label: { 'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], 
{'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ( 'django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ( 'django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'cms.cmsplugin': { 'Meta': {'object_name': 'CMSPlugin'}, 'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}), 'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}), 'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'rght': 
('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, 'cms.globalpagepermission': { 'Meta': {'object_name': 'GlobalPagePermission'}, 'can_add': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_change': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_change_advanced_settings': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_change_permissions': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_delete': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_moderate': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_move_page': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_publish': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_recover_page': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_view': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}) }, 'cms.page': { 'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'}, 'changed_by': ( 'django.db.models.fields.CharField', [], {'max_length': '70'}), 'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'created_by': ( 'django.db.models.fields.CharField', [], {'max_length': '70'}), 'creation_date': ('django.db.models.fields.DateTimeField', 
[], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'limit_visibility_in_menu': ( 'django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'login_required': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}), 'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}), 'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}), 'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'published': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'publisher_public': ( 'django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}), 'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}), 'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 
'blank': 'True'}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}), 'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'template': ( 'django.db.models.fields.CharField', [], {'max_length': '100'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, 'cms.pagemoderator': { 'Meta': {'object_name': 'PageModerator'}, 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'moderate_children': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'moderate_descendants': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'moderate_page': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label}) }, 'cms.pagemoderatorstate': { 'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'}, 'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'}) }, 'cms.pagepermission': { 'Meta': {'object_name': 'PagePermission'}, 'can_add': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_change': ( 'django.db.models.fields.BooleanField', [], {'default': 
'True'}), 'can_change_advanced_settings': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_change_permissions': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'can_delete': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_moderate': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_move_page': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_publish': ( 'django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_view': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'grant_on': ( 'django.db.models.fields.IntegerField', [], {'default': '5'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}) }, 'cms.pageuser': { 'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]}, 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}), 'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'}) }, 'cms.pageusergroup': { 'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']}, 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}), 'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}) }, 'cms.placeholder': { 'Meta': {'object_name': 'Placeholder'}, 'default_width': ( 
'django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}) }, 'cms.title': { 'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'}, 'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}), 'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'slug': ( 'django.db.models.fields.SlugField', [], {'max_length': '255'}), 'title': ( 'django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ( 
'django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ( 'django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'sites.site': { 'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"}, 'domain': ( 'django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) } } complete_apps = ['cms']
bsd-3-clause
etashjian/ECE757-final
src/arch/x86/isa/insts/general_purpose/no_operation.py
91
2323
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

# x86 no-operation macroops, written in the simulator's microcode assembly
# language (this string is parsed by the micro-assembler at build time, not
# by Python).  Both NOP and the multi-byte hint NOPs expand to a single
# no-fault microop, i.e. they do nothing.
microcode = '''
def macroop NOP
{
    fault "NoFault"
};

def macroop HINT_NOP
{
    fault "NoFault"
};
'''
bsd-3-clause
hortonworks/hortonworks-sandbox
desktop/core/ext-py/Django-1.2.3/tests/regressiontests/views/views.py
15
1663
"""Test views for the Django `views` regression-test suite.

Each view is a minimal fixture exercised by the tests: a dummy page, a
generic-view wrapper, deliberate error-raisers, and a redirect.
"""
import sys

from django.http import HttpResponse, HttpResponseRedirect
from django import forms
from django.views.debug import technical_500_response
from django.views.generic.create_update import create_object
from django.core.urlresolvers import get_resolver
from django.shortcuts import render_to_response

from regressiontests.views import BrokenException, except_args

from models import Article


def index_page(request):
    """Dummy index page"""
    return HttpResponse('<html><body>Dummy page</body></html>')

def custom_create(request):
    """
    Calls create_object generic view with a custom form class.
    """
    class SlugChangingArticleForm(forms.ModelForm):
        """Custom form class to overwrite the slug."""
        class Meta:
            model = Article

        def save(self, *args, **kwargs):
            # Force a fixed slug regardless of the submitted data, so the
            # test can verify that create_object honours the overridden
            # form save().
            self.instance.slug = 'some-other-slug'
            return super(SlugChangingArticleForm, self).save(*args, **kwargs)

    return create_object(request,
        post_save_redirect='/views/create_update/view/article/%(slug)s/',
        form_class=SlugChangingArticleForm)

def raises(request):
    # Deliberately raise, then render the technical-500 debug page for the
    # captured exception info instead of propagating it.
    try:
        raise Exception
    except Exception:
        return technical_500_response(request, *sys.exc_info())

def raises404(request):
    # Resolving the empty path is expected to fail inside the resolver
    # (the name suggests a 404); the view intentionally returns nothing.
    resolver = get_resolver(None)
    resolver.resolve('')

def redirect(request):
    """
    Forces an HTTP redirect.
    """
    return HttpResponseRedirect("target/")

def view_exception(request, n):
    # Raise BrokenException carrying the n-th canned argument from the
    # test package's except_args fixture.
    raise BrokenException(except_args[int(n)])

def template_exception(request, n):
    # Trigger an exception from inside template rendering (the template is
    # crafted to blow up when given this context).
    return render_to_response('debug/template_exception.html',
        {'arg': except_args[int(n)]})
apache-2.0
sajuptpm/neutron-ipam
neutron/services/firewall/drivers/linux/iptables_fwaas.py
1
11774
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#
# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc.

from neutron.agent.linux import iptables_manager
from neutron.extensions import firewall as fw_ext
from neutron.openstack.common import log as logging
from neutron.services.firewall.drivers import fwaas_base

LOG = logging.getLogger(__name__)

# Driver identity plus the names used when building iptables chains.
FWAAS_DRIVER_NAME = 'Fwaas iptables driver'
FWAAS_CHAIN = 'fwaas'
FWAAS_DEFAULT_CHAIN = 'fwaas-default-policy'
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
# Per-direction chain-name prefix: 'i' for ingress, 'o' for egress.
CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i', EGRESS_DIRECTION: 'o'}

""" Firewall rules are applied on internal-interfaces of Neutron router.
    The packets ingressing tenant's network will be on the output
    direction on internal-interfaces.
"""
# Hence ingress maps to '-o' (output interface) and egress to '-i'.
IPTABLES_DIR = {INGRESS_DIRECTION: '-o', EGRESS_DIRECTION: '-i'}
IPV4 = 'ipv4'
IPV6 = 'ipv6'
IP_VER_TAG = {IPV4: 'v4', IPV6: 'v6'}


class IptablesFwaasDriver(fwaas_base.FwaasDriverBase):
    """IPTables driver for Firewall As A Service."""

    def __init__(self):
        LOG.debug(_("Initializing fwaas iptables driver"))

    def create_firewall(self, apply_list, firewall):
        """Install the firewall's rule chains on each router in apply_list.

        Falls back to the default DROP-ALL policy when the firewall is
        administratively down.
        """
        LOG.debug(_('Creating firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        try:
            if firewall['admin_state_up']:
                self._setup_firewall(apply_list, firewall)
            else:
                self.apply_default_policy(apply_list, firewall)
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to create firewall: %s"), firewall['id'])
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def delete_firewall(self, apply_list, firewall):
        """Remove the firewall's chains (and the default chains) from each
        router, applying the iptables changes immediately.
        """
        LOG.debug(_('Deleting firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        fwid = firewall['id']
        try:
            for router_info in apply_list:
                ipt_mgr = router_info.iptables_manager
                self._remove_chains(fwid, ipt_mgr)
                self._remove_default_chains(ipt_mgr)
                # apply the changes immediately (no defer in firewall path)
                ipt_mgr.defer_apply_off()
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to delete firewall: %s"), fwid)
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def update_firewall(self, apply_list, firewall):
        """Rebuild the firewall's chains from its current rule list; same
        admin-state handling as create_firewall.
        """
        LOG.debug(_('Updating firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        try:
            if firewall['admin_state_up']:
                self._setup_firewall(apply_list, firewall)
            else:
                self.apply_default_policy(apply_list, firewall)
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to update firewall: %s"), firewall['id'])
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def apply_default_policy(self, apply_list, firewall):
        """Replace the firewall's chains with the default DROP-ALL policy
        on every router in apply_list.
        """
        LOG.debug(_('Applying firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        fwid = firewall['id']
        try:
            for router_info in apply_list:
                ipt_mgr = router_info.iptables_manager

                # the following only updates local memory; no hole in FW
                self._remove_chains(fwid, ipt_mgr)
                self._remove_default_chains(ipt_mgr)

                # create default 'DROP ALL' policy chain
                self._add_default_policy_chain_v4v6(ipt_mgr)
                self._enable_policy_chain(fwid, ipt_mgr)

                # apply the changes immediately (no defer in firewall path)
                ipt_mgr.defer_apply_off()
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to apply default policy on firewall: %s"),
                          fwid)
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def _setup_firewall(self, apply_list, firewall):
        # Shared worker for create/update: tear down old chains, install
        # default policy plus the policy-derived chains, then flush to
        # iptables immediately.
        fwid = firewall['id']
        for router_info in apply_list:
            ipt_mgr = router_info.iptables_manager

            # the following only updates local memory; no hole in FW
            self._remove_chains(fwid, ipt_mgr)
            self._remove_default_chains(ipt_mgr)

            # create default 'DROP ALL' policy chain
            self._add_default_policy_chain_v4v6(ipt_mgr)
            #create chain based on configured policy
            self._setup_chains(firewall, ipt_mgr)

            # apply the changes immediately (no defer in firewall path)
            ipt_mgr.defer_apply_off()

    def _get_chain_name(self, fwid, ver, direction):
        # e.g. ('iv4' + fwid) for IPv4 ingress.
        return '%s%s%s' % (CHAIN_NAME_PREFIX[direction],
                           IP_VER_TAG[ver],
                           fwid)

    def _setup_chains(self, firewall, ipt_mgr):
        """Create Fwaas chain using the rules in the policy
        """
        fw_rules_list = firewall['firewall_rule_list']
        fwid = firewall['id']

        #default rules for invalid packets and established sessions
        invalid_rule = self._drop_invalid_packets_rule()
        est_rule = self._allow_established_rule()

        for ver in [IPV4, IPV6]:
            if ver == IPV4:
                table = ipt_mgr.ipv4['filter']
            else:
                table = ipt_mgr.ipv6['filter']
            ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION)
            ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION)
            for name in [ichain_name, ochain_name]:
                table.add_chain(name)
                table.add_rule(name, invalid_rule)
                table.add_rule(name, est_rule)

        for rule in fw_rules_list:
            if not rule['enabled']:
                continue
            iptbl_rule = self._convert_fwaas_to_iptables_rule(rule)
            if rule['ip_version'] == 4:
                ver = IPV4
                table = ipt_mgr.ipv4['filter']
            else:
                ver = IPV6
                table = ipt_mgr.ipv6['filter']
            ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION)
            ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION)
            # Same converted rule is installed on both directions' chains.
            table.add_rule(ichain_name, iptbl_rule)
            table.add_rule(ochain_name, iptbl_rule)
        self._enable_policy_chain(fwid, ipt_mgr)

    def _remove_default_chains(self, nsid):
        """Remove fwaas default policy chain."""
        # NOTE(review): the parameter is named 'nsid' but every caller
        # passes an iptables manager and it is forwarded as such below --
        # a rename to 'ipt_mgr' would be clearer.
        self._remove_chain_by_name(IPV4, FWAAS_DEFAULT_CHAIN, nsid)
        self._remove_chain_by_name(IPV6, FWAAS_DEFAULT_CHAIN, nsid)

    def _remove_chains(self, fwid, ipt_mgr):
        """Remove fwaas policy chain."""
        for ver in [IPV4, IPV6]:
            for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
                chain_name = self._get_chain_name(fwid, ver, direction)
                self._remove_chain_by_name(ver, chain_name, ipt_mgr)

    def _add_default_policy_chain_v4v6(self, ipt_mgr):
        # Default policy chain unconditionally drops everything, for both
        # address families.
        ipt_mgr.ipv4['filter'].add_chain(FWAAS_DEFAULT_CHAIN)
        ipt_mgr.ipv4['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP')
        ipt_mgr.ipv6['filter'].add_chain(FWAAS_DEFAULT_CHAIN)
        ipt_mgr.ipv6['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP')

    def _remove_chain_by_name(self, ver, chain_name, ipt_mgr):
        if ver == IPV4:
            ipt_mgr.ipv4['filter'].ensure_remove_chain(chain_name)
        else:
            ipt_mgr.ipv6['filter'].ensure_remove_chain(chain_name)

    def _add_rules_to_chain(self, ipt_mgr, ver, chain_name, rules):
        if ver == IPV4:
            table = ipt_mgr.ipv4['filter']
        else:
            table = ipt_mgr.ipv6['filter']
        for rule in rules:
            table.add_rule(chain_name, rule)

    def _enable_policy_chain(self, fwid, ipt_mgr):
        """Wire the firewall chains (and the default DROP chain) into
        FORWARD via jump rules matched on qr-* router interfaces.
        """
        bname = iptables_manager.binary_name

        for (ver, tbl) in [(IPV4, ipt_mgr.ipv4['filter']),
                           (IPV6, ipt_mgr.ipv6['filter'])]:
            for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
                chain_name = self._get_chain_name(fwid, ver, direction)
                chain_name = iptables_manager.get_chain_name(chain_name)
                if chain_name in tbl.chains:
                    jump_rule = ['%s qr-+ -j %s-%s' % (IPTABLES_DIR[direction],
                                 bname, chain_name)]
                    self._add_rules_to_chain(ipt_mgr, ver,
                                             'FORWARD', jump_rule)

        #jump to DROP_ALL policy
        chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN)
        jump_rule = ['-o qr-+ -j %s-%s' % (bname, chain_name)]
        self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule)
        self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule)

        #jump to DROP_ALL policy
        chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN)
        jump_rule = ['-i qr-+ -j %s-%s' % (bname, chain_name)]
        self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule)
        self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule)

    def _convert_fwaas_to_iptables_rule(self, rule):
        # Build a single iptables match/target string from a FWaaS rule
        # dict; absent fields contribute empty argument strings.
        action = rule.get('action') == 'allow' and 'ACCEPT' or 'DROP'
        args = [self._protocol_arg(rule.get('protocol')),
                self._port_arg('dport',
                               rule.get('protocol'),
                               rule.get('destination_port')),
                self._port_arg('sport',
                               rule.get('protocol'),
                               rule.get('source_port')),
                self._ip_prefix_arg('s', rule.get('source_ip_address')),
                self._ip_prefix_arg('d', rule.get('destination_ip_address')),
                self._action_arg(action)]

        iptables_rule = ' '.join(args)
        return iptables_rule

    def _drop_invalid_packets_rule(self):
        return '-m state --state INVALID -j DROP'

    def _allow_established_rule(self):
        return '-m state --state ESTABLISHED,RELATED -j ACCEPT'

    def _action_arg(self, action):
        if action:
            return '-j %s' % action
        return ''

    def _protocol_arg(self, protocol):
        if protocol:
            return '-p %s' % protocol
        return ''

    def _port_arg(self, direction, protocol, port):
        # Ports only make sense for tcp/udp.
        if not (protocol in ['udp', 'tcp'] and port):
            return ''
        return '--%s %s' % (direction, port)

    def _ip_prefix_arg(self, direction, ip_prefix):
        if ip_prefix:
            return '-%s %s' % (direction, ip_prefix)
        return ''
apache-2.0
Alwnikrotikz/l5rcm
exporters/textexporter.py
3
9080
# Copyright (C) 2011 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

import string
import textwrap
import models


class TextExporter(object):
    """Exports an L5R character sheet as a fixed-width plain-text document.

    Reads character data from `model` and widget text from `form`; both
    must be injected via the setters before calling export().
    """
    def __init__(self):
        self.model = None
        self.form = None

    def set_model(self, model):
        self.model = model

    def set_form(self, form):
        self.form = form

    def export(self, io):
        """Write the complete text sheet to the file-like object `io`."""
        # NOTE(review): the in-string line breaks of this banner were lost
        # when the file was flattened; layout below is a reconstruction.
        header ="""\
        * Created by L5R: Character Manager
        * Author: Daniele Simonetti
        * All right on L5R RPG belongs to AEG"""
        io.write(textwrap.dedent(header))
        io.write('\n\n')

        # begin CHARACTER SECTION
        self.write_character (io)
        #self.write_initiative_armor(io)
        #self.write_health (io)

    def write_character(self, io):
        # Emits every section of the sheet in order: identity, rings,
        # initiative/armor/health, then the optional list sections.
        m = self.model
        f = self.form

        io.write('# CHARACTER\n')
        io.write(str.format('{0:36}RANK : {1:>12}\n',
                            self.get_fullname(m), m.get_insight_rank()))
        io.write(str.format('CLAN : {0:>24} FAMILY : {1:>12}\n',
                            self.get_clan_name(m), self.get_family_name(m)))
        io.write(str.format('SCHOOL: {0:>24} INSIGHT: {1:>12}\n',
                            self.get_school_name(m), m.get_insight()))
        io.write(str.format('EXP : {0:>24}\n', self.get_exp(m)))
        io.write('\n')

        # Rings & Flags
        io.write(str.format("EARTH: {0:>2} Stamina : {1:>2}"
                            " Willpower : {2:>2} HONOR : {3}\n",
                            m.get_ring_rank(models.RINGS.EARTH),
                            m.get_attrib_rank(models.ATTRIBS.STAMINA),
                            m.get_attrib_rank(models.ATTRIBS.WILLPOWER),
                            m.get_honor()))
        io.write(str.format("AIR : {0:>2} Reflexes: {1:>2} "
                            "Awareness : {2:>2} GLORY : {3}\n",
                            m.get_ring_rank(models.RINGS.AIR),
                            m.get_attrib_rank(models.ATTRIBS.REFLEXES),
                            m.get_attrib_rank(models.ATTRIBS.AWARENESS),
                            m.get_glory()))
        io.write(str.format("WATER: {0:>2} Strength: {1:>2} "
                            "Perception : {2:>2} STATUS: {3}\n",
                            m.get_ring_rank(models.RINGS.WATER),
                            m.get_attrib_rank(models.ATTRIBS.STRENGTH),
                            m.get_attrib_rank(models.ATTRIBS.PERCEPTION),
                            m.get_status()))
        io.write(str.format("FIRE : {0:>2} Agility : {1:>2} "
                            "Intelligence: {2:>2} TAINT : {3}\n",
                            m.get_ring_rank(models.RINGS.FIRE),
                            m.get_attrib_rank(models.ATTRIBS.AGILITY),
                            m.get_attrib_rank(models.ATTRIBS.INTELLIGENCE),
                            m.taint))
        io.write(str.format("VOID : {0:>2} Void Points: oooooooooo\n",
                            m.get_ring_rank(models.RINGS.VOID)))
        io.write('\n')

        # Initiative / Armor TN / Health table (values read from the form
        # widgets; f.wounds is an 8-row table of health-level widgets).
        io.write(str.format("# INITIATIVE # ARMOR TN "
                            "# HEALTH / WOUNDS (X{0})\n",
                            m.health_multiplier))
        io.write(str.format("BASE : {0:<5} NAME : {1:<15}"
                            "HEALTY: {2:>2}/{3:>2} INJURED : {4:>2}/{5:>2}\n",
                            f.tx_base_init.text(), f.tx_armor_nm.text(),
                            f.wounds[0][1].text(), f.wounds[0][2].text(),
                            f.wounds[4][1].text(), f.wounds[4][2].text()))
        io.write(str.format("MODIFIER: {0:<5} BASE : {1:<15}"
                            "NICKED: {2:>2}/{3:>2} GRIPPLED: {4:>2}/{5:>2}\n",
                            f.tx_mod_init.text(), f.tx_base_tn.text(),
                            f.wounds[1][1].text(), f.wounds[1][2].text(),
                            f.wounds[5][1].text(), f.wounds[5][2].text()))
        io.write(str.format("CURRENT : {0:<5} ARMOR : {1:<15}"
                            "GRAZED: {2:>2}/{3:>2} DOWN : {4:>2}/{5:>2}\n",
                            f.tx_cur_init.text(), f.tx_armor_tn.text(),
                            f.wounds[2][1].text(), f.wounds[2][2].text(),
                            f.wounds[6][1].text(), f.wounds[6][2].text()))
        io.write(str.format(" REDUCTION: {0:<15}"
                            "HURT : {1:>2}/{2:>2} OUT : {3:>2}/{4:>2}\n",
                            f.tx_armor_rd.text(),
                            f.wounds[3][1].text(), f.wounds[3][2].text(),
                            f.wounds[7][1].text(), f.wounds[7][2].text()))
        io.write(str.format(" CURRENT : {0:<15}\n",
                            f.tx_cur_tn.text()))

        # SKILLs
        if len(f.sk_view_model.items) > 0:
            io.write('\n')
            io.write("# SKILL # RANK # TRAIT # EMPHASES\n")
            for sk in f.sk_view_model.items:
                # school skills are flagged with a trailing asterisk
                tx_name = sk.name + '*' if sk.is_school else sk.name
                io.write(str.format("{0:<18} {1:>6} {2:<12} {3:<32}\n",
                                    tx_name, sk.rank, sk.trait,
                                    ', '.join(sk.emph)))

        # MASTERY ABILITIES
        if len(f.ma_view_model.items) > 0:
            io.write('\n')
            io.write("# SKILL # RANK # EFFECT\n")
            for ma in f.ma_view_model.items:
                io.write(str.format("{0:<18} {1:>6} {2:<28}\n",
                                    ma.skill_name, ma.skill_rank, ma.desc))

        # TECHS
        if len(f.th_view_model.items) > 0:
            io.write('\n')
            io.write("# SCHOOL # RANK # TECH\n")
            for th in f.th_view_model.items:
                io.write(str.format("{0:<32}{1:>6} {2:<35}\n",
                                    th.school_name, th.rank, th.name))

        # SPELLS
        if len(f.sp_view_model.items) > 0:
            io.write('\n')
            io.write("# # SPELL # RING # RANGE\n")
            for sp in f.sp_view_model.items:
                io.write(str.format("{0:<2}{1:<32}{2:<6} {3:<7}\n",
                                    sp.mastery, sp.name, sp.ring, sp.range))

        # ADVANTAGES
        if len(f.merits_view_model.items) > 0:
            io.write('\n')
            io.write("# ADVANTAGE # RANK # XP COST\n")
            for ad in f.merits_view_model.items:
                io.write(str.format("{0:<34}{1:>6} {2:>9}\n",
                                    ad.name, ad.rank, ad.cost))

        # DISADVANTAGES
        if len(f.flaws_view_model.items) > 0:
            io.write('\n')
            io.write("# DISADVANTAGE # RANK # XP GAIN\n")
            for ad in f.flaws_view_model.items:
                # cost is negated: flaws grant XP rather than spending it
                io.write(str.format("{0:<34}{1:>6} {2:>9}\n",
                                    ad.name, ad.rank, -ad.cost))

        # MELEE WEAPONS
        if len(f.melee_view_model.items) > 0:
            io.write('\n')
            io.write("# WEAPON # DR # ALT. DR\n")
            for weap in f.melee_view_model.items:
                io.write(str.format("{0:<32}{1:<12} {2:<8}\n",
                                    weap.name, weap.dr, weap.dr_alt))

        # RANGED WEAPONS
        if len(f.ranged_view_model.items) > 0:
            io.write('\n')
            io.write("# WEAPON # RANGE # STRENGTH\n")
            for weap in f.ranged_view_model.items:
                io.write(str.format("{0:<32}{1:<12} {2:>10}\n",
                                    weap.name, weap.range, weap.strength))

        # ARROWS
        if len(f.arrow_view_model.items) > 0:
            io.write('\n')
            io.write("# ARROW # DR # QUANTITY\n")
            for weap in f.arrow_view_model.items:
                io.write(str.format("{0:<32}{1:<12} {2:>10}\n",
                                    weap.name, weap.dr, weap.qty))

    # The clan/family/school names come from the form's combo boxes, not
    # the model.
    def get_clan_name(self, model):
        return self.form.cb_pc_clan.currentText()

    def get_family_name(self, model):
        return self.form.cb_pc_family.currentText()

    def get_school_name(self, model):
        return self.form.cb_pc_school.currentText()

    def get_fullname(self, model):
        return '%s %s' % (self.get_family_name(model), model.name)

    def get_exp(self, model):
        # "spent / limit" experience points
        return '%s / %s' % (model.get_px(), model.exp_limit)
gpl-3.0
revanthkolli/osf.io
website/settings/local-dist.py
7
1111
# -*- coding: utf-8 -*- '''Example settings/local.py file. These settings override what's in website/settings/defaults.py NOTE: local.py will not be added to source control. ''' from . import defaults DEV_MODE = True DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc. SEARCH_ENGINE = 'elastic' ELASTIC_TIMEOUT = 10 # Comment out to use celery in development USE_CELERY = False # Comment out to use GnuPG in development USE_GNUPG = False # Changing this may require you to re-enter encrypted fields # Email USE_EMAIL = False MAIL_SERVER = 'localhost:1025' # For local testing MAIL_USERNAME = 'osf-smtp' MAIL_PASSWORD = 'CHANGEME' # Mailchimp email subscriptions ENABLE_EMAIL_SUBSCRIPTIONS = False # Session COOKIE_NAME = 'osf' SECRET_KEY = "CHANGEME" # Uncomment if GPG was installed with homebrew # GNUPG_BINARY = '/usr/local/bin/gpg' ##### Celery ##### ## Default RabbitMQ broker BROKER_URL = 'amqp://' # Default RabbitMQ backend CELERY_RESULT_BACKEND = 'amqp://' USE_CDN_FOR_CLIENT_LIBS = False # Example of extending default settings # defaults.IMG_FMTS += ["pdf"]
apache-2.0
shastikk/youtube-dl
youtube_dl/extractor/worldstarhiphop.py
114
2323
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class WorldStarHipHopIE(InfoExtractor):
    """Extractor for worldstarhiphop.com / worldstarcandy.com video pages
    (both desktop and mobile/android URL forms).
    """
    _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?v=(?P<id>.*)'
    _TESTS = [{
        "url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
        "md5": "9d04de741161603bf7071bbf4e883186",
        "info_dict": {
            "id": "wshh6a7q1ny0G34ZwuIO",
            "ext": "mp4",
            "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
        }
    }, {
        'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO',
        'md5': 'dc1c76c83ecc4190bb1eb143899b87d3',
        'info_dict': {
            'id': 'wshh6a7q1ny0G34ZwuIO',
            'ext': 'mp4',
            "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
        }
    }]

    def _real_extract(self, url):
        """Resolve the page: delegate to Vevo/Youtube when the page embeds
        one of those players, otherwise scrape url/title/thumbnail.
        """
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Pages embedding a Vevo player are handed off to the Vevo extractor.
        m_vevo_id = re.search(r'videoId=(.*?)&amp?', webpage)
        if m_vevo_id is not None:
            return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')

        video_url = self._search_regex(
            [r'so\.addVariable\("file","(.*?)"\)',
             r'<div class="artlist">\s*<a[^>]+href="([^"]+)">'],
            webpage, 'video URL')

        # Likewise, YouTube-hosted videos are delegated.
        if 'youtube' in video_url:
            return self.url_result(video_url, ie='Youtube')

        video_title = self._html_search_regex(
            [r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
             r'<span[^>]+class="tc-sp-pinned-title">(.*)</span>'],
            webpage, 'title')

        # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
        thumbnail = self._html_search_regex(
            r'rel="image_src" href="(.*)" />', webpage, 'thumbnail',
            default=None)
        if not thumbnail:
            _title = r'candytitles.*>(.*)</span>'
            mobj = re.search(_title, webpage)
            if mobj is not None:
                video_title = mobj.group(1)

        return {
            'id': video_id,
            'url': video_url,
            'title': video_title,
            'thumbnail': thumbnail,
        }
unlicense
DirtyPiece/dancestudio
Build/Tools/Python27/Tools/Scripts/diff.py
37
2024
#!/usr/bin/env python

""" Command line interface to difflib.py providing diffs in four formats:

* ndiff:    lists every line and highlights interline changes.
* context:  highlights clusters of changes in a before/after format.
* unified:  highlights clusters of changes in an inline format.
* html:     generates side by side comparison with change highlights.

"""

import sys, os, time, difflib, optparse

def main():
    """Parse the command line, diff the two named files, write to stdout.

    Prints help and exits with status 1 when called with no arguments;
    reports a usage error unless exactly a fromfile and a tofile are given.
    """
    # Configure the option parser
    usage = "usage: %prog [options] fromfile tofile"
    parser = optparse.OptionParser(usage)
    parser.add_option("-c", action="store_true", default=False,
                      help='Produce a context format diff (default)')
    parser.add_option("-u", action="store_true", default=False,
                      help='Produce a unified format diff')
    parser.add_option("-m", action="store_true", default=False,
                      help='Produce HTML side by side diff (can use -c and -l in conjunction)')
    parser.add_option("-n", action="store_true", default=False,
                      help='Produce a ndiff format diff')
    parser.add_option("-l", "--lines", type="int", default=3,
                      help='Set number of context lines (default 3)')
    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    if len(args) != 2:
        parser.error("need to specify both a fromfile and tofile")

    n = options.lines
    fromfile, tofile = args

    # Timestamps for the context/unified headers.
    fromdate = time.ctime(os.stat(fromfile).st_mtime)
    todate = time.ctime(os.stat(tofile).st_mtime)
    # FIX: use context managers so the file handles are closed even on
    # error (the original leaked both), and drop the 'U' open mode, which
    # was removed in Python 3.11 -- universal newlines are the default in
    # text mode anyway.
    with open(fromfile) as f:
        fromlines = f.readlines()
    with open(tofile) as f:
        tolines = f.readlines()

    # Pick the requested output format; context diff is the default.
    if options.u:
        diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile,
                                    fromdate, todate, n=n)
    elif options.n:
        diff = difflib.ndiff(fromlines, tolines)
    elif options.m:
        diff = difflib.HtmlDiff().make_file(fromlines, tolines, fromfile,
                                            tofile, context=options.c,
                                            numlines=n)
    else:
        diff = difflib.context_diff(fromlines, tolines, fromfile, tofile,
                                    fromdate, todate, n=n)

    # The difflib generators yield line-terminated strings, so writelines
    # needs no extra separators.
    sys.stdout.writelines(diff)

if __name__ == '__main__':
    main()
mit
Acidburn0zzz/servo
components/script/dom/bindings/codegen/parser/tests/test_constructor.py
8
15051
import WebIDL def WebIDLTest(parser, harness): def checkArgument(argument, QName, name, type, optional, variadic): harness.ok(isinstance(argument, WebIDL.IDLArgument), "Should be an IDLArgument") harness.check(argument.identifier.QName(), QName, "Argument has the right QName") harness.check(argument.identifier.name, name, "Argument has the right name") harness.check(str(argument.type), type, "Argument has the right return type") harness.check(argument.optional, optional, "Argument has the right optional value") harness.check(argument.variadic, variadic, "Argument has the right variadic value") def checkMethod(method, QName, name, signatures, static=True, getter=False, setter=False, deleter=False, legacycaller=False, stringifier=False, chromeOnly=False, htmlConstructor=False, secureContext=False, pref=None, func=None): harness.ok(isinstance(method, WebIDL.IDLMethod), "Should be an IDLMethod") harness.ok(method.isMethod(), "Method is a method") harness.ok(not method.isAttr(), "Method is not an attr") harness.ok(not method.isConst(), "Method is not a const") harness.check(method.identifier.QName(), QName, "Method has the right QName") harness.check(method.identifier.name, name, "Method has the right name") harness.check(method.isStatic(), static, "Method has the correct static value") harness.check(method.isGetter(), getter, "Method has the correct getter value") harness.check(method.isSetter(), setter, "Method has the correct setter value") harness.check(method.isDeleter(), deleter, "Method has the correct deleter value") harness.check(method.isLegacycaller(), legacycaller, "Method has the correct legacycaller value") harness.check(method.isStringifier(), stringifier, "Method has the correct stringifier value") harness.check(method.getExtendedAttribute("ChromeOnly") is not None, chromeOnly, "Method has the correct value for ChromeOnly") harness.check(method.isHTMLConstructor(), htmlConstructor, "Method has the correct htmlConstructor value") 
harness.check(len(method.signatures()), len(signatures), "Method has the correct number of signatures") harness.check(method.getExtendedAttribute("Pref"), pref, "Method has the correct pref value") harness.check(method.getExtendedAttribute("Func"), func, "Method has the correct func value") harness.check(method.getExtendedAttribute("SecureContext") is not None, secureContext, "Method has the correct SecureContext value") sigpairs = zip(method.signatures(), signatures) for (gotSignature, expectedSignature) in sigpairs: (gotRetType, gotArgs) = gotSignature (expectedRetType, expectedArgs) = expectedSignature harness.check(str(gotRetType), expectedRetType, "Method has the expected return type.") for i in range(0, len(gotArgs)): (QName, name, type, optional, variadic) = expectedArgs[i] checkArgument(gotArgs[i], QName, name, type, optional, variadic) def checkResults(results): harness.check(len(results), 3, "Should be three productions") harness.ok(isinstance(results[0], WebIDL.IDLInterface), "Should be an IDLInterface") harness.ok(isinstance(results[1], WebIDL.IDLInterface), "Should be an IDLInterface") harness.ok(isinstance(results[2], WebIDL.IDLInterface), "Should be an IDLInterface") checkMethod(results[0].ctor(), "::TestConstructorNoArgs::constructor", "constructor", [("TestConstructorNoArgs (Wrapper)", [])]) harness.check(len(results[0].members), 0, "TestConstructorNoArgs should not have members") checkMethod(results[1].ctor(), "::TestConstructorWithArgs::constructor", "constructor", [("TestConstructorWithArgs (Wrapper)", [("::TestConstructorWithArgs::constructor::name", "name", "String", False, False)])]) harness.check(len(results[1].members), 0, "TestConstructorWithArgs should not have members") checkMethod(results[2].ctor(), "::TestConstructorOverloads::constructor", "constructor", [("TestConstructorOverloads (Wrapper)", [("::TestConstructorOverloads::constructor::foo", "foo", "Object", False, False)]), ("TestConstructorOverloads (Wrapper)", 
[("::TestConstructorOverloads::constructor::bar", "bar", "Boolean", False, False)])]) harness.check(len(results[2].members), 0, "TestConstructorOverloads should not have members") parser.parse(""" interface TestConstructorNoArgs { constructor(); }; interface TestConstructorWithArgs { constructor(DOMString name); }; interface TestConstructorOverloads { constructor(object foo); constructor(boolean bar); }; """) results = parser.finish() checkResults(results) parser = parser.reset() parser.parse(""" interface TestPrefConstructor { [Pref="dom.webidl.test1"] constructor(); }; """) results = parser.finish() harness.check(len(results), 1, "Should be one production") harness.ok(isinstance(results[0], WebIDL.IDLInterface), "Should be an IDLInterface") checkMethod(results[0].ctor(), "::TestPrefConstructor::constructor", "constructor", [("TestPrefConstructor (Wrapper)", [])], pref=["dom.webidl.test1"]) parser = parser.reset() parser.parse(""" interface TestChromeOnlyConstructor { [ChromeOnly] constructor(); }; """) results = parser.finish() harness.check(len(results), 1, "Should be one production") harness.ok(isinstance(results[0], WebIDL.IDLInterface), "Should be an IDLInterface") checkMethod(results[0].ctor(), "::TestChromeOnlyConstructor::constructor", "constructor", [("TestChromeOnlyConstructor (Wrapper)", [])], chromeOnly=True) parser = parser.reset() parser.parse(""" interface TestSCConstructor { [SecureContext] constructor(); }; """) results = parser.finish() harness.check(len(results), 1, "Should be one production") harness.ok(isinstance(results[0], WebIDL.IDLInterface), "Should be an IDLInterface") checkMethod(results[0].ctor(), "::TestSCConstructor::constructor", "constructor", [("TestSCConstructor (Wrapper)", [])], secureContext=True) parser = parser.reset() parser.parse(""" interface TestFuncConstructor { [Func="Document::IsWebAnimationsEnabled"] constructor(); }; """) results = parser.finish() harness.check(len(results), 1, "Should be one production") 
harness.ok(isinstance(results[0], WebIDL.IDLInterface), "Should be an IDLInterface") checkMethod(results[0].ctor(), "::TestFuncConstructor::constructor", "constructor", [("TestFuncConstructor (Wrapper)", [])], func=["Document::IsWebAnimationsEnabled"]) parser = parser.reset() parser.parse(""" interface TestPrefChromeOnlySCFuncConstructor { [ChromeOnly, Pref="dom.webidl.test1", SecureContext, Func="Document::IsWebAnimationsEnabled"] constructor(); }; """) results = parser.finish() harness.check(len(results), 1, "Should be one production") harness.ok(isinstance(results[0], WebIDL.IDLInterface), "Should be an IDLInterface") checkMethod(results[0].ctor(), "::TestPrefChromeOnlySCFuncConstructor::constructor", "constructor", [("TestPrefChromeOnlySCFuncConstructor (Wrapper)", [])], func=["Document::IsWebAnimationsEnabled"], pref=["dom.webidl.test1"], chromeOnly=True, secureContext=True) parser = parser.reset() parser.parse(""" interface TestHTMLConstructor { [HTMLConstructor] constructor(); }; """) results = parser.finish() harness.check(len(results), 1, "Should be one production") harness.ok(isinstance(results[0], WebIDL.IDLInterface), "Should be an IDLInterface") checkMethod(results[0].ctor(), "::TestHTMLConstructor::constructor", "constructor", [("TestHTMLConstructor (Wrapper)", [])], htmlConstructor=True) parser = parser.reset() threw = False try: parser.parse(""" interface TestChromeOnlyConstructor { constructor() [ChromeOnly] constructor(DOMString a); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a constructor and a ChromeOnly constructor") # Test HTMLConstructor with argument parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorWithArgs { [HTMLConstructor] constructor(DOMString a); }; """) results = parser.finish() except: threw = True harness.ok(threw, "HTMLConstructor should take no argument") # Test HTMLConstructor on a callback interface parser = parser.reset() threw = False try: 
parser.parse(""" callback interface TestHTMLConstructorOnCallbackInterface { [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "HTMLConstructor can't be used on a callback interface") # Test HTMLConstructor and constructor operation parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { constructor(); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a constructor and a HTMLConstructor") parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { [Throws] constructor(); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a throwing constructor and a HTMLConstructor") parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { constructor(DOMString a); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a HTMLConstructor and a constructor operation") parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { [Throws] constructor(DOMString a); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a HTMLConstructor and a throwing constructor " "operation") # Test HTMLConstructor and [ChromeOnly] constructor operation parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { [ChromeOnly] constructor(); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a ChromeOnly constructor and a HTMLConstructor") parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { [Throws, ChromeOnly] 
constructor(); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a throwing chromeonly constructor and a " "HTMLConstructor") parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { [ChromeOnly] constructor(DOMString a); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a HTMLConstructor and a chromeonly constructor " "operation") parser = parser.reset() threw = False try: parser.parse(""" interface TestHTMLConstructorAndConstructor { [Throws, ChromeOnly] constructor(DOMString a); [HTMLConstructor] constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have both a HTMLConstructor and a throwing chromeonly " "constructor operation") parser = parser.reset() threw = False try: parser.parse(""" [NoInterfaceObject] interface InterfaceWithoutInterfaceObject { constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have a constructor operation on a [NoInterfaceObject] " "interface") parser = parser.reset() threw = False try: parser.parse(""" interface InterfaceWithPartial { }; partial interface InterfaceWithPartial { constructor(); }; """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have a constructor operation on a partial interface") parser = parser.reset() threw = False try: parser.parse(""" interface InterfaceWithMixin { }; interface mixin Mixin { constructor(); }; InterfaceWithMixin includes Mixin """) results = parser.finish() except: threw = True harness.ok(threw, "Can't have a constructor operation on a mixin")
mpl-2.0
tedi3231/openerp
build/lib/openerp/addons/l10n_in/__openerp__.py
87
1809
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Indian - Accounting', 'version': '1.0', 'description': """ Indian Accounting: Chart of Account. ==================================== Indian accounting chart and localization. """, 'author': ['OpenERP SA', 'Axelor'], 'category': 'Localization/Account Charts', 'depends': [ 'account', 'account_chart' ], 'demo': [], 'data': [ 'l10n_in_tax_code_template.xml', 'l10n_in_public_chart.xml', 'l10n_in_public_tax_template.xml', 'l10n_in_private_chart.xml', 'l10n_in_private_tax_template.xml', 'l10n_in_wizard.xml', ], 'auto_install': False, 'installable': True, 'images': ['images/config_chart_l10n_in.jpeg','images/l10n_in_chart.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Aaron1992/v2ex
html5lib/utils.py
104
4959
try:
    frozenset
except NameError:
    # Import from the sets module for python 2.3
    from sets import Set as set
    from sets import ImmutableSet as frozenset


class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

    md = MethodDispatcher({("foo", "bar"):"baz"})
    md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """

    def __init__(self, items=()):
        # Using _dictEntries instead of directly assigning to self is about
        # twice as fast. Please do careful performance testing before changing
        # anything here.
        _dictEntries = []
        for name, value in items:
            if type(name) in (list, tuple, frozenset, set):
                # Fan a collection key out into one entry per member.
                for item in name:
                    _dictEntries.append((item, value))
            else:
                _dictEntries.append((name, value))
        dict.__init__(self, _dictEntries)
        self.default = None

    def __getitem__(self, key):
        # Unlike dict, a missing key yields self.default instead of KeyError.
        return dict.get(self, key, self.default)


# Pure python implementation of deque taken from the ASPN Python Cookbook
# Original code by Raymond Hettinger
class deque(object):
    # Items live in self.data keyed by a virtual index in [left, right);
    # appends/pops at either end are O(1) dict operations.

    def __init__(self, iterable=(), maxsize=-1):
        if not hasattr(self, 'data'):
            self.left = self.right = 0
            self.data = {}
        self.maxsize = maxsize
        self.extend(iterable)

    def append(self, x):
        self.data[self.right] = x
        self.right += 1
        if self.maxsize != -1 and len(self) > self.maxsize:
            self.popleft()

    def appendleft(self, x):
        self.left -= 1
        self.data[self.left] = x
        if self.maxsize != -1 and len(self) > self.maxsize:
            self.pop()

    def pop(self):
        if self.left == self.right:
            raise IndexError('cannot pop from empty deque')
        self.right -= 1
        elem = self.data[self.right]
        del self.data[self.right]
        return elem

    def popleft(self):
        if self.left == self.right:
            raise IndexError('cannot pop from empty deque')
        elem = self.data[self.left]
        del self.data[self.left]
        self.left += 1
        return elem

    def clear(self):
        self.data.clear()
        self.left = self.right = 0

    def extend(self, iterable):
        for elem in iterable:
            self.append(elem)

    def extendleft(self, iterable):
        for elem in iterable:
            self.appendleft(elem)

    def rotate(self, n=1):
        if self:
            n %= len(self)
            for i in xrange(n):
                self.appendleft(self.pop())

    def __getitem__(self, i):
        if i < 0:
            i += len(self)
        try:
            return self.data[i + self.left]
        except KeyError:
            raise IndexError

    def __setitem__(self, i, value):
        if i < 0:
            i += len(self)
        try:
            self.data[i + self.left] = value
        except KeyError:
            raise IndexError

    def __delitem__(self, i):
        size = len(self)
        if not (-size <= i < size):
            raise IndexError
        data = self.data
        if i < 0:
            i += size
        # Shift everything after i one slot left, then drop the last slot.
        for j in xrange(self.left + i, self.right - 1):
            data[j] = data[j + 1]
        self.pop()

    def __len__(self):
        return self.right - self.left

    def __cmp__(self, other):
        if type(self) != type(other):
            return cmp(type(self), type(other))
        return cmp(list(self), list(other))

    def __repr__(self, _track=[]):
        # _track guards against infinite recursion on self-referential deques.
        if id(self) in _track:
            return '...'
        _track.append(id(self))
        r = 'deque(%r)' % (list(self),)
        _track.remove(id(self))
        return r

    def __getstate__(self):
        return (tuple(self),)

    def __setstate__(self, s):
        self.__init__(s[0])

    def __hash__(self):
        raise TypeError

    def __copy__(self):
        return self.__class__(self)

    def __deepcopy__(self, memo={}):
        from copy import deepcopy
        result = self.__class__()
        memo[id(self)] = result
        result.__init__(deepcopy(tuple(self), memo))
        return result


# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds

def encodingType():
    """Return "UCS2" or "UCS4" depending on how this Python build stores
    characters above the Basic Multilingual Plane."""
    # BUGFIX: the original called len() with no argument, which always raises
    # TypeError.  The intended probe is the length of a single astral-plane
    # character: a narrow (UCS2) build stores it as a surrogate pair (len 2),
    # a wide (UCS4) build as one code unit (len 1).
    if len(u"\U0010FFFF") == 2:
        return "UCS2"
    else:
        return "UCS4"


def isSurrogatePair(data):
    """Return True if `data` is exactly a UTF-16 surrogate pair:
    lead unit in D800-DBFF followed by trail unit in DC00-DFFF."""
    return (len(data) == 2 and
            ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
            ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)


def surrogatePairToCodepoint(data):
    """Combine a surrogate pair into the astral codepoint it encodes."""
    char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
                (ord(data[1]) - 0xDC00))
    return char_val
bsd-3-clause
retrography/scancode-toolkit
src/commoncode/fileset.py
12
6660
# # Copyright (c) 2015 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import print_function, absolute_import import fnmatch import logging import os from commoncode import fileutils from commoncode import paths DEBUG = False logger = logging.getLogger(__name__) # import sys # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) # logger.setLevel(logging.DEBUG) """ Match files and directories paths based on inclusion and exclusion glob-style patterns. For example, this can be used to skip files that match ignore patterns, similar to a version control ignore files such as .gitignore. The pattern syntax is the same as fnmatch(5) as implemented in Python. 
Patterns are applied to a path this way: - Paths are converted to POSIX paths before matching. - Patterns are NOT case-sensitive. - Leading slashes are ignored. - If the pattern contains a /, then the whole path must be matched; otherwise, the pattern matches if any path segment matches. - When matched, a directory content is matched recursively. For instance, when using patterns for ignoring, a matched a directory will be ignore with its file and sub-directories at full depth. - The order of patterns does not matter. - Exclusion patterns are prefixed with an exclamation mark (band or !) meaning that matched paths by that pattern will be excluded. Exclusions have precedences of inclusions. - Patterns starting with # are comments and skipped. use [#] for a literal #. - to match paths relative to some root path, you must design your patterns and the path tested accordingly. This module does not handles this. Patterns may include glob wildcards such as: - ? : matches any single character. - * : matches 0 or more characters. - [seq] : matches any character in seq - [!seq] :matches any character not in seq For a literal match, wrap the meta-characters in brackets. For example, '[?]' matches the character '?'. """ def match(path, includes, excludes): """ Return a tuple of two strings if `path` is matched or False if it does not. Matching is done based on the set of `includes` and `excludes` patterns maps. The returned tuple contains these two strings: pattern matched and associated message. The message explains why a path is included when matched. The message is always a string (possibly empty). `includes` and `excludes` are maps of (fnmtch pattern -> message). The order of the includes and excludes items does not matter. If one is empty, it is not used for matching. If the `path` is empty, return False. 
""" includes = includes or {} excludes = excludes or {} if not path or not path.strip(): return False included = _match(path, includes) excluded = _match(path, excludes) if DEBUG: logger.debug('in_fileset: path: %(path)r included:%(included)r, ' 'excluded:%(excluded)r .' % locals()) if excluded: return False elif included: return included else: return False def _match(path, patterns): """ Return a message if `path` is matched by a pattern from the `patterns` map or False. """ if not path or not patterns: return False path = fileutils.as_posixpath(path).lower() pathstripped = path.lstrip('/') if not pathstripped: return False segments = paths.split(pathstripped) if DEBUG: logger.debug('_match: path: %(path)r patterns:%(patterns)r.' % locals()) mtch = False for pat, msg in patterns.items(): if not pat and not pat.strip(): continue msg = msg or '' pat = pat.lstrip('/').lower() is_plain = '/' not in pat if is_plain: if any(fnmatch.fnmatchcase(s, pat) for s in segments): mtch = msg break elif (fnmatch.fnmatchcase(path, pat) or fnmatch.fnmatchcase(pathstripped, pat)): mtch = msg break if DEBUG: logger.debug('_match: match is %(mtch)r' % locals()) return mtch def load(location): """ Return a sequence of patterns from a file at location. """ if not location: return tuple() fn = os.path.abspath(os.path.normpath(os.path.expanduser(location))) msg = ('File %(location)s does not exist or not a file.') % locals() assert (os.path.exists(fn) and os.path.isfile(fn)), msg with open(fn, 'rb') as f: return [l.strip() for l in f if l and l.strip()] def includes_excludes(patterns, message): """ Return a dict of included patterns and a dict of excluded patterns from a sequence of `patterns` strings and a `message` setting the message as value in the returned mappings. Ignore pattern as comments if prefixed with #. Use an empty string is message is None. """ message = message or '' BANG = '!' 
DASH = '#' included = {} excluded = {} if not patterns: return included, excluded for pat in patterns: pat = pat.strip() if not pat or pat.startswith(DASH): continue if pat.startswith(BANG): cpat = pat.lstrip(BANG) if cpat: excluded[cpat] = message continue else: included.add[pat] = message return included, excluded
apache-2.0
edl00k/omim
3party/protobuf/gtest/test/gtest_catch_exceptions_test.py
403
9422
#!/usr/bin/env python # # Copyright 2010 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests Google Test's exception catching behavior. This script invokes gtest_catch_exceptions_test_ and gtest_catch_exceptions_ex_test_ (programs written with Google Test) and verifies their output. """ __author__ = 'vladl@google.com (Vlad Losev)' import os import gtest_test_utils # Constants. 
FLAG_PREFIX = '--gtest_' LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0' FILTER_FLAG = FLAG_PREFIX + 'filter' # Path to the gtest_catch_exceptions_ex_test_ binary, compiled with # exceptions enabled. EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath( 'gtest_catch_exceptions_ex_test_') # Path to the gtest_catch_exceptions_test_ binary, compiled with # exceptions disabled. EXE_PATH = gtest_test_utils.GetTestExecutablePath( 'gtest_catch_exceptions_no_ex_test_') TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST if SUPPORTS_SEH_EXCEPTIONS: BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output # The tests. if SUPPORTS_SEH_EXCEPTIONS: # pylint:disable-msg=C6302 class CatchSehExceptionsTest(gtest_test_utils.TestCase): """Tests exception-catching behavior.""" def TestSehExceptions(self, test_output): self.assert_('SEH exception with code 0x2a thrown ' 'in the test fixture\'s constructor' in test_output) self.assert_('SEH exception with code 0x2a thrown ' 'in the test fixture\'s destructor' in test_output) self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()' in test_output) self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()' in test_output) self.assert_('SEH exception with code 0x2a thrown in SetUp()' in test_output) self.assert_('SEH exception with code 0x2a thrown in TearDown()' in test_output) self.assert_('SEH exception with code 0x2a thrown in the test body' in test_output) def testCatchesSehExceptionsWithCxxExceptionsEnabled(self): self.TestSehExceptions(EX_BINARY_OUTPUT) def testCatchesSehExceptionsWithCxxExceptionsDisabled(self): self.TestSehExceptions(BINARY_OUTPUT) class CatchCxxExceptionsTest(gtest_test_utils.TestCase): """Tests C++ exception-catching behavior. 
Tests in this test case verify that: * C++ exceptions are caught and logged as C++ (not SEH) exceptions * Exception thrown affect the remainder of the test work flow in the expected manner. """ def testCatchesCxxExceptionsInFixtureConstructor(self): self.assert_('C++ exception with description ' '"Standard C++ exception" thrown ' 'in the test fixture\'s constructor' in EX_BINARY_OUTPUT) self.assert_('unexpected' not in EX_BINARY_OUTPUT, 'This failure belongs in this test only if ' '"CxxExceptionInConstructorTest" (no quotes) ' 'appears on the same line as words "called unexpectedly"') if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in EX_BINARY_OUTPUT): def testCatchesCxxExceptionsInFixtureDestructor(self): self.assert_('C++ exception with description ' '"Standard C++ exception" thrown ' 'in the test fixture\'s destructor' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInSetUpTestCase(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in SetUpTestCase()' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest constructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest test body ' 'called as expected.' 
in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInTearDownTestCase(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in TearDownTestCase()' in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInSetUp(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in SetUp()' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('unexpected' not in EX_BINARY_OUTPUT, 'This failure belongs in this test only if ' '"CxxExceptionInSetUpTest" (no quotes) ' 'appears on the same line as words "called unexpectedly"') def testCatchesCxxExceptionsInTearDown(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in TearDown()' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTearDownTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInTestBody(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in the test body' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTestBodyTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTestBodyTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT) def testCatchesNonStdCxxExceptions(self): self.assert_('Unknown C++ exception thrown in the test body' in EX_BINARY_OUTPUT) def testUnhandledCxxExceptionsAbortTheProgram(self): # Filters out SEH exception tests on Windows. Unhandled SEH exceptions # cause tests to show pop-up windows there. 
FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*' # By default, Google Test doesn't catch the exceptions. uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess( [EX_EXE_PATH, NO_CATCH_EXCEPTIONS_FLAG, FITLER_OUT_SEH_TESTS_FLAG]).output self.assert_('Unhandled C++ exception terminating the program' in uncaught_exceptions_ex_binary_output) self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output) if __name__ == '__main__': gtest_test_utils.Main()
apache-2.0
bratsche/Neutron-Drive
google_appengine/lib/django_1_2/tests/regressiontests/forms/localflavor/es.py
87
6531
from django.contrib.localflavor.es.forms import (ESPostalCodeField, ESPhoneNumberField, ESIdentityCardNumberField, ESCCCField, ESRegionSelect, ESProvinceSelect) from utils import LocalFlavorTestCase class ESLocalFlavorTests(LocalFlavorTestCase): def test_ESRegionSelect(self): f = ESRegionSelect() out = u'''<select name="regions"> <option value="AN">Andalusia</option> <option value="AR">Aragon</option> <option value="O">Principality of Asturias</option> <option value="IB">Balearic Islands</option> <option value="PV">Basque Country</option> <option value="CN">Canary Islands</option> <option value="S">Cantabria</option> <option value="CM">Castile-La Mancha</option> <option value="CL">Castile and Leon</option> <option value="CT" selected="selected">Catalonia</option> <option value="EX">Extremadura</option> <option value="GA">Galicia</option> <option value="LO">La Rioja</option> <option value="M">Madrid</option> <option value="MU">Region of Murcia</option> <option value="NA">Foral Community of Navarre</option> <option value="VC">Valencian Community</option> </select>''' self.assertEqual(f.render('regions', 'CT'), out) def test_ESProvinceSelect(self): f = ESProvinceSelect() out = u'''<select name="provinces"> <option value="01">Arava</option> <option value="02">Albacete</option> <option value="03">Alacant</option> <option value="04">Almeria</option> <option value="05">Avila</option> <option value="06">Badajoz</option> <option value="07">Illes Balears</option> <option value="08" selected="selected">Barcelona</option> <option value="09">Burgos</option> <option value="10">Caceres</option> <option value="11">Cadiz</option> <option value="12">Castello</option> <option value="13">Ciudad Real</option> <option value="14">Cordoba</option> <option value="15">A Coruna</option> <option value="16">Cuenca</option> <option value="17">Girona</option> <option value="18">Granada</option> <option value="19">Guadalajara</option> <option value="20">Guipuzkoa</option> <option 
value="21">Huelva</option> <option value="22">Huesca</option> <option value="23">Jaen</option> <option value="24">Leon</option> <option value="25">Lleida</option> <option value="26">La Rioja</option> <option value="27">Lugo</option> <option value="28">Madrid</option> <option value="29">Malaga</option> <option value="30">Murcia</option> <option value="31">Navarre</option> <option value="32">Ourense</option> <option value="33">Asturias</option> <option value="34">Palencia</option> <option value="35">Las Palmas</option> <option value="36">Pontevedra</option> <option value="37">Salamanca</option> <option value="38">Santa Cruz de Tenerife</option> <option value="39">Cantabria</option> <option value="40">Segovia</option> <option value="41">Seville</option> <option value="42">Soria</option> <option value="43">Tarragona</option> <option value="44">Teruel</option> <option value="45">Toledo</option> <option value="46">Valencia</option> <option value="47">Valladolid</option> <option value="48">Bizkaia</option> <option value="49">Zamora</option> <option value="50">Zaragoza</option> <option value="51">Ceuta</option> <option value="52">Melilla</option> </select>''' self.assertEqual(f.render('provinces', '08'), out) def test_ESPostalCodeField(self): error_invalid = [u'Enter a valid postal code in the range and format 01XXX - 52XXX.'] valid = { '08028': '08028', '28080': '28080', } invalid = { '53001': error_invalid, '0801': error_invalid, '080001': error_invalid, '00999': error_invalid, '08 01': error_invalid, '08A01': error_invalid, } self.assertFieldOutput(ESPostalCodeField, valid, invalid) def test_ESPhoneNumberField(self): error_invalid = [u'Enter a valid phone number in one of the formats 6XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.'] valid = { '650010101': '650010101', '931234567': '931234567', '800123123': '800123123', } invalid = { '555555555': error_invalid, '789789789': error_invalid, '99123123': error_invalid, '9999123123': error_invalid, } 
self.assertFieldOutput(ESPhoneNumberField, valid, invalid) def test_ESIdentityCardNumberField(self): error_invalid = [u'Please enter a valid NIF, NIE, or CIF.'] error_checksum_nif = [u'Invalid checksum for NIF.'] error_checksum_nie = [u'Invalid checksum for NIE.'] error_checksum_cif = [u'Invalid checksum for CIF.'] valid = { '78699688J': '78699688J', '78699688-J': '78699688J', '78699688 J': '78699688J', '78699688 j': '78699688J', 'X0901797J': 'X0901797J', 'X-6124387-Q': 'X6124387Q', 'X 0012953 G': 'X0012953G', 'x-3287690-r': 'X3287690R', 't-03287690r': 'T03287690R', 'P2907500I': 'P2907500I', 'B38790911': 'B38790911', 'B31234560': 'B31234560', 'B-3879091A': 'B3879091A', 'B 38790911': 'B38790911', 'P-3900800-H': 'P3900800H', 'P 39008008': 'P39008008', 'C-28795565': 'C28795565', 'C 2879556E': 'C2879556E', } invalid = { '78699688T': error_checksum_nif, 'X-03287690': error_invalid, 'X-03287690-T': error_checksum_nie, 'B 38790917': error_checksum_cif, 'C28795567': error_checksum_cif, 'I38790911': error_invalid, '78699688-2': error_invalid, } self.assertFieldOutput(ESIdentityCardNumberField, valid, invalid) def test_ESCCCField(self): error_invalid = [u'Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.'] error_checksum = [u'Invalid checksum for bank account number.'] valid = { '20770338793100254321': '20770338793100254321', '2077 0338 79 3100254321': '2077 0338 79 3100254321', '2077-0338-79-3100254321': '2077-0338-79-3100254321', } invalid = { '2077.0338.79.3100254321': error_invalid, '2077-0338-78-3100254321': error_checksum, '2077-0338-89-3100254321': error_checksum, '2077-03-3879-3100254321': error_invalid, } self.assertFieldOutput(ESCCCField, valid, invalid)
bsd-3-clause
cchurch/ansible
lib/ansible/modules/network/ios/ios_static_route.py
15
9688
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Ansible by Red Hat, inc # # This file is part of Ansible by Red Hat # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: ios_static_route version_added: "2.4" author: "Ricardo Carrillo Cruz (@rcarrillocruz)" short_description: Manage static IP routes on Cisco IOS network devices description: - This module provides declarative management of static IP routes on Cisco IOS network devices. notes: - Tested against IOS 15.6 options: prefix: description: - Network prefix of the static route. mask: description: - Network prefix mask of the static route. next_hop: description: - Next hop IP of the static route. vrf: description: - VRF of the static route. version_added: "2.8" interface: description: - Interface of the static route. version_added: "2.8" name: description: - Name of the static route aliases: ['description'] version_added: "2.8" admin_distance: description: - Admin distance of the static route. tag: description: - Set tag of the static route. version_added: "2.8" track: description: - Tracked item to depend on for the static route. version_added: "2.8" aggregate: description: List of static route definitions. state: description: - State of the static route configuration. 
default: present choices: ['present', 'absent'] extends_documentation_fragment: ios """ EXAMPLES = """ - name: configure static route ios_static_route: prefix: 192.168.2.0 mask: 255.255.255.0 next_hop: 10.0.0.1 - name: configure black hole in vrf blue depending on tracked item 10 ios_static_route: prefix: 192.168.2.0 mask: 255.255.255.0 vrf: blue interface: null0 track: 10 - name: configure ultimate route with name and tag ios_static_route: prefix: 192.168.2.0 mask: 255.255.255.0 interface: GigabitEthernet1 name: hello world tag: 100 - name: remove configuration ios_static_route: prefix: 192.168.2.0 mask: 255.255.255.0 next_hop: 10.0.0.1 state: absent - name: Add static route aggregates ios_static_route: aggregate: - { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } - { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } - name: Remove static route aggregates ios_static_route: aggregate: - { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } - { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 } state: absent """ RETURN = """ commands: description: The list of configuration mode commands to send to the device returned: always type: list sample: - ip route 192.168.2.0 255.255.255.0 10.0.0.1 """ from copy import deepcopy from re import findall from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.common.utils import remove_default_spec, validate_ip_address from ansible.module_utils.network.ios.ios import get_config, load_config from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args def map_obj_to_commands(want, have): commands = list() for w in want: state = w['state'] del w['state'] # Try to match an existing config with the desired config for h in have: # To delete admin_distance param from have if not it want before comparing both fields if not w.get('admin_distance') and h.get('admin_distance'): del h['admin_distance'] diff = list(set(w.items()) ^ 
set(h.items())) if not diff: break # if route is present with name or name already starts with wanted name it will not change elif len(diff) == 2 and diff[0][0] == diff[1][0] == 'name' and (not w['name'] or h['name'].startswith(w['name'])): break # If no matches found, clear `h` else: h = None command = 'ip route' prefix = w['prefix'] mask = w['mask'] vrf = w.get('vrf') if vrf: command = ' '.join((command, 'vrf', vrf, prefix, mask)) else: command = ' '.join((command, prefix, mask)) for key in ['interface', 'next_hop', 'admin_distance', 'tag', 'name', 'track']: if w.get(key): if key == 'name' and len(w.get(key).split()) > 1: command = ' '.join((command, key, '"%s"' % w.get(key))) # name with multiple words needs to be quoted elif key in ('name', 'tag', 'track'): command = ' '.join((command, key, w.get(key))) else: command = ' '.join((command, w.get(key))) if state == 'absent' and h: commands.append('no %s' % command) elif state == 'present' and not h: commands.append(command) return commands def map_config_to_obj(module): obj = [] out = get_config(module, flags='| include ip route') for line in out.splitlines(): splitted_line = findall(r'[^"\s]\S*|".+?"', line) # Split by whitespace but do not split quotes, needed for name parameter if splitted_line[2] == 'vrf': route = {'vrf': splitted_line[3]} del splitted_line[:4] # Removes the words ip route vrf vrf_name else: route = {} del splitted_line[:2] # Removes the words ip route prefix = splitted_line[0] mask = splitted_line[1] route.update({'prefix': prefix, 'mask': mask, 'admin_distance': '1'}) next_word = None for word in splitted_line[2:]: if next_word: route[next_word] = word.strip('"') # Remove quotes which is needed for name next_word = None elif validate_ip_address(word): route.update(next_hop=word) elif word.isdigit(): route.update(admin_distance=word) elif word in ('tag', 'name', 'track'): next_word = word else: route.update(interface=word) obj.append(route) return obj def map_params_to_obj(module, 
required_together=None): keys = ['prefix', 'mask', 'state', 'next_hop', 'vrf', 'interface', 'name', 'admin_distance', 'track', 'tag'] obj = [] aggregate = module.params.get('aggregate') if aggregate: for item in aggregate: route = item.copy() for key in keys: if route.get(key) is None: route[key] = module.params.get(key) route = dict((k, v) for k, v in route.items() if v is not None) module._check_required_together(required_together, route) obj.append(route) else: module._check_required_together(required_together, module.params) route = dict() for key in keys: if module.params.get(key) is not None: route[key] = module.params.get(key) obj.append(route) return obj def main(): """ main entry point for module execution """ element_spec = dict( prefix=dict(type='str'), mask=dict(type='str'), next_hop=dict(type='str'), vrf=dict(type='str'), interface=dict(type='str'), name=dict(type='str', aliases=['description']), admin_distance=dict(type='str'), track=dict(type='str'), tag=dict(tag='str'), state=dict(default='present', choices=['present', 'absent']) ) aggregate_spec = deepcopy(element_spec) aggregate_spec['prefix'] = dict(required=True) # remove default in aggregate spec, to handle common arguments remove_default_spec(aggregate_spec) argument_spec = dict( aggregate=dict(type='list', elements='dict', options=aggregate_spec), ) argument_spec.update(element_spec) argument_spec.update(ios_argument_spec) required_one_of = [['aggregate', 'prefix']] required_together = [['prefix', 'mask']] mutually_exclusive = [['aggregate', 'prefix']] module = AnsibleModule(argument_spec=argument_spec, required_one_of=required_one_of, mutually_exclusive=mutually_exclusive, supports_check_mode=True) warnings = list() check_args(module, warnings) result = {'changed': False} if warnings: result['warnings'] = warnings want = map_params_to_obj(module, required_together=required_together) have = map_config_to_obj(module) commands = map_obj_to_commands(want, have) result['commands'] = commands if 
commands: if not module.check_mode: load_config(module, commands) result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
2013Commons/HUE-SHARK
apps/filebrowser/src/filebrowser/lib/rwx.py
4
2152
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Utilities for dealing with file modes. import stat def filetype(mode): """ Returns "dir" or "file" according to what type path is. @param mode: file mode from "stat" command. """ if stat.S_ISLNK(mode): return "link" elif stat.S_ISDIR(mode): return "dir" elif stat.S_ISREG(mode): return "file" else: return "unknown" def rwxtype(mode): """ Returns l/d/-/? for use in "rwx" style strings. """ if stat.S_ISLNK(mode): return "l" elif stat.S_ISDIR(mode): return "d" elif stat.S_ISREG(mode): return "-" else: return "?" BITS = (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR, stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP, stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH, stat.S_ISVTX) def expand_mode(mode): return map(lambda y: bool(mode & y), BITS) def compress_mode(tup): mode = 0 for b, n in zip(tup, BITS): if b: mode += n return mode def rwx(mode): """ Returns "rwx"-style string like that ls would give you. I couldn't find much extant code along these lines; this is similar in spirit to the google-able "pathinfo.py". """ bools = expand_mode(mode) s = list("rwxrwxrwxt") for (i, v) in enumerate(bools[:-1]): if not v: s[i] = "-" # Sticky bit should either be 't' or no char. 
if not bools[-1]: s = s[:-1] return rwxtype(mode) + "".join(s)
apache-2.0
gtko/Sick-Beard
lib/hachoir_metadata/misc.py
90
8936
from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor from lib.hachoir_metadata.safe import fault_tolerant from lib.hachoir_parser.container import SwfFile from lib.hachoir_parser.misc import TorrentFile, TrueTypeFontFile, OLE2_File, PcfFile from lib.hachoir_core.field import isString from lib.hachoir_core.error import warning from lib.hachoir_parser import guessParser from lib.hachoir_metadata.setter import normalizeString class TorrentMetadata(RootMetadata): KEY_TO_ATTR = { u"announce": "url", u"comment": "comment", u"creation_date": "creation_date", } INFO_TO_ATTR = { u"length": "file_size", u"name": "filename", } def extract(self, torrent): for field in torrent[0]: self.processRoot(field) @fault_tolerant def processRoot(self, field): if field.name in self.KEY_TO_ATTR: key = self.KEY_TO_ATTR[field.name] value = field.value setattr(self, key, value) elif field.name == "info" and "value" in field: for field in field["value"]: self.processInfo(field) @fault_tolerant def processInfo(self, field): if field.name in self.INFO_TO_ATTR: key = self.INFO_TO_ATTR[field.name] value = field.value setattr(self, key, value) elif field.name == "piece_length": self.comment = "Piece length: %s" % field.display class TTF_Metadata(RootMetadata): NAMEID_TO_ATTR = { 0: "copyright", # Copyright notice 3: "title", # Unique font identifier 5: "version", # Version string 8: "author", # Manufacturer name 11: "url", # URL Vendor 14: "copyright", # License info URL } def extract(self, ttf): if "header" in ttf: self.extractHeader(ttf["header"]) if "names" in ttf: self.extractNames(ttf["names"]) @fault_tolerant def extractHeader(self, header): self.creation_date = header["created"].value self.last_modification = header["modified"].value self.comment = u"Smallest readable size in pixels: %s pixels" % header["lowest"].value self.comment = u"Font direction: %s" % header["font_dir"].display @fault_tolerant def extractNames(self, names): offset = names["offset"].value for header 
in names.array("header"): key = header["nameID"].value foffset = offset + header["offset"].value field = names.getFieldByAddress(foffset*8) if not field or not isString(field): continue value = field.value if key not in self.NAMEID_TO_ATTR: continue key = self.NAMEID_TO_ATTR[key] if key == "version" and value.startswith(u"Version "): # "Version 1.2" => "1.2" value = value[8:] setattr(self, key, value) class OLE2_Metadata(RootMetadata): SUMMARY_ID_TO_ATTR = { 2: "title", # Title 3: "title", # Subject 4: "author", 6: "comment", 8: "author", # Last saved by 12: "creation_date", 13: "last_modification", 14: "nb_page", 18: "producer", } IGNORE_SUMMARY = set(( 1, # Code page )) DOC_SUMMARY_ID_TO_ATTR = { 3: "title", # Subject 14: "author", # Manager } IGNORE_DOC_SUMMARY = set(( 1, # Code page )) def extract(self, ole2): self._extract(ole2) def _extract(self, fieldset, main_document=True): if main_document: # _feedAll() is needed to make sure that we get all root[*] fragments fieldset._feedAll() if "root[0]" in fieldset: self.useRoot(fieldset["root[0]"]) doc_summary = self.getField(fieldset, main_document, "doc_summary[0]") if doc_summary: self.useSummary(doc_summary, True) word_doc = self.getField(fieldset, main_document, "word_doc[0]") if word_doc: self.useWordDocument(word_doc) summary = self.getField(fieldset, main_document, "summary[0]") if summary: self.useSummary(summary, False) @fault_tolerant def useRoot(self, root): stream = root.getSubIStream() ministream = guessParser(stream) if not ministream: warning("Unable to create the OLE2 mini stream parser!") return self._extract(ministream, main_document=False) def getField(self, fieldset, main_document, name): if name not in fieldset: return None # _feedAll() is needed to make sure that we get all fragments # eg. 
summary[0], summary[1], ..., summary[n] fieldset._feedAll() field = fieldset[name] if main_document: stream = field.getSubIStream() field = guessParser(stream) if not field: warning("Unable to create the OLE2 parser for %s!" % name) return None return field @fault_tolerant def useSummary(self, summary, is_doc_summary): if "os" in summary: self.os = summary["os"].display if "section[0]" not in summary: return summary = summary["section[0]"] for property in summary.array("property_index"): self.useProperty(summary, property, is_doc_summary) @fault_tolerant def useWordDocument(self, doc): self.comment = "Encrypted: %s" % doc["fEncrypted"].value @fault_tolerant def useProperty(self, summary, property, is_doc_summary): field = summary.getFieldByAddress(property["offset"].value*8) if not field \ or "value" not in field: return field = field["value"] if not field.hasValue(): return # Get value value = field.value if isinstance(value, (str, unicode)): value = normalizeString(value) if not value: return # Get property identifier prop_id = property["id"].value if is_doc_summary: id_to_attr = self.DOC_SUMMARY_ID_TO_ATTR ignore = self.IGNORE_DOC_SUMMARY else: id_to_attr = self.SUMMARY_ID_TO_ATTR ignore = self.IGNORE_SUMMARY if prop_id in ignore: return # Get Hachoir metadata key try: key = id_to_attr[prop_id] use_prefix = False except LookupError: key = "comment" use_prefix = True if use_prefix: prefix = property["id"].display if (prefix in ("TotalEditingTime", "LastPrinted")) \ and (not field): # Ignore null time delta return value = "%s: %s" % (prefix, value) else: if (key == "last_modification") and (not field): # Ignore null timestamp return setattr(self, key, value) class PcfMetadata(RootMetadata): PROP_TO_KEY = { 'CHARSET_REGISTRY': 'charset', 'COPYRIGHT': 'copyright', 'WEIGHT_NAME': 'font_weight', 'FOUNDRY': 'author', 'FONT': 'title', '_XMBDFED_INFO': 'producer', } def extract(self, pcf): if "properties" in pcf: self.useProperties(pcf["properties"]) def 
useProperties(self, properties): last = properties["total_str_length"] offset0 = last.address + last.size for index in properties.array("property"): # Search name and value value = properties.getFieldByAddress(offset0+index["value_offset"].value*8) if not value: continue value = value.value if not value: continue name = properties.getFieldByAddress(offset0+index["name_offset"].value*8) if not name: continue name = name.value if name not in self.PROP_TO_KEY: warning("Skip %s=%r" % (name, value)) continue key = self.PROP_TO_KEY[name] setattr(self, key, value) class SwfMetadata(RootMetadata): def extract(self, swf): self.height = swf["rect/ymax"].value # twips self.width = swf["rect/xmax"].value # twips self.format_version = "flash version %s" % swf["version"].value self.frame_rate = swf["frame_rate"].value self.comment = "Frame count: %s" % swf["frame_count"].value registerExtractor(TorrentFile, TorrentMetadata) registerExtractor(TrueTypeFontFile, TTF_Metadata) registerExtractor(OLE2_File, OLE2_Metadata) registerExtractor(PcfFile, PcfMetadata) registerExtractor(SwfFile, SwfMetadata)
gpl-3.0
ant31/kpm
kpm/commands/kexec.py
1
1587
from kpm.console import KubernetesExec from kpm.commands.command_base import CommandBase class ExecCmd(CommandBase): name = 'exec' help_message = "exec a command in pod from the RC or RS name.\ It executes the command on the first matching pod'" def __init__(self, options): self.output = options.output self.kind = options.kind self.container = options.container self.namespace = options.namespace self.resource = options.name self.cmd = options.cmd self.result = None super(ExecCmd, self).__init__(options) @classmethod def _add_arguments(self, parser): parser.add_argument('cmd', nargs='+', help="command to execute") parser.add_argument("--namespace", nargs="?", help="kubernetes namespace", default='default') parser.add_argument('-k', '--kind', choices=['deployment', 'rs', 'rc'], nargs="?", help="deployment, rc or rs", default='rc') parser.add_argument('-n', '--name', help="resource name", default='rs') parser.add_argument('-c', '--container', nargs='?', help="container name", default=None) def _call(self): c = KubernetesExec(self.resource, cmd=" ".join(self.cmd), namespace=self.namespace, container=self.container, kind=self.kind) self.result = c.call() def _render_json(self): pass def _render_console(self): print self.result
apache-2.0
sunjeammy/tornado
tornado/test/simple_httpclient_test.py
13
22722
from __future__ import absolute_import, division, print_function, with_statement import collections from contextlib import closing import errno import gzip import logging import os import re import socket import sys from tornado import gen from tornado.httpclient import AsyncHTTPClient from tornado.httputil import HTTPHeaders from tornado.ioloop import IOLoop from tornado.log import gen_log from tornado.netutil import Resolver, bind_sockets from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler from tornado.test import httpclient_test from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog from tornado.test.util import skipOnTravis, skipIfNoIPv6 from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase): def get_http_client(self): client = SimpleAsyncHTTPClient(io_loop=self.io_loop, force_instance=True) self.assertTrue(isinstance(client, SimpleAsyncHTTPClient)) return client class TriggerHandler(RequestHandler): def initialize(self, queue, wake_callback): self.queue = queue self.wake_callback = wake_callback @asynchronous def get(self): logging.debug("queuing trigger") self.queue.append(self.finish) if self.get_argument("wake", "true") == "true": self.wake_callback() class HangHandler(RequestHandler): @asynchronous def get(self): pass class ContentLengthHandler(RequestHandler): def get(self): self.set_header("Content-Length", self.get_argument("value")) self.write("ok") class HeadHandler(RequestHandler): def head(self): self.set_header("Content-Length", "7") class OptionsHandler(RequestHandler): def options(self): self.set_header("Access-Control-Allow-Origin", "*") self.write("ok") class NoContentHandler(RequestHandler): def get(self): if self.get_argument("error", None): 
self.set_header("Content-Length", "5") self.write("hello") self.set_status(204) class SeeOtherPostHandler(RequestHandler): def post(self): redirect_code = int(self.request.body) assert redirect_code in (302, 303), "unexpected body %r" % self.request.body self.set_header("Location", "/see_other_get") self.set_status(redirect_code) class SeeOtherGetHandler(RequestHandler): def get(self): if self.request.body: raise Exception("unexpected body %r" % self.request.body) self.write("ok") class HostEchoHandler(RequestHandler): def get(self): self.write(self.request.headers["Host"]) class NoContentLengthHandler(RequestHandler): @gen.coroutine def get(self): # Emulate the old HTTP/1.0 behavior of returning a body with no # content-length. Tornado handles content-length at the framework # level so we have to go around it. stream = self.request.connection.stream yield stream.write(b"HTTP/1.0 200 OK\r\n\r\n" b"hello") stream.close() class EchoPostHandler(RequestHandler): def post(self): self.write(self.request.body) @stream_request_body class RespondInPrepareHandler(RequestHandler): def prepare(self): self.set_status(403) self.finish("forbidden") class SimpleHTTPClientTestMixin(object): def get_app(self): # callable objects to finish pending /trigger requests self.triggers = collections.deque() return Application([ url("/trigger", TriggerHandler, dict(queue=self.triggers, wake_callback=self.stop)), url("/chunk", ChunkHandler), url("/countdown/([0-9]+)", CountdownHandler, name="countdown"), url("/hang", HangHandler), url("/hello", HelloWorldHandler), url("/content_length", ContentLengthHandler), url("/head", HeadHandler), url("/options", OptionsHandler), url("/no_content", NoContentHandler), url("/see_other_post", SeeOtherPostHandler), url("/see_other_get", SeeOtherGetHandler), url("/host_echo", HostEchoHandler), url("/no_content_length", NoContentLengthHandler), url("/echo_post", EchoPostHandler), url("/respond_in_prepare", RespondInPrepareHandler), ], gzip=True) def 
test_singleton(self): # Class "constructor" reuses objects on the same IOLoop self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is SimpleAsyncHTTPClient(self.io_loop)) # unless force_instance is used self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not SimpleAsyncHTTPClient(self.io_loop, force_instance=True)) # different IOLoops use different objects with closing(IOLoop()) as io_loop2: self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not SimpleAsyncHTTPClient(io_loop2)) def test_connection_limit(self): with closing(self.create_client(max_clients=2)) as client: self.assertEqual(client.max_clients, 2) seen = [] # Send 4 requests. Two can be sent immediately, while the others # will be queued for i in range(4): client.fetch(self.get_url("/trigger"), lambda response, i=i: (seen.append(i), self.stop())) self.wait(condition=lambda: len(self.triggers) == 2) self.assertEqual(len(client.queue), 2) # Finish the first two requests and let the next two through self.triggers.popleft()() self.triggers.popleft()() self.wait(condition=lambda: (len(self.triggers) == 2 and len(seen) == 2)) self.assertEqual(set(seen), set([0, 1])) self.assertEqual(len(client.queue), 0) # Finish all the pending requests self.triggers.popleft()() self.triggers.popleft()() self.wait(condition=lambda: len(seen) == 4) self.assertEqual(set(seen), set([0, 1, 2, 3])) self.assertEqual(len(self.triggers), 0) def test_redirect_connection_limit(self): # following redirects should not consume additional connections with closing(self.create_client(max_clients=1)) as client: client.fetch(self.get_url('/countdown/3'), self.stop, max_redirects=3) response = self.wait() response.rethrow() def test_default_certificates_exist(self): open(_default_ca_certs()).close() def test_gzip(self): # All the tests in this file should be using gzip, but this test # ensures that it is in fact getting compressed. # Setting Accept-Encoding manually bypasses the client's # decompression so we can see the raw data. 
response = self.fetch("/chunk", use_gzip=False, headers={"Accept-Encoding": "gzip"}) self.assertEqual(response.headers["Content-Encoding"], "gzip") self.assertNotEqual(response.body, b"asdfqwer") # Our test data gets bigger when gzipped. Oops. :) self.assertEqual(len(response.body), 34) f = gzip.GzipFile(mode="r", fileobj=response.buffer) self.assertEqual(f.read(), b"asdfqwer") def test_max_redirects(self): response = self.fetch("/countdown/5", max_redirects=3) self.assertEqual(302, response.code) # We requested 5, followed three redirects for 4, 3, 2, then the last # unfollowed redirect is to 1. self.assertTrue(response.request.url.endswith("/countdown/5")) self.assertTrue(response.effective_url.endswith("/countdown/2")) self.assertTrue(response.headers["Location"].endswith("/countdown/1")) def test_header_reuse(self): # Apps may reuse a headers object if they are only passing in constant # headers like user-agent. The header object should not be modified. headers = HTTPHeaders({'User-Agent': 'Foo'}) self.fetch("/hello", headers=headers) self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')]) def test_see_other_redirect(self): for code in (302, 303): response = self.fetch("/see_other_post", method="POST", body="%d" % code) self.assertEqual(200, response.code) self.assertTrue(response.request.url.endswith("/see_other_post")) self.assertTrue(response.effective_url.endswith("/see_other_get")) # request is the original request, is a POST still self.assertEqual("POST", response.request.method) @skipOnTravis def test_request_timeout(self): response = self.fetch('/trigger?wake=false', request_timeout=0.1) self.assertEqual(response.code, 599) self.assertTrue(0.099 < response.request_time < 0.15, response.request_time) self.assertEqual(str(response.error), "HTTP 599: Timeout") # trigger the hanging request to let it clean up after itself self.triggers.popleft()() @skipIfNoIPv6 def test_ipv6(self): try: [sock] = bind_sockets(None, '::1', family=socket.AF_INET6) 
port = sock.getsockname()[1] self.http_server.add_socket(sock) except socket.gaierror as e: if e.args[0] == socket.EAI_ADDRFAMILY: # python supports ipv6, but it's not configured on the network # interface, so skip this test. return raise url = '%s://[::1]:%d/hello' % (self.get_protocol(), port) # ipv6 is currently enabled by default but can be disabled self.http_client.fetch(url, self.stop, allow_ipv6=False) response = self.wait() self.assertEqual(response.code, 599) self.http_client.fetch(url, self.stop) response = self.wait() self.assertEqual(response.body, b"Hello world!") def xtest_multiple_content_length_accepted(self): response = self.fetch("/content_length?value=2,2") self.assertEqual(response.body, b"ok") response = self.fetch("/content_length?value=2,%202,2") self.assertEqual(response.body, b"ok") response = self.fetch("/content_length?value=2,4") self.assertEqual(response.code, 599) response = self.fetch("/content_length?value=2,%202,3") self.assertEqual(response.code, 599) def test_head_request(self): response = self.fetch("/head", method="HEAD") self.assertEqual(response.code, 200) self.assertEqual(response.headers["content-length"], "7") self.assertFalse(response.body) def test_options_request(self): response = self.fetch("/options", method="OPTIONS") self.assertEqual(response.code, 200) self.assertEqual(response.headers["content-length"], "2") self.assertEqual(response.headers["access-control-allow-origin"], "*") self.assertEqual(response.body, b"ok") def test_no_content(self): response = self.fetch("/no_content") self.assertEqual(response.code, 204) # 204 status doesn't need a content-length, but tornado will # add a zero content-length anyway. # # A test without a content-length header is included below # in HTTP204NoContentTestCase. 
self.assertEqual(response.headers["Content-length"], "0") # 204 status with non-zero content length is malformed with ExpectLog(gen_log, "Malformed HTTP message"): response = self.fetch("/no_content?error=1") self.assertEqual(response.code, 599) def test_host_header(self): host_re = re.compile(b"^localhost:[0-9]+$") response = self.fetch("/host_echo") self.assertTrue(host_re.match(response.body)) url = self.get_url("/host_echo").replace("http://", "http://me:secret@") self.http_client.fetch(url, self.stop) response = self.wait() self.assertTrue(host_re.match(response.body), response.body) def test_connection_refused(self): server_socket, port = bind_unused_port() server_socket.close() with ExpectLog(gen_log, ".*", required=False): self.http_client.fetch("http://localhost:%d/" % port, self.stop) response = self.wait() self.assertEqual(599, response.code) if sys.platform != 'cygwin': # cygwin returns EPERM instead of ECONNREFUSED here contains_errno = str(errno.ECONNREFUSED) in str(response.error) if not contains_errno and hasattr(errno, "WSAECONNREFUSED"): contains_errno = str(errno.WSAECONNREFUSED) in str(response.error) self.assertTrue(contains_errno, response.error) # This is usually "Connection refused". # On windows, strerror is broken and returns "Unknown error". expected_message = os.strerror(errno.ECONNREFUSED) self.assertTrue(expected_message in str(response.error), response.error) def test_queue_timeout(self): with closing(self.create_client(max_clients=1)) as client: client.fetch(self.get_url('/trigger'), self.stop, request_timeout=10) # Wait for the trigger request to block, not complete. 
self.wait() client.fetch(self.get_url('/hello'), self.stop, connect_timeout=0.1) response = self.wait() self.assertEqual(response.code, 599) self.assertTrue(response.request_time < 1, response.request_time) self.assertEqual(str(response.error), "HTTP 599: Timeout") self.triggers.popleft()() self.wait() def test_no_content_length(self): response = self.fetch("/no_content_length") self.assertEquals(b"hello", response.body) def sync_body_producer(self, write): write(b'1234') write(b'5678') @gen.coroutine def async_body_producer(self, write): yield write(b'1234') yield gen.Task(IOLoop.current().add_callback) yield write(b'5678') def test_sync_body_producer_chunked(self): response = self.fetch("/echo_post", method="POST", body_producer=self.sync_body_producer) response.rethrow() self.assertEqual(response.body, b"12345678") def test_sync_body_producer_content_length(self): response = self.fetch("/echo_post", method="POST", body_producer=self.sync_body_producer, headers={'Content-Length': '8'}) response.rethrow() self.assertEqual(response.body, b"12345678") def test_async_body_producer_chunked(self): response = self.fetch("/echo_post", method="POST", body_producer=self.async_body_producer) response.rethrow() self.assertEqual(response.body, b"12345678") def test_async_body_producer_content_length(self): response = self.fetch("/echo_post", method="POST", body_producer=self.async_body_producer, headers={'Content-Length': '8'}) response.rethrow() self.assertEqual(response.body, b"12345678") def test_100_continue(self): response = self.fetch("/echo_post", method="POST", body=b"1234", expect_100_continue=True) self.assertEqual(response.body, b"1234") def test_100_continue_early_response(self): def body_producer(write): raise Exception("should not be called") response = self.fetch("/respond_in_prepare", method="POST", body_producer=body_producer, expect_100_continue=True) self.assertEqual(response.code, 403) class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, 
AsyncHTTPTestCase): def setUp(self): super(SimpleHTTPClientTestCase, self).setUp() self.http_client = self.create_client() def create_client(self, **kwargs): return SimpleAsyncHTTPClient(self.io_loop, force_instance=True, **kwargs) class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase): def setUp(self): super(SimpleHTTPSClientTestCase, self).setUp() self.http_client = self.create_client() def create_client(self, **kwargs): return SimpleAsyncHTTPClient(self.io_loop, force_instance=True, defaults=dict(validate_cert=False), **kwargs) class CreateAsyncHTTPClientTestCase(AsyncTestCase): def setUp(self): super(CreateAsyncHTTPClientTestCase, self).setUp() self.saved = AsyncHTTPClient._save_configuration() def tearDown(self): AsyncHTTPClient._restore_configuration(self.saved) super(CreateAsyncHTTPClientTestCase, self).tearDown() def test_max_clients(self): AsyncHTTPClient.configure(SimpleAsyncHTTPClient) with closing(AsyncHTTPClient( self.io_loop, force_instance=True)) as client: self.assertEqual(client.max_clients, 10) with closing(AsyncHTTPClient( self.io_loop, max_clients=11, force_instance=True)) as client: self.assertEqual(client.max_clients, 11) # Now configure max_clients statically and try overriding it # with each way max_clients can be passed AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12) with closing(AsyncHTTPClient( self.io_loop, force_instance=True)) as client: self.assertEqual(client.max_clients, 12) with closing(AsyncHTTPClient( self.io_loop, max_clients=13, force_instance=True)) as client: self.assertEqual(client.max_clients, 13) with closing(AsyncHTTPClient( self.io_loop, max_clients=14, force_instance=True)) as client: self.assertEqual(client.max_clients, 14) class HTTP100ContinueTestCase(AsyncHTTPTestCase): def respond_100(self, request): self.request = request self.request.connection.stream.write( b"HTTP/1.1 100 CONTINUE\r\n\r\n", self.respond_200) def respond_200(self): self.request.connection.stream.write( 
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA", self.request.connection.stream.close) def get_app(self): # Not a full Application, but works as an HTTPServer callback return self.respond_100 def test_100_continue(self): res = self.fetch('/') self.assertEqual(res.body, b'A') class HTTP204NoContentTestCase(AsyncHTTPTestCase): def respond_204(self, request): # A 204 response never has a body, even if doesn't have a content-length # (which would otherwise mean read-until-close). Tornado always # sends a content-length, so we simulate here a server that sends # no content length and does not close the connection. # # Tests of a 204 response with a Content-Length header are included # in SimpleHTTPClientTestMixin. request.connection.stream.write( b"HTTP/1.1 204 No content\r\n\r\n") def get_app(self): return self.respond_204 def test_204_no_content(self): resp = self.fetch('/') self.assertEqual(resp.code, 204) self.assertEqual(resp.body, b'') class HostnameMappingTestCase(AsyncHTTPTestCase): def setUp(self): super(HostnameMappingTestCase, self).setUp() self.http_client = SimpleAsyncHTTPClient( self.io_loop, hostname_mapping={ 'www.example.com': '127.0.0.1', ('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()), }) def get_app(self): return Application([url("/hello", HelloWorldHandler), ]) def test_hostname_mapping(self): self.http_client.fetch( 'http://www.example.com:%d/hello' % self.get_http_port(), self.stop) response = self.wait() response.rethrow() self.assertEqual(response.body, b'Hello world!') def test_port_mapping(self): self.http_client.fetch('http://foo.example.com:8000/hello', self.stop) response = self.wait() response.rethrow() self.assertEqual(response.body, b'Hello world!') class ResolveTimeoutTestCase(AsyncHTTPTestCase): def setUp(self): # Dummy Resolver subclass that never invokes its callback. 
class BadResolver(Resolver): def resolve(self, *args, **kwargs): pass super(ResolveTimeoutTestCase, self).setUp() self.http_client = SimpleAsyncHTTPClient( self.io_loop, resolver=BadResolver()) def get_app(self): return Application([url("/hello", HelloWorldHandler), ]) def test_resolve_timeout(self): response = self.fetch('/hello', connect_timeout=0.1) self.assertEqual(response.code, 599) class MaxHeaderSizeTest(AsyncHTTPTestCase): def get_app(self): class SmallHeaders(RequestHandler): def get(self): self.set_header("X-Filler", "a" * 100) self.write("ok") class LargeHeaders(RequestHandler): def get(self): self.set_header("X-Filler", "a" * 1000) self.write("ok") return Application([('/small', SmallHeaders), ('/large', LargeHeaders)]) def get_http_client(self): return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024) def test_small_headers(self): response = self.fetch('/small') response.rethrow() self.assertEqual(response.body, b'ok') def test_large_headers(self): with ExpectLog(gen_log, "Unsatisfiable read"): response = self.fetch('/large') self.assertEqual(response.code, 599)
apache-2.0
kjc88/sl4a
python/src/Lib/bsddb/test/test_compare.py
33
8510
""" TestCases for python DB Btree key comparison function. """ import sys, os, re import test_all from cStringIO import StringIO import unittest from test_all import db, dbshelve, test_support, \ get_new_environment_path, get_new_database_path lexical_cmp = cmp def lowercase_cmp(left, right): return cmp (left.lower(), right.lower()) def make_reverse_comparator (cmp): def reverse (left, right, delegate=cmp): return - delegate (left, right) return reverse _expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf'] _expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP'] class ComparatorTests (unittest.TestCase): def comparator_test_helper (self, comparator, expected_data): data = expected_data[:] import sys if sys.version_info[0] < 3 : if sys.version_info[:3] < (2, 4, 0): data.sort(comparator) else : data.sort(cmp=comparator) else : # Insertion Sort. Please, improve data2 = [] for i in data : for j, k in enumerate(data2) : r = comparator(k, i) if r == 1 : data2.insert(j, i) break else : data2.append(i) data = data2 self.failUnless (data == expected_data, "comparator `%s' is not right: %s vs. 
%s" % (comparator, expected_data, data)) def test_lexical_comparator (self): self.comparator_test_helper (lexical_cmp, _expected_lexical_test_data) def test_reverse_lexical_comparator (self): rev = _expected_lexical_test_data[:] rev.reverse () self.comparator_test_helper (make_reverse_comparator (lexical_cmp), rev) def test_lowercase_comparator (self): self.comparator_test_helper (lowercase_cmp, _expected_lowercase_test_data) class AbstractBtreeKeyCompareTestCase (unittest.TestCase): env = None db = None def setUp (self): self.filename = self.__class__.__name__ + '.db' self.homeDir = get_new_environment_path() env = db.DBEnv() env.open (self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_THREAD) self.env = env def tearDown (self): self.closeDB() if self.env is not None: self.env.close() self.env = None test_support.rmtree(self.homeDir) def addDataToDB (self, data): i = 0 for item in data: self.db.put (item, str (i)) i = i + 1 def createDB (self, key_comparator): self.db = db.DB (self.env) self.setupDB (key_comparator) self.db.open (self.filename, "test", db.DB_BTREE, db.DB_CREATE) def setupDB (self, key_comparator): self.db.set_bt_compare (key_comparator) def closeDB (self): if self.db is not None: self.db.close () self.db = None def startTest (self): pass def finishTest (self, expected = None): if expected is not None: self.check_results (expected) self.closeDB () def check_results (self, expected): curs = self.db.cursor () try: index = 0 rec = curs.first () while rec: key, ignore = rec self.failUnless (index < len (expected), "to many values returned from cursor") self.failUnless (expected[index] == key, "expected value `%s' at %d but got `%s'" % (expected[index], index, key)) index = index + 1 rec = curs.next () self.failUnless (index == len (expected), "not enough values returned from cursor") finally: curs.close () class BtreeKeyCompareTestCase (AbstractBtreeKeyCompareTestCase): def runCompareTest (self, comparator, data): self.startTest () 
self.createDB (comparator) self.addDataToDB (data) self.finishTest (data) def test_lexical_ordering (self): self.runCompareTest (lexical_cmp, _expected_lexical_test_data) def test_reverse_lexical_ordering (self): expected_rev_data = _expected_lexical_test_data[:] expected_rev_data.reverse () self.runCompareTest (make_reverse_comparator (lexical_cmp), expected_rev_data) def test_compare_function_useless (self): self.startTest () def socialist_comparator (l, r): return 0 self.createDB (socialist_comparator) self.addDataToDB (['b', 'a', 'd']) # all things being equal the first key will be the only key # in the database... (with the last key's value fwiw) self.finishTest (['b']) class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase): def test_raises_non_callable (self): self.startTest () self.assertRaises (TypeError, self.createDB, 'abc') self.assertRaises (TypeError, self.createDB, None) self.finishTest () def test_set_bt_compare_with_function (self): self.startTest () self.createDB (lexical_cmp) self.finishTest () def check_results (self, results): pass def test_compare_function_incorrect (self): self.startTest () def bad_comparator (l, r): return 1 # verify that set_bt_compare checks that comparator('', '') == 0 self.assertRaises (TypeError, self.createDB, bad_comparator) self.finishTest () def verifyStderr(self, method, successRe): """ Call method() while capturing sys.stderr output internally and call self.fail() if successRe.search() does not match the stderr output. This is used to test for uncatchable exceptions. 
""" stdErr = sys.stderr sys.stderr = StringIO() try: method() finally: temp = sys.stderr sys.stderr = stdErr errorOut = temp.getvalue() if not successRe.search(errorOut): self.fail("unexpected stderr output:\n"+errorOut) def _test_compare_function_exception (self): self.startTest () def bad_comparator (l, r): if l == r: # pass the set_bt_compare test return 0 raise RuntimeError, "i'm a naughty comparison function" self.createDB (bad_comparator) #print "\n*** test should print 2 uncatchable tracebacks ***" self.addDataToDB (['a', 'b', 'c']) # this should raise, but... self.finishTest () def test_compare_function_exception(self): self.verifyStderr( self._test_compare_function_exception, re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S) ) def _test_compare_function_bad_return (self): self.startTest () def bad_comparator (l, r): if l == r: # pass the set_bt_compare test return 0 return l self.createDB (bad_comparator) #print "\n*** test should print 2 errors about returning an int ***" self.addDataToDB (['a', 'b', 'c']) # this should raise, but... self.finishTest () def test_compare_function_bad_return(self): self.verifyStderr( self._test_compare_function_bad_return, re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S) ) def test_cannot_assign_twice (self): def my_compare (a, b): return 0 self.startTest () self.createDB (my_compare) try: self.db.set_bt_compare (my_compare) self.assert_(0, "this set should fail") except RuntimeError, msg: pass def test_suite (): res = unittest.TestSuite () res.addTest (unittest.makeSuite (ComparatorTests)) res.addTest (unittest.makeSuite (BtreeExceptionsTestCase)) res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase)) return res if __name__ == '__main__': unittest.main (defaultTest = 'suite')
apache-2.0
CamelBackNotation/CarnotKE
jyhton/lib-python/2.7/encodings/cp775.py
593
34732
""" Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp775', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE 0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE 0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON 0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA 0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA 0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x0090: 0x00c9, # LATIN 
CAPITAL LETTER E WITH ACUTE 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE 0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA 0x0096: 0x00a2, # CENT SIGN 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE 0x009e: 0x00d7, # MULTIPLICATION SIGN 0x009f: 0x00a4, # CURRENCY SIGN 0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON 0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE 0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK 0x00a7: 0x00a6, # BROKEN BAR 0x00a8: 0x00a9, # COPYRIGHT SIGN 0x00a9: 0x00ae, # REGISTERED SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK 0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON 0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK 0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX 
DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK 0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK 0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON 0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK 0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON 0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK 0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE 0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK 0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON 0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK 0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON 0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON 
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE 0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA 0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA 0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA 0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA 0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA 0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON 0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA 0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK 0x00f0: 0x00ad, # SOFT HYPHEN 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS 0x00f4: 0x00b6, # PILCROW SIGN 0x00f5: 0x00a7, # SECTION SIGN 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x00b9, # SUPERSCRIPT ONE 0x00fc: 0x00b3, # SUPERSCRIPT THREE 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( u'\x00' # 0x0000 -> NULL u'\x01' # 0x0001 -> START OF HEADING u'\x02' # 0x0002 -> START OF TEXT u'\x03' # 0x0003 -> END OF TEXT u'\x04' # 0x0004 -> END OF TRANSMISSION u'\x05' # 0x0005 -> ENQUIRY u'\x06' # 0x0006 -> ACKNOWLEDGE u'\x07' # 0x0007 -> BELL u'\x08' # 0x0008 -> BACKSPACE u'\t' # 0x0009 -> HORIZONTAL TABULATION u'\n' # 0x000a -> LINE FEED u'\x0b' # 0x000b -> VERTICAL TABULATION u'\x0c' # 0x000c -> FORM FEED u'\r' # 0x000d -> CARRIAGE RETURN u'\x0e' # 0x000e -> SHIFT OUT u'\x0f' # 0x000f -> SHIFT IN u'\x10' # 0x0010 -> DATA LINK ESCAPE u'\x11' # 0x0011 -> DEVICE CONTROL ONE u'\x12' # 0x0012 -> DEVICE CONTROL TWO u'\x13' # 0x0013 -> DEVICE CONTROL THREE u'\x14' # 0x0014 -> DEVICE CONTROL FOUR u'\x15' # 0x0015 -> NEGATIVE 
ACKNOWLEDGE u'\x16' # 0x0016 -> SYNCHRONOUS IDLE u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK u'\x18' # 0x0018 -> CANCEL u'\x19' # 0x0019 -> END OF MEDIUM u'\x1a' # 0x001a -> SUBSTITUTE u'\x1b' # 0x001b -> ESCAPE u'\x1c' # 0x001c -> FILE SEPARATOR u'\x1d' # 0x001d -> GROUP SEPARATOR u'\x1e' # 0x001e -> RECORD SEPARATOR u'\x1f' # 0x001f -> UNIT SEPARATOR u' ' # 0x0020 -> SPACE u'!' # 0x0021 -> EXCLAMATION MARK u'"' # 0x0022 -> QUOTATION MARK u'#' # 0x0023 -> NUMBER SIGN u'$' # 0x0024 -> DOLLAR SIGN u'%' # 0x0025 -> PERCENT SIGN u'&' # 0x0026 -> AMPERSAND u"'" # 0x0027 -> APOSTROPHE u'(' # 0x0028 -> LEFT PARENTHESIS u')' # 0x0029 -> RIGHT PARENTHESIS u'*' # 0x002a -> ASTERISK u'+' # 0x002b -> PLUS SIGN u',' # 0x002c -> COMMA u'-' # 0x002d -> HYPHEN-MINUS u'.' # 0x002e -> FULL STOP u'/' # 0x002f -> SOLIDUS u'0' # 0x0030 -> DIGIT ZERO u'1' # 0x0031 -> DIGIT ONE u'2' # 0x0032 -> DIGIT TWO u'3' # 0x0033 -> DIGIT THREE u'4' # 0x0034 -> DIGIT FOUR u'5' # 0x0035 -> DIGIT FIVE u'6' # 0x0036 -> DIGIT SIX u'7' # 0x0037 -> DIGIT SEVEN u'8' # 0x0038 -> DIGIT EIGHT u'9' # 0x0039 -> DIGIT NINE u':' # 0x003a -> COLON u';' # 0x003b -> SEMICOLON u'<' # 0x003c -> LESS-THAN SIGN u'=' # 0x003d -> EQUALS SIGN u'>' # 0x003e -> GREATER-THAN SIGN u'?' 
# 0x003f -> QUESTION MARK u'@' # 0x0040 -> COMMERCIAL AT u'A' # 0x0041 -> LATIN CAPITAL LETTER A u'B' # 0x0042 -> LATIN CAPITAL LETTER B u'C' # 0x0043 -> LATIN CAPITAL LETTER C u'D' # 0x0044 -> LATIN CAPITAL LETTER D u'E' # 0x0045 -> LATIN CAPITAL LETTER E u'F' # 0x0046 -> LATIN CAPITAL LETTER F u'G' # 0x0047 -> LATIN CAPITAL LETTER G u'H' # 0x0048 -> LATIN CAPITAL LETTER H u'I' # 0x0049 -> LATIN CAPITAL LETTER I u'J' # 0x004a -> LATIN CAPITAL LETTER J u'K' # 0x004b -> LATIN CAPITAL LETTER K u'L' # 0x004c -> LATIN CAPITAL LETTER L u'M' # 0x004d -> LATIN CAPITAL LETTER M u'N' # 0x004e -> LATIN CAPITAL LETTER N u'O' # 0x004f -> LATIN CAPITAL LETTER O u'P' # 0x0050 -> LATIN CAPITAL LETTER P u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q u'R' # 0x0052 -> LATIN CAPITAL LETTER R u'S' # 0x0053 -> LATIN CAPITAL LETTER S u'T' # 0x0054 -> LATIN CAPITAL LETTER T u'U' # 0x0055 -> LATIN CAPITAL LETTER U u'V' # 0x0056 -> LATIN CAPITAL LETTER V u'W' # 0x0057 -> LATIN CAPITAL LETTER W u'X' # 0x0058 -> LATIN CAPITAL LETTER X u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y u'Z' # 0x005a -> LATIN CAPITAL LETTER Z u'[' # 0x005b -> LEFT SQUARE BRACKET u'\\' # 0x005c -> REVERSE SOLIDUS u']' # 0x005d -> RIGHT SQUARE BRACKET u'^' # 0x005e -> CIRCUMFLEX ACCENT u'_' # 0x005f -> LOW LINE u'`' # 0x0060 -> GRAVE ACCENT u'a' # 0x0061 -> LATIN SMALL LETTER A u'b' # 0x0062 -> LATIN SMALL LETTER B u'c' # 0x0063 -> LATIN SMALL LETTER C u'd' # 0x0064 -> LATIN SMALL LETTER D u'e' # 0x0065 -> LATIN SMALL LETTER E u'f' # 0x0066 -> LATIN SMALL LETTER F u'g' # 0x0067 -> LATIN SMALL LETTER G u'h' # 0x0068 -> LATIN SMALL LETTER H u'i' # 0x0069 -> LATIN SMALL LETTER I u'j' # 0x006a -> LATIN SMALL LETTER J u'k' # 0x006b -> LATIN SMALL LETTER K u'l' # 0x006c -> LATIN SMALL LETTER L u'm' # 0x006d -> LATIN SMALL LETTER M u'n' # 0x006e -> LATIN SMALL LETTER N u'o' # 0x006f -> LATIN SMALL LETTER O u'p' # 0x0070 -> LATIN SMALL LETTER P u'q' # 0x0071 -> LATIN SMALL LETTER Q u'r' # 0x0072 -> LATIN SMALL LETTER R u's' # 0x0073 -> 
LATIN SMALL LETTER S u't' # 0x0074 -> LATIN SMALL LETTER T u'u' # 0x0075 -> LATIN SMALL LETTER U u'v' # 0x0076 -> LATIN SMALL LETTER V u'w' # 0x0077 -> LATIN SMALL LETTER W u'x' # 0x0078 -> LATIN SMALL LETTER X u'y' # 0x0079 -> LATIN SMALL LETTER Y u'z' # 0x007a -> LATIN SMALL LETTER Z u'{' # 0x007b -> LEFT CURLY BRACKET u'|' # 0x007c -> VERTICAL LINE u'}' # 0x007d -> RIGHT CURLY BRACKET u'~' # 0x007e -> TILDE u'\x7f' # 0x007f -> DELETE u'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE u'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS u'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE u'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE u'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON u'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA u'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA u'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE u'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS u'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA u'\xa2' # 0x0096 -> CENT SIGN u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE u'\xa3' # 0x009c -> POUND SIGN u'\xd8' # 
0x009d -> LATIN CAPITAL LETTER O WITH STROKE u'\xd7' # 0x009e -> MULTIPLICATION SIGN u'\xa4' # 0x009f -> CURRENCY SIGN u'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON u'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE u'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE u'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE u'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE u'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK u'\xa6' # 0x00a7 -> BROKEN BAR u'\xa9' # 0x00a8 -> COPYRIGHT SIGN u'\xae' # 0x00a9 -> REGISTERED SIGN u'\xac' # 0x00aa -> NOT SIGN u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER u'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u2591' # 0x00b0 -> LIGHT SHADE u'\u2592' # 0x00b1 -> MEDIUM SHADE u'\u2593' # 0x00b2 -> DARK SHADE u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT u'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK u'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON u'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK u'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT u'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK u'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT u'\u2500' # 0x00c4 -> 
BOX DRAWINGS LIGHT HORIZONTAL u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL u'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK u'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL u'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON u'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK u'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON u'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK u'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE u'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK u'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON u'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK u'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON u'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT u'\u2588' # 0x00db -> FULL BLOCK u'\u2584' # 0x00dc -> LOWER HALF BLOCK u'\u258c' # 0x00dd -> LEFT HALF BLOCK u'\u2590' # 0x00de -> RIGHT HALF BLOCK u'\u2580' # 0x00df -> UPPER HALF BLOCK u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) u'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xb5' # 0x00e6 -> MICRO SIGN u'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE u'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA u'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA 
u'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA u'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA u'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA u'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON u'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA u'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK u'\xad' # 0x00f0 -> SOFT HYPHEN u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN u'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS u'\xb6' # 0x00f4 -> PILCROW SIGN u'\xa7' # 0x00f5 -> SECTION SIGN u'\xf7' # 0x00f6 -> DIVISION SIGN u'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK u'\xb0' # 0x00f8 -> DEGREE SIGN u'\u2219' # 0x00f9 -> BULLET OPERATOR u'\xb7' # 0x00fa -> MIDDLE DOT u'\xb9' # 0x00fb -> SUPERSCRIPT ONE u'\xb3' # 0x00fc -> SUPERSCRIPT THREE u'\xb2' # 0x00fd -> SUPERSCRIPT TWO u'\u25a0' # 0x00fe -> BLACK SQUARE u'\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT 
SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN 
CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a2: 0x0096, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a4: 0x009f, # CURRENCY SIGN 0x00a6: 0x00a7, # BROKEN BAR 0x00a7: 0x00f5, # SECTION SIGN 0x00a9: 0x00a8, # COPYRIGHT SIGN 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00ad: 0x00f0, # SOFT HYPHEN 0x00ae: 0x00a9, # REGISTERED SIGN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b3: 0x00fc, # SUPERSCRIPT THREE 0x00b5: 0x00e6, # MICRO SIGN 0x00b6: 0x00f4, # PILCROW SIGN 
0x00b7: 0x00fa, # MIDDLE DOT 0x00b9: 0x00fb, # SUPERSCRIPT ONE 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x00d7: 0x009e, # MULTIPLICATION SIGN 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS 0x00f7: 0x00f6, # DIVISION SIGN 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS 0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON 0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON 0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK 0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK 0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE 0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE 0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON 0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON 0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON 0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON 0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE 0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE 0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH 
OGONEK 0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK 0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA 0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA 0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON 0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON 0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK 0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK 0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA 0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA 0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA 0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA 0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE 0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE 0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA 0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA 0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON 0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON 0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA 0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE 0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON 0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON 0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON 0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON 0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK 0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE 0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE 0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON 0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON 0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK 0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK 0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK 0x201e: 0x00f7, # DOUBLE LOW-9 
QUOTATION MARK 0x2219: 0x00f9, # BULLET OPERATOR 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
apache-2.0
jrowan/zulip
tools/setup/emoji/emoji_setup_utils.py
4
12939
from __future__ import absolute_import # This tool contains all of the rules that we use to decide which of # the various emoji names in emoji-map.json we should actually use in # autocomplete and emoji pickers. You can't do all of them, because # otherwise there will be a ton of duplicates alphabetically next to # each other, which is confusing and looks bad (e.g. `angry` and # `angry_face` or `ab` and `ab_button` will always sort next to each # other, and you really want to just pick one). See docs/emoji.md for # details on how this system works. from collections import defaultdict from itertools import permutations, chain import ujson from six.moves import range, zip from typing import Any, Dict, List, Text # Emojisets that we currently support. EMOJISETS = ['apple', 'emojione', 'google', 'twitter'] # the corresponding code point will be set to exactly these names as a # final pass, overriding any other rules. This is useful for cases # where the two names are very different, users might reasonably type # either name and be surprised when they can't find the relevant emoji. 
# Each inner list is the exact set of names that will be kept for the
# code point shared by all names in that list (the first name is used
# to look up the code point).
whitelisted_names = [
    ['date', 'calendar'],
    ['shirt', 'tshirt'],
    ['cupid', 'heart_with_arrow'],
    ['tada', 'party_popper'],
    ['parking', 'p_button'],
    ['car', 'automobile'],
    ['mortar_board', 'graduation_cap'],
    ['cd', 'optical_disc'],
    ['tv', 'television'],
    ['sound', 'speaker_on'],
    ['mute', 'speaker_off'],
    ['antenna_bars', 'signal_strength'],
    ['mag_right', 'right_pointing_magnifying_glass'],
    ['mag', 'left_pointing_magnifying_glass'],
    ['loud_sound', 'speaker_loud'],
    ['rice_scene', 'moon_ceremony'],
    ['fast_up_button', 'arrow_double_up'],
    ['fast_down_button', 'arrow_double_down'],
    ['rewind', 'fast_reverse_button'],
    ['100', 'hundred_points'],
    ['muscle', 'flexed_biceps'],
    ['walking', 'pedestrian'],
    ['email', 'envelope'],
    ['dart', 'direct_hit'],
    ['wc', 'water_closet'],
    ['zap', 'high_voltage'],
    ['underage', 'no_one_under_eighteen'],
    ['vhs', 'videocassette'],
    ['bangbang', 'double_exclamation_mark'],
    ['gun', 'pistol'],
    ['hocho', 'kitchen_knife'],
    ['8ball', 'billiards'],
    ['pray', 'folded_hands'],
    ['cop', 'police_officer'],
    ['phone', 'telephone'],
    ['bee', 'honeybee'],
    ['lips', 'mouth'],
    ['boat', 'sailboat'],
    ['feet', 'paw_prints'],
    ['uk', 'gb'],
    ['alien_monster', 'space_invader'],
    ['reverse_button', 'arrow_backward'],
    # both github and slack remove play_button, though I think this is better
    ['play_button', 'arrow_forward'],
    # github/slack both get rid of shuffle_tracks_button, which seems wrong
    ['shuffle_tracks_button', 'twisted_rightwards_arrows'],
    ['iphone', 'mobile_phone'],  # disagrees with github/slack/emojione
    # both github and slack remove {growing,beating}_heart, not sure what I think
    ['heartpulse', 'growing_heart'],
    ['heartbeat', 'beating_heart'],
    # did remove cityscape_at_dusk from (city_sunset, cityscape_at_dusk)
    ['sunset', 'city_sunrise'],
    ['punch', 'oncoming_fist'],  # doesn't include facepunch
    ['+1', 'thumbs_up'],  # doesn't include thumbsup
    ['-1', 'thumbs_down'],  # doesn't include thumbsdown
    # shit, hankey. slack allows poop, shit, hankey. github calls it hankey,
    # and autocompletes for poop and shit. emojione calls it poop, and
    # autocompletes for pile_of_poo and shit.
    ['poop', 'pile_of_poo'],
    # github/slack remove cooking, but their emoji for this is an uncooked egg
    ['egg', 'cooking'],
    # to match two_{men,women}_holding_hands
    ['couple', 'man_and_woman_holding_hands'],
    # ['ocean', 'water_wave'], wave is so common that we want it to point only to :wave:
]

# We blacklist certain names in cases where the algorithms below would
# choose incorrectly which one to keep. For example, with `football`,
# by default, our algorithm would pick just `football`, but we given
# that :rugby_football: also exists, we want to keep
# :american_football: instead. So we just remove the shorter names here.
blacklisted_names = frozenset([
    # would be chosen by words_supersets or superstrings
    'football',  # american_football
    'post_office',  # european_post_office (there's also a japanese_post_office)
    'castle',  # european_castle (there's also a japanese_castle)
    'chart',  # chart_increasing_with_yen (should rename chart_increasing to chart)
    'loop',  # double_curly_loop (should rename curly_loop to loop)
    'massage',  # face_massage
    'bulb',  # light_bulb
    'barber',  # barber_pole
    'mens',  # mens_room
    'womens',  # womens_room
    'knife',  # kitchen_knife (hocho also maps here)
    'notes',  # musical_notes
    'beetle',  # lady_beetle
    'ab',  # ab_button (due to keeping a_button, due to the one_lettered() rule)
    'headphone',  # headphones
    'mega',  # megaphone
    'ski',  # skis
    'high_heel',  # high_heeled_shoe (so that it shows up when searching for shoe)
    # less confident about the following
    'dolls',  # japanese_dolls
    'moon',  # waxing_gibbous_moon (should rename crescent_moon to moon)
    'clapper',  # clapper_board
    'traffic_light',  # horizontal_traffic_light (there's also a vertical_traffic_light)
    'lantern', 'red_paper_lantern',  # izakaya_lantern (in the future we should make sure
    # red_paper_lantern finds this)
    # would be chosen by longer
    'down_button',  # arrow_down_small, I think to match the other arrow_*
    # names. Matching what github and slack do.
    'running_shoe',  # athletic_shoe, both github and slack agree here.
    'running',  # runner. slack has both, github has running_man and running_woman, but not runner
    'o2',  # o_button
    'star2',  # glowing_star
    'bright',  # high_brightness, to match low_brightness, what github/slack do
    'dim_button',  # low_brightness, copying github/slack
    'stars',  # shooting_star. disagrees with github, slack, and emojione, but this seems better
    'nail_care',  # nail_polish. Also disagrees github/slack/emojione, is nail_polish mostly an
    # american thing?
    'busstop',  # bus_stop
    'tophat',  # top_hat
    'old_woman',  # older_woman, following github/slack/emojione on these
    'old_man',  # older_man
    'blue_car',  # recreational_vehicle
    'litter_in_bin_sign',  # put_litter_in_its_place
    'moai',  # moyai based on github/slack
    'fuelpump',  # fuel_pump
    # names not otherwise excluded by our heuristics
    'left_arrow',  # arrow_left, to match other arrow_* shortnames
    'right_arrow',  # arrow_right
    'up_arrow',  # arrow_up
    'down_arrow',  # arrow_down
    'chequered_flag',  # checkered_flag
    'e_mail',  # e-mail
    'non_potable_water',  # non-potable_water
    'flipper',  # dolphin
])

## functions that take in a list of names at a codepoint and return a subset to exclude

def blacklisted(names):
    # type: (List[str]) -> List[str]
    """Return the subset of `names` that appears in `blacklisted_names`."""
    return [name for name in names if name in blacklisted_names]

# 1 letter names don't currently show up in our autocomplete. Maybe should
# change our autocomplete so that a whitelist of letters do, like j (for joy), x, etc
# github uses a, ab, etc.
# instead of a_button, slack doesn't have any of the [letter]_buttons
def one_lettered(names):
    # type: (List[str]) -> List[str]
    """Return single-character names to exclude.

    If the codepoint has only one name, keep it even if it is a single
    letter (there is nothing better to fall back to).
    """
    if len(names) == 1:
        return []
    return [name for name in names if len(name) == 1]

# If it is an ideograph (or katakana, but we'll probably deal with that
# differently after 1.5), remove any names that don't have
# ideograph/katakana in them
def ideographless(names):
    # type: (List[str]) -> List[str]
    """Return the names lacking an ideograph/katakana word, but only
    when at least one sibling name has one; otherwise exclude nothing."""
    has_ideographs = ['ideograph' in name.split('_') or
                      'katakana' in name.split('_')
                      for name in names]
    if not any(has_ideographs):
        return []
    return [name for name, has_ideograph in zip(names, has_ideographs)
            if not has_ideograph]

# In the absence of a good reason not to, we prefer :angry: over
# :angry_face:, since it's shorter and communicates the same idea.
#
# This rule is subsumed by the longer rule, but still useful for
# breaking up a hand review of the whitelist/blacklist decisions,
# since these cases are much more clear than the "longer" ones.
def word_superset(names):
    # type: (List[str]) -> List[str]
    """Return names whose underscore-separated word set is a strict
    superset of another name's word set (the longer synonym)."""
    bags_of_words = [frozenset(name.split('_')) for name in names]
    bad_names = set()
    # permutations() accepts any iterable; no need to materialize a list.
    for i, j in permutations(range(len(names)), 2):
        # `<` on frozensets is the strict-subset test, so names[j]
        # carries all of names[i]'s words plus extras -- drop it.
        if bags_of_words[i] < bags_of_words[j]:
            bad_names.add(names[j])
    return list(bad_names)

# We prefer :dog: over :dog2: if they both point to the same unicode
# character.
#
# This rule is subsumed by the longer rule, but still useful for
# breaking up a hand review of the whitelist/blacklist decisions,
# since these cases are much more clear than the "longer" ones.
def superstring(names):
    # type: (List[str]) -> List[str]
    """Return names that have some other name as a prefix."""
    bad_names = set()
    for name1, name2 in permutations(names, 2):
        if name2.startswith(name1):
            bad_names.add(name2)
    return list(bad_names)

# The shorter one is usually a better name.
def longer(names): # type: (List[str]) -> List[str] lengths = [len(name) for name in names] min_length = min(lengths) return [name for name, length in zip(names, lengths) if length > min_length] # A lot of emoji that have a color in their name aren't actually the # right color, which is super confusing. A big part of the reason is # that "black" and "white" actually mean filled-in and not-filled-in # to the Unicode committee, which is a poor choice by explains why # something with "black" in its name might be any solid color. Users # want the emoji to have reasonable names, though, so we have to # correct the names with "black" or "white" in them. # # Ones found after a few minutes of inspection, and after all the other filters # have been applied. Probably others remaining. miscolored_names = frozenset(['eight_pointed_black_star', 'large_blue_diamond', 'small_blue_diamond']) def google_color_bug(names): # type: (List[str]) -> List[str] return [name for name in names if name[:5] == 'black' or name[:5] == 'white' or name in miscolored_names] def emoji_names_for_picker(emoji_map): # type: (Dict[Text, Text]) -> List[str] codepoint_to_names = defaultdict(list) # type: Dict[Text, List[str]] for name, codepoint in emoji_map.items(): codepoint_to_names[codepoint].append(str(name)) # blacklisted must come first, followed by {one_lettered, ideographless} # Each function here returns a list of names to be removed from a list of names for func in [blacklisted, one_lettered, ideographless, word_superset, superstring, longer, google_color_bug]: for codepoint, names in codepoint_to_names.items(): codepoint_to_names[codepoint] = [name for name in names if name not in func(names)] for names in whitelisted_names: codepoint = emoji_map[names[0]] for name in names: assert (emoji_map[name] == codepoint) codepoint_to_names[codepoint] = names return sorted(list(chain.from_iterable(codepoint_to_names.values()))) # Returns a dict from categories to list of codepoints. 
The list of # codepoints are sorted according to the `sort_order` as defined in # `emoji_data`. def generate_emoji_catalog(emoji_data): # type: (List[Dict[Text, Any]]) -> Dict[str, List[str]] sort_order = {} # type: Dict[str, int] emoji_catalog = {} # type: Dict[str, List[str]] for emoji in emoji_data: if not emoji_is_universal(emoji): continue category = str(emoji["category"]) codepoint = str(emoji["unified"]).lower() sort_order[codepoint] = emoji["sort_order"] if category in emoji_catalog: emoji_catalog[category].append(codepoint) else: emoji_catalog[category] = [codepoint, ] for category in emoji_catalog: emoji_catalog[category].sort(key=lambda codepoint: sort_order[codepoint]) return emoji_catalog # Use only those names for which images are present in all # the emoji sets so that we can switch emoji sets seemlessly. def emoji_is_universal(emoji_dict): # type: (Dict[Text, Any]) -> bool for emoji_set in EMOJISETS: if not emoji_dict['has_img_' + emoji_set]: return False return True def generate_codepoint_to_name_map(names, unified_reactions_data): # type: (List[str], Dict[Text, Text]) -> Dict[str, str] # TODO: Decide canonical names. For now, using the names # generated for emoji picker. In case of multiple names # for the same emoji, lexicographically greater name is # used, for example, `thumbs_up` is used and not `+1`. codepoint_to_name = {} # type: Dict[str, str] for name in names: codepoint_to_name[str(unified_reactions_data[name])] = str(name) return codepoint_to_name
apache-2.0
tensor-tang/Paddle
python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py
4
3797
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np from op_test import OpTest import paddle.fluid as fluid import paddle.fluid.core as core class TestClipByNormOp(OpTest): def setUp(self): self.max_relative_error = 0.006 self.initTestCase() input = np.random.random(self.shape).astype("float32") input[np.abs(input) < self.max_relative_error] = 0.5 self.op_type = "clip_by_norm" self.inputs = {'X': input, } self.attrs = {} self.attrs['max_norm'] = self.max_norm norm = np.sqrt(np.sum(np.square(input))) if norm > self.max_norm: output = self.max_norm * input / norm else: output = input self.outputs = {'Out': output} def test_check_output(self): self.check_output() def initTestCase(self): self.shape = (100, ) self.max_norm = 1.0 class TestCase1(TestClipByNormOp): def initTestCase(self): self.shape = (100, ) self.max_norm = 1e20 class TestCase2(TestClipByNormOp): def initTestCase(self): self.shape = (16, 16) self.max_norm = 0.1 class TestCase3(TestClipByNormOp): def initTestCase(self): self.shape = (4, 8, 16) self.max_norm = 1.0 class TestClipByNormOpWithSelectedRows(OpTest): def check_with_place(self, place): self.config_test_case() scope = core.Scope() # set input x_selected_rows = scope.var('X').get_selected_rows() x_selected_rows.set_rows(self.grad_rows) x_tensor = x_selected_rows.get_tensor() x_np = np.random.random(self.grad_shape).astype("float32") 
x_np[np.abs(x_np) < self.max_relative_error] = 0.5 x_tensor.set(x_np, place) # set output out_selected_rows = scope.var('Out').get_selected_rows() # run clip_by_norm_op clip_by_norm_op = fluid.op.Operator( "clip_by_norm", max_norm=self.max_norm, X='X', Out='Out') clip_by_norm_op.run(scope, place) # check output self.assertEqual(out_selected_rows.rows(), self.grad_clipped_rows) out_tensor = out_selected_rows.get_tensor() y_np = np.zeros(self.grad_clipped_shape) y_np[0] = np.sum(x_np[0:2]) y_np[1] = x_np[2] y_np[2] = x_np[3] norm = np.sqrt(np.sum(np.square(y_np))) if norm > self.max_norm: output = self.max_norm * y_np / norm else: output = y_np self.assertTrue( np.allclose( np.array(out_tensor), output, atol=1e-5, equal_nan=False)) def test_clip_by_norm_with_selected_ros(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) for place in places: self.check_with_place(place) def config_test_case(self): self.max_norm = 1.0 self.max_relative_error = 0.006 self.grad_shape = (4, 1) self.grad_clipped_shape = (3, 1) self.grad_rows = [0, 0, 1, 2] self.grad_clipped_rows = [0, 1, 2] if __name__ == '__main__': unittest.main()
apache-2.0
DarrelHsu/cvsClient
third_party/logilab/common/pytest.py
24
45995
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of logilab-common. # # logilab-common is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation, either version 2.1 of the License, or (at your option) any # later version. # # logilab-common is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License along # with logilab-common. If not, see <http://www.gnu.org/licenses/>. """pytest is a tool that eases test running and debugging. To be able to use pytest, you should either write tests using the logilab.common.testlib's framework or the unittest module of the Python's standard library. You can customize pytest's behaviour by defining a ``pytestconf.py`` file somewhere in your test directory. In this file, you can add options or change the way tests are run. To add command line options, you must define a ``update_parser`` function in your ``pytestconf.py`` file. The function must accept a single parameter that will be the OptionParser's instance to customize. If you wish to customize the tester, you'll have to define a class named ``CustomPyTester``. This class should extend the default `PyTester` class defined in the pytest module. Take a look at the `PyTester` and `DjangoTester` classes for more information about what can be done. 
For instance, if you wish to add a custom -l option to specify a loglevel, you could define the following ``pytestconf.py`` file :: import logging from logilab.common.pytest import PyTester def update_parser(parser): parser.add_option('-l', '--loglevel', dest='loglevel', action='store', choices=('debug', 'info', 'warning', 'error', 'critical'), default='critical', help="the default log level possible choices are " "('debug', 'info', 'warning', 'error', 'critical')") return parser class CustomPyTester(PyTester): def __init__(self, cvg, options): super(CustomPyTester, self).__init__(cvg, options) loglevel = options.loglevel.upper() logger = logging.getLogger('erudi') logger.setLevel(logging.getLevelName(loglevel)) In your TestCase class you can then get the value of a specific option with the ``optval`` method:: class MyTestCase(TestCase): def test_foo(self): loglevel = self.optval('loglevel') # ... You can also tag your tag your test for fine filtering With those tag:: from logilab.common.testlib import tag, TestCase class Exemple(TestCase): @tag('rouge', 'carre') def toto(self): pass @tag('carre', 'vert') def tata(self): pass @tag('rouge') def titi(test): pass you can filter the function with a simple python expression * ``toto`` and ``titi`` match ``rouge`` * ``toto``, ``tata`` and ``titi``, match ``rouge or carre`` * ``tata`` and ``titi`` match``rouge ^ carre`` * ``titi`` match ``rouge and not carre`` """ __docformat__ = "restructuredtext en" PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]] examples: pytest path/to/mytests.py pytest path/to/mytests.py TheseTests pytest path/to/mytests.py TheseTests.test_thisone pytest path/to/mytests.py -m '(not long and database) or regr' pytest one (will run both test_thisone and test_thatone) pytest path/to/mytests.py -s not (will skip test_notthisone) pytest --coverage test_foo.py (only if logilab.devtools is available) """ ENABLE_DBC = False FILE_RESTART = ".pytest.restart" import os, sys, re import os.path as osp 
from time import time, clock import warnings import types from logilab.common.fileutils import abspath_listdir from logilab.common import textutils from logilab.common import testlib, STD_BLACKLIST # use the same unittest module as testlib from logilab.common.testlib import unittest, start_interactive_mode from logilab.common.compat import any import doctest import unittest as unittest_legacy if not getattr(unittest_legacy, "__package__", None): try: import unittest2.suite as unittest_suite except ImportError: sys.exit("You have to install python-unittest2 to use this module") else: import unittest.suite as unittest_suite try: import django from logilab.common.modutils import modpath_from_file, load_module_from_modpath DJANGO_FOUND = True except ImportError: DJANGO_FOUND = False CONF_FILE = 'pytestconf.py' ## coverage hacks, do not read this, do not read this, do not read this # hey, but this is an aspect, right ?!!! class TraceController(object): nesting = 0 def pause_tracing(cls): if not cls.nesting: cls.tracefunc = staticmethod(getattr(sys, '__settrace__', sys.settrace)) cls.oldtracer = getattr(sys, '__tracer__', None) sys.__notrace__ = True cls.tracefunc(None) cls.nesting += 1 pause_tracing = classmethod(pause_tracing) def resume_tracing(cls): cls.nesting -= 1 assert cls.nesting >= 0 if not cls.nesting: cls.tracefunc(cls.oldtracer) delattr(sys, '__notrace__') resume_tracing = classmethod(resume_tracing) pause_tracing = TraceController.pause_tracing resume_tracing = TraceController.resume_tracing def nocoverage(func): if hasattr(func, 'uncovered'): return func func.uncovered = True def not_covered(*args, **kwargs): pause_tracing() try: return func(*args, **kwargs) finally: resume_tracing() not_covered.uncovered = True return not_covered ## end of coverage hacks TESTFILE_RE = re.compile("^((unit)?test.*|smoketest)\.py$") def this_is_a_testfile(filename): """returns True if `filename` seems to be a test file""" return TESTFILE_RE.match(osp.basename(filename)) 
TESTDIR_RE = re.compile("^(unit)?tests?$") def this_is_a_testdir(dirpath): """returns True if `filename` seems to be a test directory""" return TESTDIR_RE.match(osp.basename(dirpath)) def load_pytest_conf(path, parser): """loads a ``pytestconf.py`` file and update default parser and / or tester. """ namespace = {} execfile(path, namespace) if 'update_parser' in namespace: namespace['update_parser'](parser) return namespace.get('CustomPyTester', PyTester) def project_root(parser, projdir=os.getcwd()): """try to find project's root and add it to sys.path""" previousdir = curdir = osp.abspath(projdir) testercls = PyTester conf_file_path = osp.join(curdir, CONF_FILE) if osp.isfile(conf_file_path): testercls = load_pytest_conf(conf_file_path, parser) while this_is_a_testdir(curdir) or \ osp.isfile(osp.join(curdir, '__init__.py')): newdir = osp.normpath(osp.join(curdir, os.pardir)) if newdir == curdir: break previousdir = curdir curdir = newdir conf_file_path = osp.join(curdir, CONF_FILE) if osp.isfile(conf_file_path): testercls = load_pytest_conf(conf_file_path, parser) return previousdir, testercls class GlobalTestReport(object): """this class holds global test statistics""" def __init__(self): self.ran = 0 self.skipped = 0 self.failures = 0 self.errors = 0 self.ttime = 0 self.ctime = 0 self.modulescount = 0 self.errmodules = [] def feed(self, filename, testresult, ttime, ctime): """integrates new test information into internal statistics""" ran = testresult.testsRun self.ran += ran self.skipped += len(getattr(testresult, 'skipped', ())) self.failures += len(testresult.failures) self.errors += len(testresult.errors) self.ttime += ttime self.ctime += ctime self.modulescount += 1 if not testresult.wasSuccessful(): problems = len(testresult.failures) + len(testresult.errors) self.errmodules.append((filename[:-3], problems, ran)) def failed_to_test_module(self, filename): """called when the test module could not be imported by unittest """ self.errors += 1 
self.modulescount += 1 self.ran += 1 self.errmodules.append((filename[:-3], 1, 1)) def skip_module(self, filename): self.modulescount += 1 self.ran += 1 self.errmodules.append((filename[:-3], 0, 0)) def __str__(self): """this is just presentation stuff""" line1 = ['Ran %s test cases in %.2fs (%.2fs CPU)' % (self.ran, self.ttime, self.ctime)] if self.errors: line1.append('%s errors' % self.errors) if self.failures: line1.append('%s failures' % self.failures) if self.skipped: line1.append('%s skipped' % self.skipped) modulesok = self.modulescount - len(self.errmodules) if self.errors or self.failures: line2 = '%s modules OK (%s failed)' % (modulesok, len(self.errmodules)) descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules]) line3 = '\nfailures: %s' % descr elif modulesok: line2 = 'All %s modules OK' % modulesok line3 = '' else: return '' return '%s\n%s%s' % (', '.join(line1), line2, line3) def remove_local_modules_from_sys(testdir): """remove all modules from cache that come from `testdir` This is used to avoid strange side-effects when using the testall() mode of pytest. 
For instance, if we run pytest on this tree:: A/test/test_utils.py B/test/test_utils.py we **have** to clean sys.modules to make sure the correct test_utils module is ran in B """ for modname, mod in sys.modules.items(): if mod is None: continue if not hasattr(mod, '__file__'): # this is the case of some built-in modules like sys, imp, marshal continue modfile = mod.__file__ # if modfile is not an absolute path, it was probably loaded locally # during the tests if not osp.isabs(modfile) or modfile.startswith(testdir): del sys.modules[modname] class PyTester(object): """encapsulates testrun logic""" def __init__(self, cvg, options): self.report = GlobalTestReport() self.cvg = cvg self.options = options self.firstwrite = True self._errcode = None def show_report(self): """prints the report and returns appropriate exitcode""" # everything has been ran, print report print "*" * 79 print self.report def get_errcode(self): # errcode set explicitly if self._errcode is not None: return self._errcode return self.report.failures + self.report.errors def set_errcode(self, errcode): self._errcode = errcode errcode = property(get_errcode, set_errcode) def testall(self, exitfirst=False): """walks through current working directory, finds something which can be considered as a testdir and runs every test there """ here = os.getcwd() for dirname, dirs, _ in os.walk(here): for skipped in STD_BLACKLIST: if skipped in dirs: dirs.remove(skipped) basename = osp.basename(dirname) if this_is_a_testdir(basename): print "going into", dirname # we found a testdir, let's explore it ! 
if not self.testonedir(dirname, exitfirst): break dirs[:] = [] if self.report.ran == 0: print "no test dir found testing here:", here # if no test was found during the visit, consider # the local directory as a test directory even if # it doesn't have a traditional test directory name self.testonedir(here) def testonedir(self, testdir, exitfirst=False): """finds each testfile in the `testdir` and runs it return true when all tests has been executed, false if exitfirst and some test has failed. """ for filename in abspath_listdir(testdir): if this_is_a_testfile(filename): if self.options.exitfirst and not self.options.restart: # overwrite restart file try: restartfile = open(FILE_RESTART, "w") restartfile.close() except Exception, e: print >> sys.__stderr__, "Error while overwriting \ succeeded test file :", osp.join(os.getcwd(), FILE_RESTART) raise e # run test and collect information prog = self.testfile(filename, batchmode=True) if exitfirst and (prog is None or not prog.result.wasSuccessful()): return False self.firstwrite = True # clean local modules remove_local_modules_from_sys(testdir) return True def testfile(self, filename, batchmode=False): """runs every test in `filename` :param filename: an absolute path pointing to a unittest file """ here = os.getcwd() dirname = osp.dirname(filename) if dirname: os.chdir(dirname) # overwrite restart file if it has not been done already if self.options.exitfirst and not self.options.restart and self.firstwrite: try: restartfile = open(FILE_RESTART, "w") restartfile.close() except Exception, e: print >> sys.__stderr__, "Error while overwriting \ succeeded test file :", osp.join(os.getcwd(), FILE_RESTART) raise e modname = osp.basename(filename)[:-3] try: print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=') except TypeError: # < py 2.4 bw compat print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70) try: tstart, cstart = time(), clock() try: testprog = SkipAwareTestProgram(modname, 
batchmode=batchmode, cvg=self.cvg, options=self.options, outstream=sys.stderr) except KeyboardInterrupt: raise except SystemExit, exc: self.errcode = exc.code raise except testlib.SkipTest: print "Module skipped:", filename self.report.skip_module(filename) return None except Exception: self.report.failed_to_test_module(filename) print >> sys.stderr, 'unhandled exception occurred while testing', modname import traceback traceback.print_exc(file=sys.stderr) return None tend, cend = time(), clock() ttime, ctime = (tend - tstart), (cend - cstart) self.report.feed(filename, testprog.result, ttime, ctime) return testprog finally: if dirname: os.chdir(here) class DjangoTester(PyTester): def load_django_settings(self, dirname): """try to find project's setting and load it""" curdir = osp.abspath(dirname) previousdir = curdir while not osp.isfile(osp.join(curdir, 'settings.py')) and \ osp.isfile(osp.join(curdir, '__init__.py')): newdir = osp.normpath(osp.join(curdir, os.pardir)) if newdir == curdir: raise AssertionError('could not find settings.py') previousdir = curdir curdir = newdir # late django initialization settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py'))) from django.core.management import setup_environ setup_environ(settings) settings.DEBUG = False self.settings = settings # add settings dir to pythonpath since it's the project's root if curdir not in sys.path: sys.path.insert(1, curdir) def before_testfile(self): # Those imports must be done **after** setup_environ was called from django.test.utils import setup_test_environment from django.test.utils import create_test_db setup_test_environment() create_test_db(verbosity=0) self.dbname = self.settings.TEST_DATABASE_NAME def after_testfile(self): # Those imports must be done **after** setup_environ was called from django.test.utils import teardown_test_environment from django.test.utils import destroy_test_db teardown_test_environment() print 'destroying', self.dbname 
destroy_test_db(self.dbname, verbosity=0) def testall(self, exitfirst=False): """walks through current working directory, finds something which can be considered as a testdir and runs every test there """ for dirname, dirs, files in os.walk(os.getcwd()): for skipped in ('CVS', '.svn', '.hg'): if skipped in dirs: dirs.remove(skipped) if 'tests.py' in files: if not self.testonedir(dirname, exitfirst): break dirs[:] = [] else: basename = osp.basename(dirname) if basename in ('test', 'tests'): print "going into", dirname # we found a testdir, let's explore it ! if not self.testonedir(dirname, exitfirst): break dirs[:] = [] def testonedir(self, testdir, exitfirst=False): """finds each testfile in the `testdir` and runs it return true when all tests has been executed, false if exitfirst and some test has failed. """ # special django behaviour : if tests are splitted in several files, # remove the main tests.py file and tests each test file separately testfiles = [fpath for fpath in abspath_listdir(testdir) if this_is_a_testfile(fpath)] if len(testfiles) > 1: try: testfiles.remove(osp.join(testdir, 'tests.py')) except ValueError: pass for filename in testfiles: # run test and collect information prog = self.testfile(filename, batchmode=True) if exitfirst and (prog is None or not prog.result.wasSuccessful()): return False # clean local modules remove_local_modules_from_sys(testdir) return True def testfile(self, filename, batchmode=False): """runs every test in `filename` :param filename: an absolute path pointing to a unittest file """ here = os.getcwd() dirname = osp.dirname(filename) if dirname: os.chdir(dirname) self.load_django_settings(dirname) modname = osp.basename(filename)[:-3] print >>sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=') try: try: tstart, cstart = time(), clock() self.before_testfile() testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg) tend, cend = time(), clock() ttime, ctime = (tend - tstart), (cend - cstart) 
self.report.feed(filename, testprog.result, ttime, ctime) return testprog except SystemExit: raise except Exception, exc: import traceback traceback.print_exc() self.report.failed_to_test_module(filename) print 'unhandled exception occurred while testing', modname print 'error: %s' % exc return None finally: self.after_testfile() if dirname: os.chdir(here) def make_parser(): """creates the OptionParser instance """ from optparse import OptionParser parser = OptionParser(usage=PYTEST_DOC) parser.newargs = [] def rebuild_cmdline(option, opt, value, parser): """carry the option to unittest_main""" parser.newargs.append(opt) def rebuild_and_store(option, opt, value, parser): """carry the option to unittest_main and store the value on current parser """ parser.newargs.append(opt) setattr(parser.values, option.dest, True) def capture_and_rebuild(option, opt, value, parser): warnings.simplefilter('ignore', DeprecationWarning) rebuild_cmdline(option, opt, value, parser) # pytest options parser.add_option('-t', dest='testdir', default=None, help="directory where the tests will be found") parser.add_option('-d', dest='dbc', default=False, action="store_true", help="enable design-by-contract") # unittest_main options provided and passed through pytest parser.add_option('-v', '--verbose', callback=rebuild_cmdline, action="callback", help="Verbose output") parser.add_option('-i', '--pdb', callback=rebuild_and_store, dest="pdb", action="callback", help="Enable test failure inspection (conflicts with --coverage)") parser.add_option('-x', '--exitfirst', callback=rebuild_and_store, dest="exitfirst", default=False, action="callback", help="Exit on first failure " "(only make sense when pytest run one test file)") parser.add_option('-R', '--restart', callback=rebuild_and_store, dest="restart", default=False, action="callback", help="Restart tests from where it failed (implies exitfirst) " "(only make sense if tests previously ran with exitfirst only)") parser.add_option('--color', 
callback=rebuild_cmdline, action="callback", help="colorize tracebacks") parser.add_option('-s', '--skip', # XXX: I wish I could use the callback action but it # doesn't seem to be able to get the value # associated to the option action="store", dest="skipped", default=None, help="test names matching this name will be skipped " "to skip several patterns, use commas") parser.add_option('-q', '--quiet', callback=rebuild_cmdline, action="callback", help="Minimal output") parser.add_option('-P', '--profile', default=None, dest='profile', help="Profile execution and store data in the given file") parser.add_option('-m', '--match', default=None, dest='tags_pattern', help="only execute test whose tag match the current pattern") try: from logilab.devtools.lib.coverage import Coverage parser.add_option('--coverage', dest="coverage", default=False, action="store_true", help="run tests with pycoverage (conflicts with --pdb)") except ImportError: pass if DJANGO_FOUND: parser.add_option('-J', '--django', dest='django', default=False, action="store_true", help='use pytest for django test cases') return parser def parseargs(parser): """Parse the command line and return (options processed), (options to pass to unittest_main()), (explicitfile or None). 
""" # parse the command line options, args = parser.parse_args() if options.pdb and getattr(options, 'coverage', False): parser.error("'pdb' and 'coverage' options are exclusive") filenames = [arg for arg in args if arg.endswith('.py')] if filenames: if len(filenames) > 1: parser.error("only one filename is acceptable") explicitfile = filenames[0] args.remove(explicitfile) else: explicitfile = None # someone wants DBC testlib.ENABLE_DBC = options.dbc newargs = parser.newargs if options.skipped: newargs.extend(['--skip', options.skipped]) # restart implies exitfirst if options.restart: options.exitfirst = True # append additional args to the new sys.argv and let unittest_main # do the rest newargs += args return options, explicitfile def run(): parser = make_parser() rootdir, testercls = project_root(parser) options, explicitfile = parseargs(parser) # mock a new command line sys.argv[1:] = parser.newargs covermode = getattr(options, 'coverage', None) cvg = None if not '' in sys.path: sys.path.insert(0, '') if covermode: # control_import_coverage(rootdir) from logilab.devtools.lib.coverage import Coverage cvg = Coverage([rootdir]) cvg.erase() cvg.start() if DJANGO_FOUND and options.django: tester = DjangoTester(cvg, options) else: tester = testercls(cvg, options) if explicitfile: cmd, args = tester.testfile, (explicitfile,) elif options.testdir: cmd, args = tester.testonedir, (options.testdir, options.exitfirst) else: cmd, args = tester.testall, (options.exitfirst,) try: try: if options.profile: import hotshot prof = hotshot.Profile(options.profile) prof.runcall(cmd, *args) prof.close() print 'profile data saved in', options.profile else: cmd(*args) except SystemExit: raise except: import traceback traceback.print_exc() finally: if covermode: cvg.stop() cvg.save() tester.show_report() if covermode: print 'coverage information stored, use it with pycoverage -ra' sys.exit(tester.errcode) class SkipAwareTestProgram(unittest.TestProgram): # XXX: don't try to stay close 
to unittest.py, use optparse USAGE = """\ Usage: %(progName)s [options] [test] [...] Options: -h, --help Show this message -v, --verbose Verbose output -i, --pdb Enable test failure inspection -x, --exitfirst Exit on first failure -s, --skip skip test matching this pattern (no regexp for now) -q, --quiet Minimal output --color colorize tracebacks -m, --match Run only test whose tag match this pattern -P, --profile FILE: Run the tests using cProfile and saving results in FILE Examples: %(progName)s - run default set of tests %(progName)s MyTestSuite - run suite 'MyTestSuite' %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething %(progName)s MyTestCase - run all 'test*' test methods in MyTestCase """ def __init__(self, module='__main__', defaultTest=None, batchmode=False, cvg=None, options=None, outstream=sys.stderr): self.batchmode = batchmode self.cvg = cvg self.options = options self.outstream = outstream super(SkipAwareTestProgram, self).__init__( module=module, defaultTest=defaultTest, testLoader=NonStrictTestLoader()) def parseArgs(self, argv): self.pdbmode = False self.exitfirst = False self.skipped_patterns = [] self.test_pattern = None self.tags_pattern = None self.colorize = False self.profile_name = None import getopt try: options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:', ['help', 'verbose', 'quiet', 'pdb', 'exitfirst', 'restart', 'skip=', 'color', 'match=', 'profile=']) for opt, value in options: if opt in ('-h', '-H', '--help'): self.usageExit() if opt in ('-i', '--pdb'): self.pdbmode = True if opt in ('-x', '--exitfirst'): self.exitfirst = True if opt in ('-r', '--restart'): self.restart = True self.exitfirst = True if opt in ('-q', '--quiet'): self.verbosity = 0 if opt in ('-v', '--verbose'): self.verbosity = 2 if opt in ('-s', '--skip'): self.skipped_patterns = [pat.strip() for pat in value.split(', ')] if opt == '--color': self.colorize = True if opt in ('-m', '--match'): #self.tags_pattern = value self.options["tag_pattern"] 
= value if opt in ('-P', '--profile'): self.profile_name = value self.testLoader.skipped_patterns = self.skipped_patterns if len(args) == 0 and self.defaultTest is None: suitefunc = getattr(self.module, 'suite', None) if isinstance(suitefunc, (types.FunctionType, types.MethodType)): self.test = self.module.suite() else: self.test = self.testLoader.loadTestsFromModule(self.module) return if len(args) > 0: self.test_pattern = args[0] self.testNames = args else: self.testNames = (self.defaultTest, ) self.createTests() except getopt.error, msg: self.usageExit(msg) def runTests(self): if self.profile_name: import cProfile cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name ) else: return self._runTests() def _runTests(self): self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity, stream=self.outstream, exitfirst=self.exitfirst, pdbmode=self.pdbmode, cvg=self.cvg, test_pattern=self.test_pattern, skipped_patterns=self.skipped_patterns, colorize=self.colorize, batchmode=self.batchmode, options=self.options) def removeSucceededTests(obj, succTests): """ Recursive function that removes succTests from a TestSuite or TestCase """ if isinstance(obj, unittest.TestSuite): removeSucceededTests(obj._tests, succTests) if isinstance(obj, list): for el in obj[:]: if isinstance(el, unittest.TestSuite): removeSucceededTests(el, succTests) elif isinstance(el, unittest.TestCase): descr = '.'.join((el.__class__.__module__, el.__class__.__name__, el._testMethodName)) if descr in succTests: obj.remove(el) # take care, self.options may be None if getattr(self.options, 'restart', False): # retrieve succeeded tests from FILE_RESTART try: restartfile = open(FILE_RESTART, 'r') try: succeededtests = list(elem.rstrip('\n\r') for elem in restartfile.readlines()) removeSucceededTests(self.test, succeededtests) finally: restartfile.close() except Exception, ex: raise Exception("Error while reading succeeded tests into %s: %s" % (osp.join(os.getcwd(), FILE_RESTART), 
ex)) result = self.testRunner.run(self.test) # help garbage collection: we want TestSuite, which hold refs to every # executed TestCase, to be gc'ed del self.test if getattr(result, "debuggers", None) and \ getattr(self, "pdbmode", None): start_interactive_mode(result) if not getattr(self, "batchmode", None): sys.exit(not result.wasSuccessful()) self.result = result class SkipAwareTextTestRunner(unittest.TextTestRunner): def __init__(self, stream=sys.stderr, verbosity=1, exitfirst=False, pdbmode=False, cvg=None, test_pattern=None, skipped_patterns=(), colorize=False, batchmode=False, options=None): super(SkipAwareTextTestRunner, self).__init__(stream=stream, verbosity=verbosity) self.exitfirst = exitfirst self.pdbmode = pdbmode self.cvg = cvg self.test_pattern = test_pattern self.skipped_patterns = skipped_patterns self.colorize = colorize self.batchmode = batchmode self.options = options def _this_is_skipped(self, testedname): return any([(pat in testedname) for pat in self.skipped_patterns]) def _runcondition(self, test, skipgenerator=True): if isinstance(test, testlib.InnerTest): testname = test.name else: if isinstance(test, testlib.TestCase): meth = test._get_test_method() func = meth.im_func testname = '%s.%s' % (meth.im_class.__name__, func.__name__) elif isinstance(test, types.FunctionType): func = test testname = func.__name__ elif isinstance(test, types.MethodType): func = test.im_func testname = '%s.%s' % (test.im_class.__name__, func.__name__) else: return True # Not sure when this happens if testlib.is_generator(test) and skipgenerator: return self.does_match_tags(test) # Let inner tests decide at run time if self._this_is_skipped(testname): return False # this was explicitly skipped if self.test_pattern is not None: try: classpattern, testpattern = self.test_pattern.split('.') klass, name = testname.split('.') if classpattern not in klass or testpattern not in name: return False except ValueError: if self.test_pattern not in testname: return False 
return self.does_match_tags(test) def does_match_tags(self, test): if self.options is not None: tags_pattern = getattr(self.options, 'tags_pattern', None) if tags_pattern is not None: tags = getattr(test, 'tags', testlib.Tags()) if tags.inherit and isinstance(test, types.MethodType): tags = tags | getattr(test.im_class, 'tags', testlib.Tags()) return tags.match(tags_pattern) return True # no pattern def _makeResult(self): return testlib.SkipAwareTestResult(self.stream, self.descriptions, self.verbosity, self.exitfirst, self.pdbmode, self.cvg, self.colorize) def run(self, test): "Run the given test case or test suite." result = self._makeResult() startTime = time() test(result, runcondition=self._runcondition, options=self.options) stopTime = time() timeTaken = stopTime - startTime result.printErrors() if not self.batchmode: self.stream.writeln(result.separator2) run = result.testsRun self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() if not result.wasSuccessful(): if self.colorize: self.stream.write(textutils.colorize_ansi("FAILED", color='red')) else: self.stream.write("FAILED") else: if self.colorize: self.stream.write(textutils.colorize_ansi("OK", color='green')) else: self.stream.write("OK") failed, errored, skipped = map(len, (result.failures, result.errors, result.skipped)) det_results = [] for name, value in (("failures", result.failures), ("errors",result.errors), ("skipped", result.skipped)): if value: det_results.append("%s=%i" % (name, len(value))) if det_results: self.stream.write(" (") self.stream.write(', '.join(det_results)) self.stream.write(")") self.stream.writeln("") return result class NonStrictTestLoader(unittest.TestLoader): """ Overrides default testloader to be able to omit classname when specifying tests to run on command line. For example, if the file test_foo.py contains :: class FooTC(TestCase): def test_foo1(self): # ... def test_foo2(self): # ... def test_bar1(self): # ... 
class BarTC(TestCase): def test_bar2(self): # ... 'python test_foo.py' will run the 3 tests in FooTC 'python test_foo.py FooTC' will run the 3 tests in FooTC 'python test_foo.py test_foo' will run test_foo1 and test_foo2 'python test_foo.py test_foo1' will run test_foo1 'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2 """ def __init__(self): self.skipped_patterns = () # some magic here to accept empty list by extending # and to provide callable capability def loadTestsFromNames(self, names, module=None): suites = [] for name in names: suites.extend(self.loadTestsFromName(name, module)) return self.suiteClass(suites) def _collect_tests(self, module): tests = {} for obj in vars(module).values(): if (issubclass(type(obj), (types.ClassType, type)) and issubclass(obj, unittest.TestCase)): classname = obj.__name__ if classname[0] == '_' or self._this_is_skipped(classname): continue methodnames = [] # obj is a TestCase class for attrname in dir(obj): if attrname.startswith(self.testMethodPrefix): attr = getattr(obj, attrname) if callable(attr): methodnames.append(attrname) # keep track of class (obj) for convenience tests[classname] = (obj, methodnames) return tests def loadTestsFromSuite(self, module, suitename): try: suite = getattr(module, suitename)() except AttributeError: return [] assert hasattr(suite, '_tests'), \ "%s.%s is not a valid TestSuite" % (module.__name__, suitename) # python2.3 does not implement __iter__ on suites, we need to return # _tests explicitly return suite._tests def loadTestsFromName(self, name, module=None): parts = name.split('.') if module is None or len(parts) > 2: # let the base class do its job here return [super(NonStrictTestLoader, self).loadTestsFromName(name)] tests = self._collect_tests(module) collected = [] if len(parts) == 1: pattern = parts[0] if callable(getattr(module, pattern, None) ) and pattern not in tests: # consider it as a suite return self.loadTestsFromSuite(module, pattern) if pattern in 
tests: # case python unittest_foo.py MyTestTC klass, methodnames = tests[pattern] for methodname in methodnames: collected = [klass(methodname) for methodname in methodnames] else: # case python unittest_foo.py something for klass, methodnames in tests.values(): # skip methodname if matched by skipped_patterns for skip_pattern in self.skipped_patterns: methodnames = [methodname for methodname in methodnames if skip_pattern not in methodname] collected += [klass(methodname) for methodname in methodnames if pattern in methodname] elif len(parts) == 2: # case "MyClass.test_1" classname, pattern = parts klass, methodnames = tests.get(classname, (None, [])) for methodname in methodnames: collected = [klass(methodname) for methodname in methodnames if pattern in methodname] return collected def _this_is_skipped(self, testedname): return any([(pat in testedname) for pat in self.skipped_patterns]) def getTestCaseNames(self, testCaseClass): """Return a sorted sequence of method names found within testCaseClass """ is_skipped = self._this_is_skipped classname = testCaseClass.__name__ if classname[0] == '_' or is_skipped(classname): return [] testnames = super(NonStrictTestLoader, self).getTestCaseNames( testCaseClass) return [testname for testname in testnames if not is_skipped(testname)] def _ts_run(self, result, runcondition=None, options=None): self._wrapped_run(result,runcondition=runcondition, options=options) self._tearDownPreviousClass(None, result) self._handleModuleTearDown(result) return result def _ts_wrapped_run(self, result, debug=False, runcondition=None, options=None): for test in self: if result.shouldStop: break if unittest_suite._isnotsuite(test): self._tearDownPreviousClass(test, result) self._handleModuleFixture(test, result) self._handleClassSetUp(test, result) result._previousTestClass = test.__class__ if (getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False)): continue if hasattr(test, '_wrapped_run'): try: 
test._wrapped_run(result, debug, runcondition=runcondition, options=options) except TypeError: test._wrapped_run(result, debug) elif not debug: try: test(result, runcondition, options) except TypeError: test(result) else: test.debug() def enable_dbc(*args): """ Without arguments, return True if contracts can be enabled and should be enabled (see option -d), return False otherwise. With arguments, return False if contracts can't or shouldn't be enabled, otherwise weave ContractAspect with items passed as arguments. """ if not ENABLE_DBC: return False try: from logilab.aspects.weaver import weaver from logilab.aspects.lib.contracts import ContractAspect except ImportError: sys.stderr.write( 'Warning: logilab.aspects is not available. Contracts disabled.') return False for arg in args: weaver.weave_module(arg, ContractAspect) return True # monkeypatch unittest and doctest (ouch !) unittest._TextTestResult = testlib.SkipAwareTestResult unittest.TextTestRunner = SkipAwareTextTestRunner unittest.TestLoader = NonStrictTestLoader unittest.TestProgram = SkipAwareTestProgram if sys.version_info >= (2, 4): doctest.DocTestCase.__bases__ = (testlib.TestCase,) # XXX check python2.6 compatibility #doctest.DocTestCase._cleanups = [] #doctest.DocTestCase._out = [] else: unittest.FunctionTestCase.__bases__ = (testlib.TestCase,) unittest.TestSuite.run = _ts_run unittest.TestSuite._wrapped_run = _ts_wrapped_run
bsd-3-clause
tudorian/eden
modules/tests/web2unittest.py
20
42947
# -*- coding: utf-8 -*- """ Sahana Eden Test Framework @copyright: 2011-2012 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import datetime import sys import time import unittest from unittest.case import SkipTest, _ExpectedFailure, _UnexpectedSuccess from dateutil.relativedelta import relativedelta from selenium.common.exceptions import NoSuchElementException, TimeoutException from selenium.webdriver.support.ui import Select, WebDriverWait from gluon import current from gluon.storage import Storage from s3.s3query import FS from s3.s3utils import s3_unicode from s3.s3widgets import * from tests.core import * current.data = Storage() current.data["auth"] = { "normal" : { "email": "test@example.com", "password": "eden", "first_name": "Test", "last_name": "User", }, "admin" : { "email": "admin@example.com", "password": "testing", "first_name": "Admin", "last_name": "User", }, } # ============================================================================= class Web2UnitTest(unittest.TestCase): """ Web2Py Unit Test """ def __init__(self, methodName="runTest"): unittest.TestCase.__init__(self, methodName) #current should always be looked up from gluon.current #self.current = current self.config = current.test_config self.browser = self.config.browser self.app = current.request.application self.url = self.config.url self.user = "admin" self.stdout = sys.stdout self.stderr = sys.stderr # ------------------------------------------------------------------------- def s3_debug(self, message, value=None): """ Provide an easy, safe, systematic way of handling Debug output (stdout/stderr are normally redirected within tests) """ # Restore stderr stderr_redirector = sys.stderr sys.stderr = self._resultForDoCleanups.stderr0 output = s3_unicode(message) if value: output = "%s: %s" % (output, s3_unicode(value)) try: print >> sys.stderr, output except: # Unicode string print >> sys.stderr, "Debug crashed" # Redirect stderr back again sys.stderr = stderr_redirector # ------------------------------------------------------------------------- @staticmethod def today(): return 
datetime.date.today().strftime("%Y-%m-%d") # ------------------------------------------------------------------------- @staticmethod def now(): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") # ------------------------------------------------------------------------- @staticmethod def now_1_day(): return (datetime.datetime.now() + relativedelta( days = +1 )).strftime("%Y-%m-%d %H:%M:%S") # ------------------------------------------------------------------------- @staticmethod def now_1_week(): return (datetime.date.today() + relativedelta( weeks = +1 )).strftime("%Y-%m-%d %H:%M:%S") # ------------------------------------------------------------------------- def reporter(self, msg, verbose_level = 1): if self.config.verbose >= verbose_level: print >> sys.stderr, msg # ============================================================================= class SeleniumUnitTest(Web2UnitTest): """ Selenium Unit Test """ # ------------------------------------------------------------------------- def login(self, account=None, nexturl=None): if account is None: account = self.user config = current.test_config browser = config.browser data = current.data["auth"] if account in data: email = data[account]["email"] password = data[account]["password"] elif isinstance(account, (tuple, list)): email = account[0] password = account[1] else: raise NotImplementedError # If the user is already logged in no need to do anything so return # We'd like to be able to access current.auth, however these are different threads #user = current.auth.user #if user and user.email == email: # # If the URL is different then move to the new URL # if not browser.current_url.endswith(nexturl): # url = "%s/%s" % (config.url, nexturl) # browser.get(url) # return # auth_menu_email is used by the default template # username fright is used by the IFRC template if browser.page_source is not None and \ (browser.page_source.find("<a id=\"auth_menu_email\">%s</a>" % email) > 0 or 
browser.page_source.find("<div class=\"username fright\">%s</div>" % email) > 0): # If the URL is different then move to the new URL if not browser.current_url.endswith(nexturl): url = "%s/%s" % (config.url, nexturl) browser.get(url) return if nexturl: url = "%s/default/user/login?_next=/%s/%s" % \ (config.url, current.request.application, nexturl) else: url = "%s/default/user/login" % config.url browser.get(url) # Login elem = browser.find_element_by_id("auth_user_email") elem.send_keys(email) elem = browser.find_element_by_id("auth_user_password") elem.send_keys(password) elem = browser.find_element_by_xpath("//input[contains(@value,'Login')]") elem.click() # Check the result try: elem = self.get_confirmation(3) except NoSuchElementException: self.reporter("Login failed.. so registering account") # Try registering self.register(account) else: self.reporter(elem.text) return True # ------------------------------------------------------------------------- def logout(self): """ Logout """ config = current.test_config browser = config.browser url = "%s/default/user/login" % config.url browser.get(url) browser.find_element_by_id("auth_menu_email").click() try: elem = browser.find_element_by_id("auth_menu_logout") except NoSuchElementException: self.reporter("Logged-out already") return True elem.click() # Check the result try: elem = self.get_confirmation(3) except NoSuchElementException: assert 0, "Logout unsuccesful" else: self.reporter(elem.text) return True # ----------------------------------------------------------------------------- def register(self, account="normal"): """ Register on the system """ config = current.test_config browser = config.browser data = current.data["auth"] if account in data: email = data[account]["email"] first_name = data[account]["first_name"] last_name = data[account]["last_name"] password = data[account]["password"] else: raise NotImplementedError # Load homepage homepage() # Register user elem = 
browser.find_element_by_id("auth_user_first_name") elem.send_keys(first_name) elem = browser.find_element_by_id("auth_user_last_name") elem.send_keys(last_name) elem = browser.find_element_by_id("auth_user_email") elem.send_keys(email) elem = browser.find_element_by_id("auth_user_password") elem.send_keys(password) elem = browser.find_element_by_id("auth_user_password_two") elem.send_keys(password) elem = browser.find_element_by_xpath("//input[contains(@value,'Register')]") elem.click() # Check the result try: elem = self.get_confirmation(3) except NoSuchElementException: assert 0, "Registration unsuccesful" else: self.reporter(elem.text) return True # ------------------------------------------------------------------------- def get_confirmation(self, wait_time): """ @param wait_time: The time in seconds to wait for confirmation to appear. """ return WebDriverWait(self.browser, wait_time).until( lambda driver: driver.find_element_by_xpath("//div[@class='alert alert-success']")) # ------------------------------------------------------------------------- def getRows (self, table, data, dbcallback, components): """ Get a copy of all the records that match the data passed in this can be modified by the callback function """ def add_component_to_query(self, table, query, details, components): """ If possible add the component to the query. This uses the parameter components which is a map keyed on the name of the html control, which will give a list of three elements. The name of the related table, the key field and the field where the data will be added. 
For example see CreatePerson "sub_person_details_marital_status":["pr_person_details", "person_id", "marital_status" ] If the field is not in the components then a warning is written see http://eden.sahanafoundation.org/ticket/1475 """ if details[0] in components: component = components[details[0]] ctable = current.s3db[component[0]] query = query & (table.id == ctable[component[1]]) query = query & (ctable[component[2]] == details[1]) else: self.reporter("WARNING: Unable to check field %s" % details[0]) return query # Commit to start a new transaction: if MySQL gets straight the # same query straight within the same transaction, it wouldn't # even look at the table, but just return the cached response, so # the actual test condition (e.g. in create) gets optimized away... current.db.commit() query = (table.deleted != True) for details in data: if details[0][0:3] == "sub": query = add_component_to_query(self, table, query, details, components) else: query = query & (table[details[0]] == details[1]) rows = current.db(query).select(orderby=~table.id) if rows == None: rows = [] if dbcallback: rows = dbcallback(table, data, rows) return rows # ------------------------------------------------------------------------- def search(self, form_type, results_expected, fields, row_count, **kwargs): ''' Generic method to test the validity of search results. @param form_type: This can either be search.simple_form or search.advanced_form @param results_expected: Are results expected? @param fields: See the `fields` function. For search.simple_form, an empty list [] can be pass. The field will be taken from s3resource. @param row_count: Expected row count For search.simple_form, {"tablename":tablename, "key":key, "filters":[(field,value),...]} can be pass to get the resource and eventually the DB row count. Keyword arguments: These let you specify the kinds of checks to perform on the resulting datatable. Here are some of the options: 1. 
data - You can pass in your own function here that receives the data from the results table as an argument. Return true if your check is successful and false otherwise. This directly corresponds to the 'dt_data' function. 2. manual_check - You can pass in your own function here, and it'll receive this instance as an argument. This can be used for all other kinds of checks. 3. match_row - You can use this to match a series of values to a row in the result data table. The first value must be the index of the row to match against. 4. match_column - You can use this to match a series of values to a column in the result data table. The first value must be the index of the row to match against. ''' current.auth.override = True if isinstance(row_count, dict) and form_type == self.search.simple_form: key = row_count["key"] resource = current.s3db.resource(row_count["tablename"]) simpleSearch = resource.search_method().simple[0] if len(fields) == 0: fields = ({"name":simpleSearch[0],"value":key},) searchFields = simpleSearch[1].field for i in xrange(len(searchFields)): if i == 0: query = (FS(searchFields[i]).like("%" + key + "%")) else: query |= (FS(searchFields[i]).like("%" + key + "%")) filters = row_count.get("filters", None) if filters is not None: for filter in filters: qfilter = (resource.table[filter[0]] == filter[1]) resource.add_filter(qfilter) resource.add_filter(query) row_count = resource.count() browser = self.browser clear_button = browser.find_elements_by_xpath("//a[text()='Clear']") if clear_button[0].is_displayed() : clear_button[0].click() else: clear_button[1].click() try: if form_type == self.search.advanced_form: link = browser.find_element_by_xpath("//a[@class='action-lnk advanced-lnk']") elif form_type == self.search.simple_form: link = browser.find_element_by_xpath("//a[@class='action-lnk simple-lnk']") except NoSuchElementException: # There might be no link if one of the forms is the only option link = None if link and link.is_displayed(): 
link.click() time.sleep(1) self.fill_fields(fields) if isinstance(row_count, dict) and form_type == self.search.advanced_form: resource = current.s3db.resource(row_count["tablename"]) search_list = resource.search_method().advanced for search in search_list: widget = search[1] if isinstance(widget, S3SearchOptionsWidget): values = [] elem_list = browser.find_elements_by_name(widget.attr._name) for elem in elem_list: if elem.get_attribute("checked"): values.append(int(s3_unicode(elem.get_attribute("value")))) if len(values) > 0: query = widget.query(resource,values) resource.add_filter(query) filters = row_count.get("filters", None) if filters is not None: for filter in filters: qfilter = (resource.table[filter[0]] == filter[1]) resource.add_filter(qfilter) row_count = resource.count() browser.find_element_by_name(("simple_submit", "advanced_submit")[form_type]).click() time.sleep(1) if results_expected: self.assertFalse( browser.find_element_by_id("table-container").text == "No Records Found", "No results found, when results expected.") else: return # We"re done entering and submitting data; now we need to check if the # results produced are valid. htmlRowCount = self.dt_row_cnt()[2] successMsg = "DB row count (" + str(row_count) + ") matches the HTML datatable row count (" + str(htmlRowCount) + ")." failMsg = "DB row count (" + str(row_count) + ") does not match the HTML datatable row count (" + str(htmlRowCount) + ")." 
self.assertTrue(row_count == htmlRowCount, failMsg) self.reporter(successMsg) if "data" in kwargs.keys(): self.assertTrue(bool(kwargs["data"](self.dt_data())), "Data verification failed.") if "manual_check" in kwargs.keys(): self.assertTrue(bool(kwargs["manual_check"](self)), "Manual checks failed.") if "match_row" in kwargs.keys(): data = self.dt_data(row_list=(kwargs["match_row"][0])) kwargs["match_row"] = kwargs["match_row"][1:] for a, b in zip(kwargs["match_row"], data): self.assertTrue(a == b, "Row match failed.") if "match_column" in kwargs.keys(): column_index = kwargs["match_column"][0] kwargs["match_column"] = kwargs["match_column"][1:] shown_items = [dt_data_item(column=column_index, row=r) for r in xrange(1, len(kwargs["match_column"]) + 1)] for item in kwargs["match_column"]: self.assertTrue(item in shown_items) return self.dt_data() search.simple_form = 0 search.advanced_form = 1 # ------------------------------------------------------------------------- def create(self, tablename, data, success=True, dbcallback=None, components={} ): """ Generic method to create a record from the data passed in @param tablename: The table where the record belongs @param data: The data that is to be inserted @param success: The expectation that this create will succeed @param dbcallback: Used by getRows to return extra data from the database before & after the create This will return a dictionary of rows before and after the create """ browser = self.browser result = {} id_data = [] table = current.s3db[tablename] settings = current.deployment_settings date_format = str(settings.get_L10n_date_format()) datetime_format = str(settings.get_L10n_datetime_format()) # If a confirmation is shown then clear it so that it doesn't give a false positive later try: elem = self.get_confirmation(0.2) elem.click() time.sleep(1) # Give it time to dissolve except: pass # Fill in the Form for details in data: el_id = "%s_%s" % (tablename, details[0]) el_value = details[1] if 
len(details) >= 4: time.sleep(details[3]) if len(details) >= 3: el_type = details[2] if el_type == "checkbox": for value in el_value: self.browser.find_element_by_xpath("//label[contains(text(),'%s')]" % value).click() # @ToDo: Add value to id_data to check for create function elif el_type == "inv_widget": raw_value = self.w_inv_item_select(el_value, tablename, details[0], ) elif el_type == "supply_widget": raw_value = self.w_supply_select(el_value, tablename, details[0], ) elif el_type == "facility_widget": raw_value = self.w_facility_select(el_value, tablename, details[0], ) elif el_type == "gis_location": self.w_gis_location(el_value, details[0], ) raw_value = None else: # Embedded form fields el_id = "%s_%s" % (el_type, details[0]) el = browser.find_element_by_id(el_id) el.send_keys(el_value) raw_value = None else: # Look for autocomplete input field. el = browser.find_elements_by_id("dummy_"+el_id) if len(el) != 0: # Autocomplete input found. el = el[0] else: el = browser.find_element_by_id(el_id) class_name = el.get_attribute("class") if "generic-widget" in class_name: # Dropdown option raw_value = False options_list = el.find_elements_by_tag_name("option") # Find the Longest Word Trimmed Match that matches with el_value option = self.find_max_match(options_list, el_value) if option is None: raise NoSuchElementException("%s option could not be found in %s" % (el_value, el_id)) option.click() raw_value = option.get_attribute("value") try: raw_value = int(raw_value) except: pass # Test that we have an id that can be used in the database if el_value and el_value != "-": self.assertTrue(raw_value, "%s option cannot be found in %s" % (el_value, el_id)) elif "ui-autocomplete-input" in class_name: # Autocomplete field raw_value = self.w_autocomplete(el_value, el_id, el_value, ) elif isinstance(table[details[0]].widget, S3DateWidget): el_value_date = datetime.datetime.strptime(el_value, "%Y-%m-%d") # %H:%M:%S") el_value = el_value_date.strftime(date_format) 
el.clear() el.send_keys(el_value) raw_value = el_value_date elif isinstance(table[details[0]].widget, S3DateTimeWidget): el_value_datetime = datetime.datetime.strptime(el_value, "%Y-%m-%d %H:%M:%S") el_value = el_value_datetime.strftime(datetime_format) el.clear() el.send_keys(el_value) #raw_value = el_value_datetime raw_value = None # @ToDo: Fix hack to stop checking datetime field. This is because the field does not support data entry by key press # Use the raw value to check that the record was added succesfully else: # Normal text input el.clear() el.send_keys(el_value) raw_value = el_value if raw_value: id_data.append([details[0], raw_value]) result["before"] = self.getRows(table, id_data, dbcallback, components) # Submit the Form submit_btn = browser.find_element_by_css_selector("input[type='submit']") submit_btn.click() #self.wait_for_page_to_load() # Check & Report the results confirm = True try: elem = self.get_confirmation(3) self.reporter(elem.text) except (NoSuchElementException, TimeoutException): confirm = False if (confirm != success): # Do we have a validation error? 
try: elem_error = browser.find_element_by_xpath("//div[@class='error']") if elem_error: msg = "%s %s" % (elem_error.get_attribute("id"), elem_error.text) self.reporter(msg) except NoSuchElementException: pass if success: self.assertTrue(confirm, "Confirmation of record creation not received.\nRecord - %s" % data) else: self.assertFalse(confirm, "Unexpected confirmation of record creation received.\nRecord - %s" % data) # Database Checks result["after"] = self.getRows(table, id_data, dbcallback, components) successMsg = "Records added to database: %s" % id_data failMsg = "Records not added to database %s" % id_data if success: self.assertTrue((len(result["after"]) - len(result["before"])) == 1, failMsg) self.reporter(successMsg) else: self.assertTrue((len(result["after"]) == len(result["before"])), successMsg) self.reporter(failMsg) return result # ------------------------------------------------------------------------- def find_max_match(self, options_list, el_value): """ Finds the Longest Word Trimmed Match for selecting text in options field. @param options_list: The list of options in the options field. @param el_value: The text to be matched in the options. """ el_value_list = el_value.split() # Remove all words of length = 1 such as hyphens. el_value_list = filter(lambda x: len(x) > 1, el_value_list) # Initialise max_len as 0 and matchec_option = None. max_len = 0 matched_option = None for option in options_list: text = option.text text_list = text.split() # Remove all words of length = 1 such as hyphens. text_list = filter(lambda x: len(x) > 1, text_list) # Find intersection of el_value_list and text_list matched_list = list(set(el_value_list).intersection(text_list)) # matched_len is number of matching words for the current option. matched_len = len(matched_list) # Save the maximum matched option in matched_option. if matched_len > max_len: matched_option = option max_len = matched_len # Return the maximum matched option. 
return matched_option # ------------------------------------------------------------------------- def select_option(self, select_elem, option_label): if select_elem: select = Select(select_elem) try: select.select_by_visible_text(option_label) except NoSuchElementException: return False else: return True # ------------------------------------------------------------------------- def wait_for_page_to_load(self, timeout=10000): w = WebDriverWait(self.browser, timeout/1000.0) w.until(lambda browser: browser.execute_script("return document.readyState") == "complete") # ------------------------------------------------------------------------- class InvalidReportOrGroupException(Exception): pass # ------------------------------------------------------------------------- def report(self, fields, report_of, grouped_by, report_fact, *args, **kwargs): browser = self.browser show_totals = True browser.find_element_by_xpath("//a[text()='Reset all filters']").click() # Open the filter options fieldset, if not done so. filter_options = browser.find_elements_by_css_selector("#filter_options button") if filter_options[0].is_displayed(): # Click the 'Show' button filter_options[0].click() if fields: self.fill_fields(fields) # Open the report options fieldset, if not done so. 
report_options = browser.find_elements_by_css_selector("#report_options button") if report_options[0].is_displayed(): # Click the 'Show' button report_options[0].click() # Select the item to make a report of: rows_select = browser.find_element_by_id("report-rows") if not self.select_option(rows_select, report_of): raise self.InvalidReportOrGroupException("%s not found in 'Report of' option" % report_of) # Select the value to group by: cols_select = browser.find_element_by_id("report-cols") if not self.select_option(cols_select, grouped_by): raise self.InvalidReportOrGroupException("%s not found in 'Grouped By' option" % grouped_by) # Select the value to base the report on if report_fact: fact_select = browser.find_element_by_id("report-fact") if not self.select_option(fact_select, report_fact): raise self.InvalidReportOrGroupException("%s not found in 'Value' option" % report_fact) submit_btn = browser.find_element_by_xpath("//input[@type='submit']") submit_btn.click() time.sleep(1) # Now, check the generated report: for check in args: row = self.dt_find(check[0]) if not row: raise self.InvalidReportOrGroupException("Row with %s could not be found in the datatable" % check[0]) else: row = row[0][0] col = 1 e = browser.find_element_by_xpath(".//*[@id='datatable']/thead/tr[2]/th[1]") while True: if e.text.strip().lower() == check[1].lower(): break else: col += 1 try: e = browser.find_element_by_xpath( ".//*[@id='datatable']/thead/tr[2]/th[{0}]".format(col)) except NoSuchElementException: raise self.InvalidReportOrGroupException("Column with %s could not be found in the datatable." 
% check[1]) import collections if isinstance(check[2], collections.Iterable): td = browser.find_element_by_xpath( ".//*[@id='datatable']/tbody/tr[{0}]/td[{1}]".format(row, col)) shown_items = [item.text.lower() for item in td.find_elements_by_tag_name("li")] for item in check[2]: self.assertTrue(item.lower() in shown_items, u"Report check failed.") else: self.assertTrue(str(dt_data_item(row, col)) == str(check[2]), "Report check failed.") if 'row_count' in kwargs: self.assertEqual(kwargs['row_count'], len(browser.find_elements_by_xpath( "//table[@id='datatable']/tbody/tr")), "Row Count given and calculated do not match.") # ------------------------------------------------------------------------- def fill_fields(self, fields): """ Fills form fields with values. @param fields A list of dicts that specifies the fields to fill. """ browser = self.browser for field_spec in fields: value = field_spec["value"] for query_type in ("xpath", "class", "name", "id"): if query_type in field_spec.keys(): field = getattr(browser, 'find_element_by_' + query_type) field = field(field_spec[query_type]) if ("label" in field_spec) and ("name" in field_spec): splitted_label = field_spec["label"].split() xpath = "//*[contains(@for,'{name}') ".format(**field_spec) for word in splitted_label: xpath += "and contains(text(), '" + word + "') " xpath = xpath + "]" field = browser.find_element_by_xpath(xpath) elif "label" in field_spec: xpath = "//*[ " splitted_label = field_spec["label"].split() for word in splitted_label[0 : -1]: xpath += "contains(text(), '" + word + "') and " xpath = xpath + "contains(text(), '" + splitted_label[-1] + "')]" field = browser.find_element_by_xpath(xpath) if isinstance(value, basestring): # Text inputs field.send_keys(value) elif isinstance(value, bool) and value: # Checkboxes and radios field.click() # ------------------------------------------------------------------------- def dt_filter(self, search_string = " ", forceClear = True, quiet = True): return 
dt_filter(self.reporter, search_string, forceClear, quiet) # ------------------------------------------------------------------------- def dt_row_cnt(self, check = (), quiet = True): return dt_row_cnt(self.reporter,check, quiet, self) # ------------------------------------------------------------------------- def dt_data(self, row_list = None, add_header = False): return dt_data(row_list, add_header) # ------------------------------------------------------------------------- def dt_data_item(self, row = 1, column = 1, tableID = "datatable", ): return dt_data_item(row, column, tableID) # ------------------------------------------------------------------------- def dt_find(self, search = "", row = None, column = None, cellList = None, tableID = "datatable", first = False, ): return dt_find(search, row, column, cellList, tableID, first) # ------------------------------------------------------------------------- def dt_links(self, row = 1, tableID = "datatable", quiet = True ): return dt_links(self.reporter, row, tableID, quiet) # ------------------------------------------------------------------------- def dt_action(self, row = 1, action = None, column = 1, tableID = "datatable", ): return dt_action(row, action, column, tableID) # ------------------------------------------------------------------------- def w_autocomplete(self, value, autocomplete, needle = None, quiet = True, ): config = current.test_config browser = config.browser autocomplete_id = "dummy_%s" % autocomplete throbber_id = "dummy_%s_throbber" % autocomplete if needle == None: needle = value elem = browser.find_element_by_id(autocomplete_id) elem.clear() elem.send_keys(value) # Give time for the throbber to appear time.sleep(1) # Now wait for throbber to close giveup = 0.0 sleeptime = 0.2 while browser.find_element_by_id(throbber_id).is_displayed(): time.sleep(sleeptime) giveup += sleeptime if giveup > 60: return False # Throbber has closed and data was found, return for i in range(10): # For each 
autocomplete on the form the menu will have an id starting from 1 autocomplete_xpath = "//ul[contains(@class,'ui-autocomplete')]" results_xpath = "/li[@class='ui-menu-item']/a" autocomplete_results = browser.find_elements_by_xpath(autocomplete_xpath + results_xpath) for j in range(len(autocomplete_results)): # If the value is in the result - might not be a match as AC may be a represent if value in autocomplete_results[j].text: autocomplete_results[j].click() time.sleep(3) db_id = browser.find_element_by_id(autocomplete) id = db_id.get_attribute("value") if id: return int(id) else: return False time.sleep(sleeptime) # ------------------------------------------------------------------------- def w_inv_item_select(self, item_repr, tablename, field, quiet = True, ): config = current.test_config browser = config.browser el_id = "%s_%s" % (tablename, field) el = browser.find_element_by_id(el_id) raw_value = None options_list = el.find_elements_by_tag_name("option") # Find the Longest Word Trimmed Match that matches with item_repr option = self.find_max_match(options_list, item_repr) if option is None: raise NoSuchElementException("%s option could not be found in %s" % (item_repr, el_id)) option.click() raw_value = int(option.get_attribute("value")) # Now wait for the pack_item to be populated el_id = "%s_%s" % (tablename, "item_pack_id") _autocomple_finish(el_id, browser) return raw_value # ------------------------------------------------------------------------- def w_gis_location(self, item_repr, field, quiet = True, ): config = current.test_config browser = config.browser if field == "L0": el_id = "gis_location_%s" % field el = browser.find_element_by_id(el_id) options_list = el.find_elements_by_tag_name("option") # Find the Longest Word Trimmed Match that matches with item_repr option = self.find_max_match(options_list, item_repr) if option is None: raise NoSuchElementException("%s option could not be found in %s" % (item_repr, el_id)) option.click() raw_value = 
int(option.get_attribute("value")) elif field[0] == "L": # @todo make this a proper autocomplete widget (select or add) el_id = "gis_location_%s_ac" % field el = browser.find_element_by_id(el_id) el.send_keys(item_repr) raw_value = None # can't get the id at the moment (see the todo) else: el_id = "gis_location_%s" % field el = browser.find_element_by_id(el_id) el.send_keys(item_repr) raw_value = item_repr return raw_value # ------------------------------------------------------------------------- def w_supply_select(self, item_repr, tablename, field, quiet = True, ): el_id = "%s_%s" % (tablename, field) raw_value = self.w_autocomplete(item_repr, el_id) # Now wait for the pack_item to be populated browser = current.test_config.browser el_id = "%s_%s" % (tablename, "item_pack_id") _autocomple_finish(el_id, browser) return raw_value # ------------------------------------------------------------------------- def w_facility_select(self, org_repr, tablename, field, quiet = True, ): el_id = "%s_%s" % (tablename, field) raw_value = self.w_autocomplete(item_repr, el_id) # Now wait for the pack_item to be populated browser = current.test_config.browser el_id = "%s_%s" % (tablename, "site_id") _autocomple_finish(el_id, browser) return raw_value # ============================================================================= def _autocomple_finish(el_id, browser): """ Helper function """ giveup = 0.0 sleeptime = 0.2 el = browser.find_element_by_id(el_id) while giveup < 60: try: if el.find_elements_by_tag_name("option")[0].text != "": return except: # StaleElementReferenceException print "StaleElementReferenceException %s" % giveup el = browser.find_element_by_id(el_id) # The pack drop down hasn't been populated yet so sleep time.sleep(sleeptime) giveup += sleeptime # END =========================================================================
mit
xiangel/hue
desktop/core/ext-py/boto-2.38.0/boto/sdb/connection.py
153
26088
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import xml.sax import threading import boto from boto import handler from boto.connection import AWSQueryConnection from boto.sdb.domain import Domain, DomainMetaData from boto.sdb.item import Item from boto.sdb.regioninfo import SDBRegionInfo from boto.exception import SDBResponseError class ItemThread(threading.Thread): """ A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class. Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the ``items`` instance variable after :py:meth:`run() <run>` is called. .. tip:: The item retrieval will not start until the :func:`run() <boto.sdb.connection.ItemThread.run>` method is called. """ def __init__(self, name, domain_name, item_names): """ :param str name: A thread name. Used for identification. 
:param str domain_name: The name of a SimpleDB :class:`Domain <boto.sdb.domain.Domain>` :type item_names: string or list of strings :param item_names: The name(s) of the items to retrieve from the specified :class:`Domain <boto.sdb.domain.Domain>`. :ivar list items: A list of items retrieved. Starts as empty list. """ super(ItemThread, self).__init__(name=name) #print 'starting %s with %d items' % (name, len(item_names)) self.domain_name = domain_name self.conn = SDBConnection() self.item_names = item_names self.items = [] def run(self): """ Start the threaded retrieval of items. Populates the ``items`` list with :class:`Item <boto.sdb.item.Item>` objects. """ for item_name in self.item_names: item = self.conn.get_attributes(self.domain_name, item_name) self.items.append(item) #boto.set_stream_logger('sdb') class SDBConnection(AWSQueryConnection): """ This class serves as a gateway to your SimpleDB region (defaults to us-east-1). Methods within allow access to SimpleDB :class:`Domain <boto.sdb.domain.Domain>` objects and their associated :class:`Item <boto.sdb.item.Item>` objects. .. tip:: While you may instantiate this class directly, it may be easier to go through :py:func:`boto.connect_sdb`. """ DefaultRegionName = 'us-east-1' DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com' APIVersion = '2009-04-15' ResponseError = SDBResponseError def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', converter=None, security_token=None, validate_certs=True, profile_name=None): """ For any keywords that aren't documented, refer to the parent class, :py:class:`boto.connection.AWSAuthConnection`. You can avoid having to worry about these keyword arguments by instantiating these objects via :py:func:`boto.connect_sdb`. 
:type region: :class:`boto.sdb.regioninfo.SDBRegionInfo` :keyword region: Explicitly specify a region. Defaults to ``us-east-1`` if not specified. You may also specify the region in your ``boto.cfg``: .. code-block:: cfg [SDB] region = eu-west-1 """ if not region: region_name = boto.config.get('SDB', 'region', self.DefaultRegionName) for reg in boto.sdb.regions(): if reg.name == region_name: region = reg break self.region = region super(SDBConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token=security_token, validate_certs=validate_certs, profile_name=profile_name) self.box_usage = 0.0 self.converter = converter self.item_cls = Item def _required_auth_capability(self): return ['sdb'] def set_item_cls(self, cls): """ While the default item class is :py:class:`boto.sdb.item.Item`, this default may be overridden. Use this method to change a connection's item class. :param object cls: The new class to set as this connection's item class. See the default item class for inspiration as to what your replacement should/could look like. 
""" self.item_cls = cls def _build_name_value_list(self, params, attributes, replace=False, label='Attribute'): keys = sorted(attributes.keys()) i = 1 for key in keys: value = attributes[key] if isinstance(value, list): for v in value: params['%s.%d.Name' % (label, i)] = key if self.converter: v = self.converter.encode(v) params['%s.%d.Value' % (label, i)] = v if replace: params['%s.%d.Replace' % (label, i)] = 'true' i += 1 else: params['%s.%d.Name' % (label, i)] = key if self.converter: value = self.converter.encode(value) params['%s.%d.Value' % (label, i)] = value if replace: params['%s.%d.Replace' % (label, i)] = 'true' i += 1 def _build_expected_value(self, params, expected_value): params['Expected.1.Name'] = expected_value[0] if expected_value[1] is True: params['Expected.1.Exists'] = 'true' elif expected_value[1] is False: params['Expected.1.Exists'] = 'false' else: params['Expected.1.Value'] = expected_value[1] def _build_batch_list(self, params, items, replace=False): item_names = items.keys() i = 0 for item_name in item_names: params['Item.%d.ItemName' % i] = item_name j = 0 item = items[item_name] if item is not None: attr_names = item.keys() for attr_name in attr_names: value = item[attr_name] if isinstance(value, list): for v in value: if self.converter: v = self.converter.encode(v) params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name params['Item.%d.Attribute.%d.Value' % (i, j)] = v if replace: params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true' j += 1 else: params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name if self.converter: value = self.converter.encode(value) params['Item.%d.Attribute.%d.Value' % (i, j)] = value if replace: params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true' j += 1 i += 1 def _build_name_list(self, params, attribute_names): i = 1 attribute_names.sort() for name in attribute_names: params['Attribute.%d.Name' % i] = name i += 1 def get_usage(self): """ Returns the BoxUsage (in USD) accumulated on this specific 
SDBConnection instance. .. tip:: This can be out of date, and should only be treated as a rough estimate. Also note that this estimate only applies to the requests made on this specific connection instance. It is by no means an account-wide estimate. :rtype: float :return: The accumulated BoxUsage of all requests made on the connection. """ return self.box_usage def print_usage(self): """ Print the BoxUsage and approximate costs of all requests made on this specific SDBConnection instance. .. tip:: This can be out of date, and should only be treated as a rough estimate. Also note that this estimate only applies to the requests made on this specific connection instance. It is by no means an account-wide estimate. """ print('Total Usage: %f compute seconds' % self.box_usage) cost = self.box_usage * 0.14 print('Approximate Cost: $%f' % cost) def get_domain(self, domain_name, validate=True): """ Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name matches ``domain_name``. :param str domain_name: The name of the domain to retrieve :keyword bool validate: When ``True``, check to see if the domain actually exists. If ``False``, blindly return a :py:class:`Domain <boto.sdb.domain.Domain>` object with the specified name set. :raises: :py:class:`boto.exception.SDBResponseError` if ``validate`` is ``True`` and no match could be found. :rtype: :py:class:`boto.sdb.domain.Domain` :return: The requested domain """ domain = Domain(self, domain_name) if validate: self.select(domain, """select * from `%s` limit 1""" % domain_name) return domain def lookup(self, domain_name, validate=True): """ Lookup an existing SimpleDB domain. This differs from :py:meth:`get_domain` in that ``None`` is returned if ``validate`` is ``True`` and no match was found (instead of raising an exception). :param str domain_name: The name of the domain to retrieve :param bool validate: If ``True``, a ``None`` value will be returned if the specified domain can't be found. 
If ``False``, a :py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly returned, regardless of whether it actually exists. :rtype: :class:`boto.sdb.domain.Domain` object or ``None`` :return: The Domain object or ``None`` if the domain does not exist. """ try: domain = self.get_domain(domain_name, validate) except: domain = None return domain def get_all_domains(self, max_domains=None, next_token=None): """ Returns a :py:class:`boto.resultset.ResultSet` containing all :py:class:`boto.sdb.domain.Domain` objects associated with this connection's Access Key ID. :keyword int max_domains: Limit the returned :py:class:`ResultSet <boto.resultset.ResultSet>` to the specified number of members. :keyword str next_token: A token string that was returned in an earlier call to this method as the ``next_token`` attribute on the returned :py:class:`ResultSet <boto.resultset.ResultSet>` object. This attribute is set if there are more than Domains than the value specified in the ``max_domains`` keyword. Pass the ``next_token`` value from you earlier query in this keyword to get the next 'page' of domains. """ params = {} if max_domains: params['MaxNumberOfDomains'] = max_domains if next_token: params['NextToken'] = next_token return self.get_list('ListDomains', params, [('DomainName', Domain)]) def create_domain(self, domain_name): """ Create a SimpleDB domain. 
:type domain_name: string :param domain_name: The name of the new domain :rtype: :class:`boto.sdb.domain.Domain` object :return: The newly created domain """ params = {'DomainName': domain_name} d = self.get_object('CreateDomain', params, Domain) d.name = domain_name return d def get_domain_and_name(self, domain_or_name): """ Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a ``tuple`` with the following members (in order): * In instance of :class:`boto.sdb.domain.Domain` for the requested domain * The domain's name as a ``str`` :type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain` :param domain_or_name: The domain or domain name to get the domain and name for. :raises: :class:`boto.exception.SDBResponseError` when an invalid domain name is specified. :rtype: tuple :return: A ``tuple`` with contents outlined as per above. """ if (isinstance(domain_or_name, Domain)): return (domain_or_name, domain_or_name.name) else: return (self.get_domain(domain_or_name), domain_or_name) def delete_domain(self, domain_or_name): """ Delete a SimpleDB domain. .. caution:: This will delete the domain and all items within the domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :rtype: bool :return: True if successful """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name} return self.get_status('DeleteDomain', params) def domain_metadata(self, domain_or_name): """ Get the Metadata for a SimpleDB domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. 
:param domain_or_name: Either the name of a domain or a Domain object :rtype: :class:`boto.sdb.domain.DomainMetaData` object :return: The newly created domain metadata object """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name} d = self.get_object('DomainMetadata', params, DomainMetaData) d.domain = domain return d def put_attributes(self, domain_or_name, item_name, attributes, replace=True, expected_value=None): """ Store attributes for a given item in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type item_name: string :param item_name: The name of the item whose attributes are being stored. :type attribute_names: dict or dict-like object :param attribute_names: The name/value pairs to store as attributes :type expected_value: list :param expected_value: If supplied, this is a list or tuple consisting of a single attribute name and expected value. The list can be of the form: * ['name', 'value'] In which case the call will first verify that the attribute "name" of this item has a value of "value". If it does, the delete will proceed, otherwise a ConditionalCheckFailed error will be returned. The list can also be of the form: * ['name', True|False] which will simply check for the existence (True) or non-existence (False) of the attribute. :type replace: bool :param replace: Whether the attribute values passed in will replace existing values or will be added as addition values. Defaults to True. 
:rtype: bool :return: True if successful """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name, 'ItemName': item_name} self._build_name_value_list(params, attributes, replace) if expected_value: self._build_expected_value(params, expected_value) return self.get_status('PutAttributes', params) def batch_put_attributes(self, domain_or_name, items, replace=True): """ Store attributes for multiple items in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type items: dict or dict-like object :param items: A dictionary-like object. The keys of the dictionary are the item names and the values are themselves dictionaries of attribute names/values, exactly the same as the attribute_names parameter of the scalar put_attributes call. :type replace: bool :param replace: Whether the attribute values passed in will replace existing values or will be added as addition values. Defaults to True. :rtype: bool :return: True if successful """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name} self._build_batch_list(params, items, replace) return self.get_status('BatchPutAttributes', params, verb='POST') def get_attributes(self, domain_or_name, item_name, attribute_names=None, consistent_read=False, item=None): """ Retrieve attributes for a given item in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type item_name: string :param item_name: The name of the item whose attributes are being retrieved. :type attribute_names: string or list of strings :param attribute_names: An attribute name or list of attribute names. This parameter is optional. If not supplied, all attributes will be retrieved for the item. 
:type consistent_read: bool :param consistent_read: When set to true, ensures that the most recent data is returned. :type item: :class:`boto.sdb.item.Item` :keyword item: Instead of instantiating a new Item object, you may specify one to update. :rtype: :class:`boto.sdb.item.Item` :return: An Item with the requested attribute name/values set on it """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name, 'ItemName': item_name} if consistent_read: params['ConsistentRead'] = 'true' if attribute_names: if not isinstance(attribute_names, list): attribute_names = [attribute_names] self.build_list_params(params, attribute_names, 'AttributeName') response = self.make_request('GetAttributes', params) body = response.read() if response.status == 200: if item is None: item = self.item_cls(domain, item_name) h = handler.XmlHandler(item, self) xml.sax.parseString(body, h) return item else: raise SDBResponseError(response.status, response.reason, body) def delete_attributes(self, domain_or_name, item_name, attr_names=None, expected_value=None): """ Delete attributes from a given item in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type item_name: string :param item_name: The name of the item whose attributes are being deleted. :type attributes: dict, list or :class:`boto.sdb.item.Item` :param attributes: Either a list containing attribute names which will cause all values associated with that attribute name to be deleted or a dict or Item containing the attribute names and keys and list of values to delete as the value. If no value is supplied, all attribute name/values for the item will be deleted. :type expected_value: list :param expected_value: If supplied, this is a list or tuple consisting of a single attribute name and expected value. 
The list can be of the form: * ['name', 'value'] In which case the call will first verify that the attribute "name" of this item has a value of "value". If it does, the delete will proceed, otherwise a ConditionalCheckFailed error will be returned. The list can also be of the form: * ['name', True|False] which will simply check for the existence (True) or non-existence (False) of the attribute. :rtype: bool :return: True if successful """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name, 'ItemName': item_name} if attr_names: if isinstance(attr_names, list): self._build_name_list(params, attr_names) elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls): self._build_name_value_list(params, attr_names) if expected_value: self._build_expected_value(params, expected_value) return self.get_status('DeleteAttributes', params) def batch_delete_attributes(self, domain_or_name, items): """ Delete multiple items in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type items: dict or dict-like object :param items: A dictionary-like object. The keys of the dictionary are the item names and the values are either: * dictionaries of attribute names/values, exactly the same as the attribute_names parameter of the scalar put_attributes call. The attribute name/value pairs will only be deleted if they match the name/value pairs passed in. * None which means that all attributes associated with the item should be deleted. 
:return: True if successful """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name} self._build_batch_list(params, items, False) return self.get_status('BatchDeleteAttributes', params, verb='POST') def select(self, domain_or_name, query='', next_token=None, consistent_read=False): """ Returns a set of Attributes for item names within domain_name that match the query. The query must be expressed in using the SELECT style syntax rather than the original SimpleDB query language. Even though the select request does not require a domain object, a domain object must be passed into this method so the Item objects returned can point to the appropriate domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object :param domain_or_name: Either the name of a domain or a Domain object :type query: string :param query: The SimpleDB query to be performed. :type consistent_read: bool :param consistent_read: When set to true, ensures that the most recent data is returned. :rtype: ResultSet :return: An iterator containing the results. """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'SelectExpression': query} if consistent_read: params['ConsistentRead'] = 'true' if next_token: params['NextToken'] = next_token try: return self.get_list('Select', params, [('Item', self.item_cls)], parent=domain) except SDBResponseError as e: e.body = "Query: %s\n%s" % (query, e.body) raise e
apache-2.0
mlperf/training_results_v0.6
Google/benchmarks/ssd/implementations/tpu-v3-1024-ssd/object_detection/tf_example_decoder.py
12
8485
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tensorflow Example proto decoder for object detection. A decoder to decode string tensors containing serialized tensorflow.Example protos for object detection. """ import tensorflow as tf slim_example_decoder = tf.contrib.slim.tfexample_decoder class TfExampleDecoder(object): """Tensorflow Example proto decoder.""" def __init__(self): """Constructor sets keys_to_features and items_to_handlers.""" self.keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/filename': tf.FixedLenFeature((), tf.string, default_value=''), 'image/key/sha256': tf.FixedLenFeature((), tf.string, default_value=''), 'image/source_id': tf.FixedLenFeature((), tf.string, default_value=''), 'image/height': tf.FixedLenFeature((), tf.int64, 1), 'image/width': tf.FixedLenFeature((), tf.int64, 1), # Object boxes and classes. 
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32), 'image/object/class/label': tf.VarLenFeature(tf.int64), 'image/object/class/text': tf.VarLenFeature(tf.string), 'image/object/area': tf.VarLenFeature(tf.float32), 'image/object/is_crowd': tf.VarLenFeature(tf.int64), 'image/object/difficult': tf.VarLenFeature(tf.int64), 'image/object/group_of': tf.VarLenFeature(tf.int64), 'image/object/weight': tf.VarLenFeature(tf.float32), } self.items_to_handlers = { 'image': slim_example_decoder.Image( image_key='image/encoded', format_key='image/format', channels=3), 'source_id': ( slim_example_decoder.Tensor('image/source_id')), 'key': ( slim_example_decoder.Tensor('image/key/sha256')), 'filename': ( slim_example_decoder.Tensor('image/filename')), # Object boxes and classes. 'groundtruth_boxes': ( slim_example_decoder.BoundingBox( ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/')), 'groundtruth_area': slim_example_decoder.Tensor( 'image/object/area'), 'groundtruth_is_crowd': ( slim_example_decoder.Tensor('image/object/is_crowd')), 'groundtruth_difficult': ( slim_example_decoder.Tensor('image/object/difficult')), 'groundtruth_group_of': ( slim_example_decoder.Tensor('image/object/group_of')), 'groundtruth_weights': ( slim_example_decoder.Tensor('image/object/weight')), } label_handler = slim_example_decoder.Tensor('image/object/class/label') self.items_to_handlers['groundtruth_classes'] = label_handler def decode(self, tf_example_string_tensor): """Decodes serialized tensorflow example and returns a tensor dictionary. Args: tf_example_string_tensor: a string tensor holding a serialized tensorflow example proto. Returns: A dictionary of the following tensors. image - 3D uint8 tensor of shape [None, None, 3] containing image. source_id - string tensor containing original image id. 
key - string tensor with unique sha256 hash key. filename - string tensor with original dataset filename. groundtruth_boxes - 2D float32 tensor of shape [None, 4] containing box corners. groundtruth_classes - 1D int64 tensor of shape groundtruth_weights - 1D float32 tensor of shape [None] indicating the weights of groundtruth boxes. [None] containing classes for the boxes. groundtruth_area - 1D float32 tensor of shape [None] containing containing object mask area in pixel squared. groundtruth_is_crowd - 1D bool tensor of shape [None] indicating if the boxes enclose a crowd. Optional: groundtruth_difficult - 1D bool tensor of shape [None] indicating if the boxes represent `difficult` instances. groundtruth_group_of - 1D bool tensor of shape [None] indicating if the boxes represent `group_of` instances. groundtruth_instance_masks - 3D float32 tensor of shape [None, None, None] containing instance masks. """ serialized_example = tf.reshape(tf_example_string_tensor, shape=[]) decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features, self.items_to_handlers) keys = sorted(decoder.list_items()) tensors = decoder.decode(serialized_example, items=keys) tensor_dict = dict(zip(keys, tensors)) is_crowd = 'groundtruth_is_crowd' tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool) tensor_dict['image'].set_shape([None, None, 3]) def default_groundtruth_weights(): return tf.ones( tf.shape(tensor_dict['groundtruth_boxes'])[0], dtype=tf.float32) tensor_dict['groundtruth_weights'] = tf.cond( tf.greater( tf.shape( tensor_dict['groundtruth_weights'])[0], 0), lambda: tensor_dict['groundtruth_weights'], default_groundtruth_weights) return tensor_dict class TfExampleSegmentationDecoder(object): """Tensorflow Example proto decoder.""" def __init__(self): """Constructor sets keys_to_features and items_to_handlers.""" self.keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/filename': tf.FixedLenFeature((), 
tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/height': tf.FixedLenFeature((), tf.int64, default_value=0), 'image/width': tf.FixedLenFeature((), tf.int64, default_value=0), 'image/segmentation/class/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/segmentation/class/format': tf.FixedLenFeature((), tf.string, default_value='png'), } self.items_to_handlers = { 'image': slim_example_decoder.Image( image_key='image/encoded', format_key='image/format', channels=3), 'labels_class': slim_example_decoder.Image( image_key='image/segmentation/class/encoded', format_key='image/segmentation/class/format', channels=1) } def decode(self, tf_example_string_tensor): """Decodes serialized tensorflow example and returns a tensor dictionary. Args: tf_example_string_tensor: a string tensor holding a serialized tensorflow example proto. Returns: A dictionary of the following tensors. image - 3D uint8 tensor of shape [None, None, 3] containing image. labels_class - 2D unit8 tensor of shape [None, None] containing pixel-wise class labels. """ serialized_example = tf.reshape(tf_example_string_tensor, shape=[]) decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features, self.items_to_handlers) keys = sorted(decoder.list_items()) keys = ['image', 'labels_class'] tensors = decoder.decode(serialized_example, items=keys) tensor_dict = dict(zip(keys, tensors)) tensor_dict['image'].set_shape([None, None, 3]) return tensor_dict
apache-2.0
josesanch/django-oscar
src/oscar/apps/offer/utils.py
5
4015
from itertools import chain import logging from django.db.models import Q from django.utils.timezone import now from oscar.core.loading import get_model from oscar.apps.offer import results ConditionalOffer = get_model('offer', 'ConditionalOffer') logger = logging.getLogger('oscar.offers') class OfferApplicationError(Exception): pass class Applicator(object): def apply(self, request, basket): """ Apply all relevant offers to the given basket. The request is passed too as sometimes the available offers are dependent on the user (eg session-based offers). """ offers = self.get_offers(request, basket) self.apply_offers(basket, offers) def apply_offers(self, basket, offers): applications = results.OfferApplications() for offer in offers: num_applications = 0 # Keep applying the offer until either # (a) We reach the max number of applications for the offer. # (b) The benefit can't be applied successfully. while num_applications < offer.get_max_applications(basket.owner): result = offer.apply_benefit(basket) num_applications += 1 if not result.is_successful: break applications.add(offer, result) if result.is_final: break # Store this list of discounts with the basket so it can be # rendered in templates basket.offer_applications = applications def get_offers(self, request, basket): """ Return all offers to apply to the basket. This method should be subclassed and extended to provide more sophisticated behaviour. For instance, you could load extra offers based on the session or the user type. 
""" site_offers = self.get_site_offers() basket_offers = self.get_basket_offers(basket, request.user) user_offers = self.get_user_offers(request.user) session_offers = self.get_session_offers(request) return list(sorted(chain( session_offers, basket_offers, user_offers, site_offers), key=lambda o: o.priority, reverse=True)) def get_site_offers(self): """ Return site offers that are available to all users """ cutoff = now() date_based = Q( Q(start_datetime__lte=cutoff), Q(end_datetime__gte=cutoff) | Q(end_datetime=None), ) nondate_based = Q(start_datetime=None, end_datetime=None) qs = ConditionalOffer.objects.filter( date_based | nondate_based, offer_type=ConditionalOffer.SITE, status=ConditionalOffer.OPEN) # Using select_related with the condition/benefit ranges doesn't seem # to work. I think this is because both the related objects have the # FK to range with the same name. return qs.select_related('condition', 'benefit') def get_basket_offers(self, basket, user): """ Return basket-linked offers such as those associated with a voucher code """ offers = [] if not basket.id: return offers for voucher in basket.vouchers.all(): if voucher.is_active() and voucher.is_available_to_user(user): basket_offers = voucher.offers.all() for offer in basket_offers: offer.set_voucher(voucher) offers = list(chain(offers, basket_offers)) return offers def get_user_offers(self, user): """ Returns offers linked to this particular user. Eg: student users might get 25% off """ return [] def get_session_offers(self, request): """ Returns temporary offers linked to the current session. Eg: visitors coming from an affiliate site get a 10% discount """ return []
bsd-3-clause
Codlydodly/python-client
venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py
116
17364
"""setuptools.command.bdist_egg Build .egg distributions""" # This module should be kept compatible with Python 2.3 import sys import os import marshal import textwrap from setuptools import Command from distutils.dir_util import remove_tree, mkpath try: # Python 2.7 or >=3.2 from sysconfig import get_path, get_python_version def _get_purelib(): return get_path("purelib") except ImportError: from distutils.sysconfig import get_python_lib, get_python_version def _get_purelib(): return get_python_lib(False) from distutils import log from distutils.errors import DistutilsSetupError from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from types import CodeType from setuptools.compat import basestring, next from setuptools.extension import Library def strip_module(filename): if '.' in filename: filename = os.path.splitext(filename)[0] if filename.endswith('module'): filename = filename[:-6] return filename def write_stub(resource, pyfile): _stub_template = textwrap.dedent(""" def __bootstrap__(): global __bootstrap__, __loader__, __file__ import sys, pkg_resources, imp __file__ = pkg_resources.resource_filename(__name__, %r) __loader__ = None; del __bootstrap__, __loader__ imp.load_dynamic(__name__,__file__) __bootstrap__() """).lstrip() with open(pyfile, 'w') as f: f.write(_stub_template % resource) class bdist_egg(Command): description = "create an \"egg\" distribution" user_options = [ ('bdist-dir=', 'b', "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_build_platform()), ('exclude-source-files', None, "remove all .py files from the generated egg"), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ] 
boolean_options = [ 'keep-temp', 'skip-build', 'exclude-source-files' ] def initialize_options(self): self.bdist_dir = None self.plat_name = None self.keep_temp = 0 self.dist_dir = None self.skip_build = 0 self.egg_output = None self.exclude_source_files = None def finalize_options(self): ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") self.egg_info = ei_cmd.egg_info if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'egg') if self.plat_name is None: self.plat_name = get_build_platform() self.set_undefined_options('bdist',('dist_dir', 'dist_dir')) if self.egg_output is None: # Compute filename of the output egg basename = Distribution( None, None, ei_cmd.egg_name, ei_cmd.egg_version, get_python_version(), self.distribution.has_ext_modules() and self.plat_name ).egg_name() self.egg_output = os.path.join(self.dist_dir, basename+'.egg') def do_install_data(self): # Hack for packages that install data to install's --install-lib self.get_finalized_command('install').install_lib = self.bdist_dir site_packages = os.path.normcase(os.path.realpath(_get_purelib())) old, self.distribution.data_files = self.distribution.data_files,[] for item in old: if isinstance(item,tuple) and len(item)==2: if os.path.isabs(item[0]): realpath = os.path.realpath(item[0]) normalized = os.path.normcase(realpath) if normalized==site_packages or normalized.startswith( site_packages+os.sep ): item = realpath[len(site_packages)+1:], item[1] # XXX else: raise ??? 
self.distribution.data_files.append(item) try: log.info("installing package data to %s" % self.bdist_dir) self.call_command('install_data', force=0, root=None) finally: self.distribution.data_files = old def get_outputs(self): return [self.egg_output] def call_command(self,cmdname,**kw): """Invoke reinitialized command `cmdname` with keyword args""" for dirname in INSTALL_DIRECTORY_ATTRS: kw.setdefault(dirname,self.bdist_dir) kw.setdefault('skip_build',self.skip_build) kw.setdefault('dry_run', self.dry_run) cmd = self.reinitialize_command(cmdname, **kw) self.run_command(cmdname) return cmd def run(self): # Generate metadata first self.run_command("egg_info") # We run install_lib before install_data, because some data hacks # pull their data path from the install_lib command. log.info("installing library code to %s" % self.bdist_dir) instcmd = self.get_finalized_command('install') old_root = instcmd.root instcmd.root = None if self.distribution.has_c_libraries() and not self.skip_build: self.run_command('build_clib') cmd = self.call_command('install_lib', warn_dir=0) instcmd.root = old_root all_outputs, ext_outputs = self.get_ext_outputs() self.stubs = [] to_compile = [] for (p,ext_name) in enumerate(ext_outputs): filename,ext = os.path.splitext(ext_name) pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py') self.stubs.append(pyfile) log.info("creating stub loader for %s" % ext_name) if not self.dry_run: write_stub(os.path.basename(ext_name), pyfile) to_compile.append(pyfile) ext_outputs[p] = ext_name.replace(os.sep,'/') if to_compile: cmd.byte_compile(to_compile) if self.distribution.data_files: self.do_install_data() # Make the EGG-INFO directory archive_root = self.bdist_dir egg_info = os.path.join(archive_root,'EGG-INFO') self.mkpath(egg_info) if self.distribution.scripts: script_dir = os.path.join(egg_info, 'scripts') log.info("installing scripts to %s" % script_dir) self.call_command('install_scripts',install_dir=script_dir,no_ep=1) 
self.copy_metadata_to(egg_info) native_libs = os.path.join(egg_info, "native_libs.txt") if all_outputs: log.info("writing %s" % native_libs) if not self.dry_run: ensure_directory(native_libs) libs_file = open(native_libs, 'wt') libs_file.write('\n'.join(all_outputs)) libs_file.write('\n') libs_file.close() elif os.path.isfile(native_libs): log.info("removing %s" % native_libs) if not self.dry_run: os.unlink(native_libs) write_safety_flag( os.path.join(archive_root,'EGG-INFO'), self.zip_safe() ) if os.path.exists(os.path.join(self.egg_info,'depends.txt')): log.warn( "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" "Use the install_requires/extras_require setup() args instead." ) if self.exclude_source_files: self.zap_pyfiles() # Make the archive make_zipfile(self.egg_output, archive_root, verbose=self.verbose, dry_run=self.dry_run, mode=self.gen_header()) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) # Add to 'Distribution.dist_files' so that the "upload" command works getattr(self.distribution,'dist_files',[]).append( ('bdist_egg',get_python_version(),self.egg_output)) def zap_pyfiles(self): log.info("Removing .py files from temporary directory") for base,dirs,files in walk_egg(self.bdist_dir): for name in files: if name.endswith('.py'): path = os.path.join(base,name) log.debug("Deleting %s", path) os.unlink(path) def zip_safe(self): safe = getattr(self.distribution,'zip_safe',None) if safe is not None: return safe log.warn("zip_safe flag not set; analyzing archive contents...") return analyze_egg(self.bdist_dir, self.stubs) def gen_header(self): epm = EntryPoint.parse_map(self.distribution.entry_points or '') ep = epm.get('setuptools.installation',{}).get('eggsecutable') if ep is None: return 'w' # not an eggsecutable, do it the usual way. 
if not ep.attrs or ep.extras: raise DistutilsSetupError( "eggsecutable entry point (%r) cannot have 'extras' " "or refer to a module" % (ep,) ) pyver = sys.version[:3] pkg = ep.module_name full = '.'.join(ep.attrs) base = ep.attrs[0] basename = os.path.basename(self.egg_output) header = ( "#!/bin/sh\n" 'if [ `basename $0` = "%(basename)s" ]\n' 'then exec python%(pyver)s -c "' "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " "from %(pkg)s import %(base)s; sys.exit(%(full)s())" '" "$@"\n' 'else\n' ' echo $0 is not the correct name for this egg file.\n' ' echo Please rename it back to %(basename)s and try again.\n' ' exec false\n' 'fi\n' ) % locals() if not self.dry_run: mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) f = open(self.egg_output, 'w') f.write(header) f.close() return 'a' def copy_metadata_to(self, target_dir): "Copy metadata (egg info) to the target_dir" # normalize the path (so that a forward-slash in egg_info will # match using startswith below) norm_egg_info = os.path.normpath(self.egg_info) prefix = os.path.join(norm_egg_info,'') for path in self.ei_cmd.filelist.files: if path.startswith(prefix): target = os.path.join(target_dir, path[len(prefix):]) ensure_directory(target) self.copy_file(path, target) def get_ext_outputs(self): """Get a list of relative paths to C extensions in the output distro""" all_outputs = [] ext_outputs = [] paths = {self.bdist_dir:''} for base, dirs, files in os.walk(self.bdist_dir): for filename in files: if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: all_outputs.append(paths[base]+filename) for filename in dirs: paths[os.path.join(base,filename)] = paths[base]+filename+'/' if self.distribution.has_ext_modules(): build_cmd = self.get_finalized_command('build_ext') for ext in build_cmd.extensions: if isinstance(ext,Library): continue fullname = build_cmd.get_ext_fullname(ext.name) filename = build_cmd.get_ext_filename(fullname) if not os.path.basename(filename).startswith('dl-'): 
if os.path.exists(os.path.join(self.bdist_dir,filename)): ext_outputs.append(filename) return all_outputs, ext_outputs NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) def walk_egg(egg_dir): """Walk an unpacked egg's contents, skipping the metadata directory""" walker = os.walk(egg_dir) base,dirs,files = next(walker) if 'EGG-INFO' in dirs: dirs.remove('EGG-INFO') yield base,dirs,files for bdf in walker: yield bdf def analyze_egg(egg_dir, stubs): # check for existing flag in EGG-INFO for flag,fn in safety_flags.items(): if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)): return flag if not can_scan(): return False safe = True for base, dirs, files in walk_egg(egg_dir): for name in files: if name.endswith('.py') or name.endswith('.pyw'): continue elif name.endswith('.pyc') or name.endswith('.pyo'): # always scan, even if we already know we're not safe safe = scan_module(egg_dir, base, name, stubs) and safe return safe def write_safety_flag(egg_dir, safe): # Write or remove zip safety flag file(s) for flag,fn in safety_flags.items(): fn = os.path.join(egg_dir, fn) if os.path.exists(fn): if safe is None or bool(safe) != flag: os.unlink(fn) elif safe is not None and bool(safe)==flag: f = open(fn,'wt') f.write('\n') f.close() safety_flags = { True: 'zip-safe', False: 'not-zip-safe', } def scan_module(egg_dir, base, name, stubs): """Check whether module possibly uses unsafe-for-zipfile stuff""" filename = os.path.join(base,name) if filename[:-1] in stubs: return True # Extension module pkg = base[len(egg_dir)+1:].replace(os.sep,'.') module = pkg+(pkg and '.' 
or '')+os.path.splitext(name)[0] if sys.version_info < (3, 3): skip = 8 # skip magic & date else: skip = 12 # skip magic & date & file size f = open(filename,'rb') f.read(skip) code = marshal.load(f) f.close() safe = True symbols = dict.fromkeys(iter_symbols(code)) for bad in ['__file__', '__path__']: if bad in symbols: log.warn("%s: module references %s", module, bad) safe = False if 'inspect' in symbols: for bad in [ 'getsource', 'getabsfile', 'getsourcefile', 'getfile' 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', 'getinnerframes', 'getouterframes', 'stack', 'trace' ]: if bad in symbols: log.warn("%s: module MAY be using inspect.%s", module, bad) safe = False if '__name__' in symbols and '__main__' in symbols and '.' not in module: if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5 log.warn("%s: top-level module may be 'python -m' script", module) safe = False return safe def iter_symbols(code): """Yield names and strings used by `code` and its nested code objects""" for name in code.co_names: yield name for const in code.co_consts: if isinstance(const,basestring): yield const elif isinstance(const,CodeType): for name in iter_symbols(const): yield name def can_scan(): if not sys.platform.startswith('java') and sys.platform != 'cli': # CPython, PyPy, etc. return True log.warn("Unable to analyze compiled code on this platform.") log.warn("Please ask the author to include a 'zip_safe'" " setting (either True or False) in the package's setup.py") # Attribute names of options for commands that might need to be convinced to # install to the egg build directory INSTALL_DIRECTORY_ATTRS = [ 'install_lib', 'install_dir', 'install_data', 'install_base' ] def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None, mode='w'): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". 
Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ import zipfile mkpath(os.path.dirname(zip_filename), dry_run=dry_run) log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit(z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): p = path[len(base_dir)+1:] if not dry_run: z.write(path, p) log.debug("adding '%s'" % p) if compress is None: compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)] if not dry_run: z = zipfile.ZipFile(zip_filename, mode, compression=compression) for dirname, dirs, files in os.walk(base_dir): visit(z, dirname, files) z.close() else: for dirname, dirs, files in os.walk(base_dir): visit(None, dirname, files) return zip_filename
mit
JulienMcJay/eclock
windows/Python27/Lib/site-packages/docutils/languages/zh_cn.py
148
2026
# -*- coding: utf-8 -*- # $Id: zh_cn.py 4564 2006-05-21 20:44:42Z wiemann $ # Author: Pan Junyong <panjy@zopechina.com> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Simplified Chinese language mappings for language-dependent features of Docutils. """ __docformat__ = 'reStructuredText' labels = { # fixed: language-dependent 'author': u'作者', 'authors': u'作者群', 'organization': u'组织', 'address': u'地址', 'contact': u'联系', 'version': u'版本', 'revision': u'修订', 'status': u'状态', 'date': u'日期', 'copyright': u'版权', 'dedication': u'献辞', 'abstract': u'摘要', 'attention': u'注意', 'caution': u'小心', 'danger': u'危险', 'error': u'错误', 'hint': u'提示', 'important': u'重要', 'note': u'注解', 'tip': u'技巧', 'warning': u'警告', 'contents': u'目录', } """Mapping of node class name to label text.""" bibliographic_fields = { # language-dependent: fixed u'作者': 'author', u'作者群': 'authors', u'组织': 'organization', u'地址': 'address', u'联系': 'contact', u'版本': 'version', u'修订': 'revision', u'状态': 'status', u'时间': 'date', u'版权': 'copyright', u'献辞': 'dedication', u'摘要': 'abstract'} """Simplified Chinese to canonical name mapping for bibliographic fields.""" author_separators = [';', ',', u'\uff1b', # ';' u'\uff0c', # ',' u'\u3001', # '、' ] """List of separator strings for the 'Authors' bibliographic field. Tried in order."""
gpl-2.0
jim-easterbrook/pywws
src/pywws/process.py
1
29244
# pywws - Python software for USB Wireless Weather Stations # http://github.com/jim-easterbrook/pywws # Copyright (C) 2008-21 pywws contributors # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Generate hourly, daily & monthly summaries of raw weather station data :: %s This module takes raw weather station data (typically sampled every five or ten minutes) and generates hourly, daily and monthly summary data, which is useful when creating tables and graphs. Before computing the data summaries, raw data is "calibrated" using a user-programmable function. See :doc:`pywws.calib` for details. The hourly data is derived from all the records in one hour, e.g. from 18:00:00 to 18:59:59, and is given the index of the last complete record in that hour. The daily data summarises the weather over a 24 hour period typically ending at 2100 or 0900 hours, local (non DST) time, though midnight is another popular convention. It is also indexed by the last complete record in the period. Daytime and nighttime, as used when computing maximum and minimum temperatures, are assumed to start at 0900 and 2100 local time, or 1000 and 2200 when DST is in effect, regardless of the meteorological day. 
To adjust the meteorological day to your preference, or that used by your local official weather station, edit the "day end hour" line in your ``weather.ini`` file, then run :mod:`pywws.reprocess` to regenerate the summaries. Monthly summary data is computed from the daily summary data. If the meteorological day does not end at midnight, then each month may begin and end up to 12 hours before or after midnight. Wind speed data is averaged over the hour (or day) and the maximum gust speed during the hour (or day) is recorded. The predominant wind direction is calculated using vector arithmetic. Rainfall is converted from the raw "total since last reset" figure to a more useful total in the last hour, day or month. """ from __future__ import absolute_import, print_function __docformat__ = "restructuredtext en" __usage__ = """ usage: python -m pywws.process [options] data_dir options are: -h or --help display this help -v or --verbose increase number of informative messages data_dir is the root directory of the weather data """ __doc__ %= __usage__ __usage__ = __doc__.split('\n')[0] + __usage__ from ast import literal_eval from collections import deque from datetime import date, datetime, timedelta import getopt import logging import math import os import sys from pywws.calib import Calib from pywws.constants import HOUR, DAY, SECOND import pywws.logger import pywws.storage from pywws.timezone import time_zone logger = logging.getLogger(__name__) TIME_ERR = timedelta(seconds=45) MINUTEx5 = timedelta(minutes=5) HOURx3 = timedelta(hours=3) WEEK = timedelta(days=7) class Average(object): """Compute average of multiple data values.""" def __init__(self): self.acc = 0.0 self.count = 0 def add(self, value): if value is None: return self.acc += value self.count += 1 def result(self): if self.count == 0: return None return self.acc / float(self.count) class Minimum(object): """Compute minimum value and timestamp of multiple data values.""" def __init__(self): self.value = 
None self.time = None def add(self, value, time): if not self.time or value <= self.value: self.value = value self.time = time def result(self): if self.time: return self.value, self.time return None, None class Maximum(object): """Compute maximum value and timestamp of multiple data values.""" def __init__(self): self.value = None self.time = None def add(self, value, time): if not self.time or value > self.value: self.value = value self.time = time def result(self): if self.time: return self.value, self.time return None, None sin_LUT = list(map( lambda x: math.sin(math.radians(float(x * 360) / 16.0)), range(16))) cos_LUT = list(map( lambda x: math.cos(math.radians(float(x * 360) / 16.0)), range(16))) class WindFilter(object): """Compute average wind speed and direction. The wind speed and direction of each data item is converted to a vector before averaging, so the result reflects the dominant wind direction during the time period covered by the data. Setting the ``decay`` parameter converts the filter from a simple averager to one where the most recent sample carries the highest weight, and earlier samples have a lower weight according to how long ago they were. This process is an approximation of "exponential smoothing". See `Wikipedia <http://en.wikipedia.org/wiki/Exponential_smoothing>`_ for a detailed discussion. The parameter ``decay`` corresponds to the value ``(1 - alpha)`` in the Wikipedia description. Because the weather data being smoothed may not be at regular intervals this parameter is the decay over 5 minutes. Weather data at other intervals will have its weight scaled accordingly. The return value is a (speed, direction) tuple. :param decay: filter coefficient decay rate. 
:type decay: float :rtype: (float, float) """ def __init__(self, decay=1.0): self.decay = decay self.Ve = None self.Vn = 0.0 self.total = 0.0 self.weight = 1.0 self.total_weight = 0.0 self.last_idx = None def add(self, data): speed = data['wind_ave'] if speed is None: return if self.last_idx and self.decay != 1.0: interval = data['idx'] - self.last_idx assert interval.days == 0 decay = self.decay if interval != MINUTEx5: decay = decay ** (float(interval.seconds) / float(MINUTEx5.seconds)) self.weight = self.weight / decay self.last_idx = data['idx'] speed = speed * self.weight self.total += speed self.total_weight += self.weight direction = data['wind_dir'] if direction is None: return if self.Ve is None: self.Ve = 0.0 if isinstance(direction, int): self.Ve -= speed * sin_LUT[direction] self.Vn -= speed * cos_LUT[direction] else: direction = math.radians(float(direction) * 22.5) self.Ve -= speed * math.sin(direction) self.Vn -= speed * math.cos(direction) def result(self): if self.total_weight == 0.0: return (None, None) if self.Ve is None: return (self.total / self.total_weight, None) return (self.total / self.total_weight, (math.degrees(math.atan2(self.Ve, self.Vn)) + 180.0) / 22.5) class HourAcc(object): """'Accumulate' raw weather data to produce hourly summary. Compute average wind speed and maximum wind gust, find dominant wind direction and compute total rainfall. 
""" def __init__(self, last_rain): self.last_rain = last_rain self.copy_keys = ['idx', 'hum_in', 'temp_in', 'hum_out', 'temp_out', 'abs_pressure', 'rel_pressure'] self.reset() def reset(self): self.wind_fil = WindFilter() self.wind_gust = (-2.0, None) self.rain = 0.0 self.retval = {} def add_raw(self, data): idx = data['idx'] self.wind_fil.add(data) wind_gust = data['wind_gust'] if wind_gust is not None and wind_gust > self.wind_gust[0]: self.wind_gust = (wind_gust, idx) rain = data['rain'] if rain is not None: if self.last_rain is not None: diff = rain - self.last_rain if diff < -0.001: logger.warning( '%s rain reset %.1f -> %.1f', str(idx), self.last_rain, rain) elif diff > float(data['delay'] * 5): # rain exceeds 5mm / minute, assume corrupt data and ignore it logger.warning( '%s rain jump %.1f -> %.1f', str(idx), self.last_rain, rain) else: self.rain += max(0.0, diff) self.last_rain = rain # copy some current readings if 'illuminance' in data and not 'illuminance' in self.copy_keys: self.copy_keys.append('illuminance') self.copy_keys.append('uv') # if already have data to return, ignore 'lost contact' readings if data['temp_out'] is not None or not self.retval: for key in self.copy_keys: self.retval[key] = data[key] def result(self): if not self.retval: return None self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result() if self.wind_gust[1]: self.retval['wind_gust'] = self.wind_gust[0] else: self.retval['wind_gust'] = None self.retval['rain'] = self.rain return self.retval class DayAcc(object): """'Accumulate' weather data to produce daily summary. Compute average wind speed, maximum wind gust and daytime max & nighttime min temperatures, find dominant wind direction and compute total rainfall. Daytime is assumed to be 0900-2100 and nighttime to be 2100-0900, local time (1000-2200 and 2200-1000 during DST), regardless of the "day end hour" setting. 
""" def __init__(self): self.has_illuminance = False self.ave = {} self.max = {} self.min = {} self.reset() def reset(self): self.wind_fil = WindFilter() self.wind_gust = (-1.0, None) self.rain = 0.0 for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): self.ave[i] = Average() self.max[i] = Maximum() self.min[i] = Minimum() for i in ('illuminance', 'uv'): self.ave[i] = Average() self.max[i] = Maximum() self.retval = dict() def add_raw(self, data): idx = data['idx'] local_hour = time_zone.utc_to_nodst(idx).hour wind_gust = data['wind_gust'] if wind_gust is not None and wind_gust > self.wind_gust[0]: self.wind_gust = (wind_gust, idx) for i in ('temp_in', 'temp_out'): temp = data[i] if temp is not None: self.ave[i].add(temp) if local_hour >= 9 and local_hour < 21: # daytime max temperature self.max[i].add(temp, idx) else: # nighttime min temperature self.min[i].add(temp, idx) for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): value = data[i] if value is not None: self.ave[i].add(value) self.max[i].add(value, idx) self.min[i].add(value, idx) if 'illuminance' in data: self.has_illuminance = True for i in ('illuminance', 'uv'): value = data[i] if value is not None: self.ave[i].add(value) self.max[i].add(value, idx) def add_hourly(self, data): self.wind_fil.add(data) rain = data['rain'] if rain is not None: self.rain += rain self.retval['idx'] = data['idx'] def result(self): if not self.retval: return None self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result() if self.wind_gust[1]: self.retval['wind_gust'] = self.wind_gust[0] else: self.retval['wind_gust'] = None self.retval['wind_gust_t'] = self.wind_gust[1] self.retval['rain'] = self.rain for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): self.retval['%s_ave' % i] = self.ave[i].result() (self.retval['%s_max' % i], self.retval['%s_max_t' % i]) = self.max[i].result() (self.retval['%s_min' % i], self.retval['%s_min_t' % i]) 
= self.min[i].result() if self.has_illuminance: for i in ('illuminance', 'uv'): self.retval['%s_ave' % i] = self.ave[i].result() (self.retval['%s_max' % i], self.retval['%s_max_t' % i]) = self.max[i].result() return self.retval class MonthAcc(object): """'Accumulate' daily weather data to produce monthly summary. Compute daytime max & nighttime min temperatures. """ def __init__(self, rain_day_threshold): self.rain_day_threshold = rain_day_threshold self.has_illuminance = False self.ave = {} self.min = {} self.max = {} self.min_lo = {} self.min_hi = {} self.min_ave = {} self.max_lo = {} self.max_hi = {} self.max_ave = {} self.reset() def reset(self): for i in ('temp_in', 'temp_out'): self.ave[i] = Average() self.min_lo[i] = Minimum() self.min_hi[i] = Maximum() self.min_ave[i] = Average() self.max_lo[i] = Minimum() self.max_hi[i] = Maximum() self.max_ave[i] = Average() for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): self.ave[i] = Average() self.max[i] = Maximum() self.min[i] = Minimum() for i in ('illuminance', 'uv'): self.ave[i] = Average() self.max_lo[i] = Minimum() self.max_hi[i] = Maximum() self.max_ave[i] = Average() self.wind_fil = WindFilter() self.wind_gust = (-1.0, None) self.rain = 0.0 self.rain_days = 0 self.valid = False def add_daily(self, data): self.idx = data['idx'] for i in ('temp_in', 'temp_out'): temp = data['%s_ave' % i] if temp is not None: self.ave[i].add(temp) temp = data['%s_min' % i] if temp is not None: self.min_lo[i].add(temp, data['%s_min_t' % i]) self.min_hi[i].add(temp, data['%s_min_t' % i]) self.min_ave[i].add(temp) temp = data['%s_max' % i] if temp is not None: self.max_lo[i].add(temp, data['%s_max_t' % i]) self.max_hi[i].add(temp, data['%s_max_t' % i]) self.max_ave[i].add(temp) for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): value = data['%s_ave' % i] if value is not None: self.ave[i].add(value) value = data['%s_min' % i] if value is not None: self.min[i].add(value, data['%s_min_t' % i]) value = 
data['%s_max' % i] if value is not None: self.max[i].add(value, data['%s_max_t' % i]) self.wind_fil.add(data) wind_gust = data['wind_gust'] if wind_gust is not None and wind_gust > self.wind_gust[0]: self.wind_gust = (wind_gust, data['wind_gust_t']) if 'illuminance_ave' in data: self.has_illuminance = True for i in ('illuminance', 'uv'): value = data['%s_ave' % i] if value is not None: self.ave[i].add(value) value = data['%s_max' % i] if value is not None: self.max_lo[i].add(value, data['%s_max_t' % i]) self.max_hi[i].add(value, data['%s_max_t' % i]) self.max_ave[i].add(value) self.rain += data['rain'] if data['rain'] >= self.rain_day_threshold: self.rain_days += 1 self.valid = True def result(self): if not self.valid: return None result = {} result['idx'] = self.idx result['rain'] = self.rain result['rain_days'] = self.rain_days for i in ('temp_in', 'temp_out'): result['%s_ave' % i] = self.ave[i].result() result['%s_min_ave' % i] = self.min_ave[i].result() (result['%s_min_lo' % i], result['%s_min_lo_t' % i]) = self.min_lo[i].result() (result['%s_min_hi' % i], result['%s_min_hi_t' % i]) = self.min_hi[i].result() result['%s_max_ave' % i] = self.max_ave[i].result() (result['%s_max_lo' % i], result['%s_max_lo_t' % i]) = self.max_lo[i].result() (result['%s_max_hi' % i], result['%s_max_hi_t' % i]) = self.max_hi[i].result() for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): result['%s_ave' % i] = self.ave[i].result() (result['%s_max' % i], result['%s_max_t' % i]) = self.max[i].result() (result['%s_min' % i], result['%s_min_t' % i]) = self.min[i].result() result['wind_ave'], result['wind_dir'] = self.wind_fil.result() if self.wind_gust[1]: result['wind_gust'] = self.wind_gust[0] else: result['wind_gust'] = None result['wind_gust_t'] = self.wind_gust[1] if self.has_illuminance: for i in ('illuminance', 'uv'): result['%s_ave' % i] = self.ave[i].result() result['%s_max_ave' % i] = self.max_ave[i].result() (result['%s_max_lo' % i], result['%s_max_lo_t' % i]) = 
self.max_lo[i].result() (result['%s_max_hi' % i], result['%s_max_hi_t' % i]) = self.max_hi[i].result() return result def calibrate_data(params, raw_data, calib_data): """'Calibrate' raw data, using a user-supplied function.""" start = calib_data.before(datetime.max) if start is None: start = datetime.min start = raw_data.after(start + SECOND) if start is None: return start del calib_data[start:] calibrator = Calib(params, raw_data) def calibgen(inputdata): """Internal generator function""" count = 0 for data in inputdata: idx = data['idx'] count += 1 if count % 10000 == 0: logger.info("calib: %s", idx.isoformat(' ')) elif count % 500 == 0: logger.debug("calib: %s", idx.isoformat(' ')) for key in ('rain', 'abs_pressure', 'temp_in'): if data[key] is None: logger.error('Ignoring invalid data at %s', idx.isoformat(' ')) break else: yield calibrator.calib(data) calib_data.update(calibgen(raw_data[start:])) return start def generate_hourly(calib_data, hourly_data, process_from): """Generate hourly summaries from calibrated data.""" start = hourly_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start of hour in local time (not all time offsets are integer hours) start = time_zone.hour_start(start) del hourly_data[start:] # preload pressure history, and find last valid rain prev = None pressure_history = deque() last_rain = None for data in calib_data[start - HOURx3:start]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if data['rain'] is not None: last_rain = data['rain'] prev = data # iterate over data in one hour chunks stop = calib_data.before(datetime.max) acc = HourAcc(last_rain) def hourlygen(inputdata, prev): """Internal generator function""" hour_start = start count = 0 while hour_start <= stop: count += 1 if count % 1008 == 0: 
logger.info("hourly: %s", hour_start.isoformat(' ')) elif count % 24 == 0: logger.debug("hourly: %s", hour_start.isoformat(' ')) hour_end = hour_start + HOUR acc.reset() for data in inputdata[hour_start:hour_end]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if prev: err = data['idx'] - prev['idx'] if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR: logger.info('unexpected data interval %s %s', data['idx'].isoformat(' '), str(err)) acc.add_raw(data) prev = data new_data = acc.result() if new_data and (new_data['idx'] - hour_start) >= timedelta(minutes=9): # compute pressure trend new_data['pressure_trend'] = None if new_data['rel_pressure']: target = new_data['idx'] - HOURx3 while (len(pressure_history) >= 2 and abs(pressure_history[0][0] - target) > abs(pressure_history[1][0] - target)): pressure_history.popleft() if (pressure_history and abs(pressure_history[0][0] - target) < HOUR): new_data['pressure_trend'] = ( new_data['rel_pressure'] - pressure_history[0][1]) # store new hourly data yield new_data hour_start = hour_end hourly_data.update(hourlygen(calib_data, prev)) return start def generate_daily(day_end_hour, use_dst, calib_data, hourly_data, daily_data, process_from): """Generate daily summaries from calibrated and hourly data.""" start = daily_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # round to start of this day, in local time start = time_zone.day_start(start, day_end_hour, use_dst=use_dst) del daily_data[start:] stop = calib_data.before(datetime.max) acc = DayAcc() def dailygen(inputdata): """Internal generator function""" day_start = start count = 0 while day_start <= stop: count += 1 if count % 30 == 0: logger.info("daily: %s", day_start.isoformat(' ')) else: logger.debug("daily: %s", day_start.isoformat(' ')) day_end = 
day_start + DAY if use_dst: # day might be 23 or 25 hours long day_end = time_zone.day_start( day_end + HOURx3, day_end_hour, use_dst=use_dst) acc.reset() for data in inputdata[day_start:day_end]: acc.add_raw(data) for data in hourly_data[day_start:day_end]: acc.add_hourly(data) new_data = acc.result() if new_data: new_data['start'] = day_start yield new_data day_start = day_end daily_data.update(dailygen(calib_data)) return start def generate_monthly(rain_day_threshold, day_end_hour, use_dst, daily_data, monthly_data, process_from): """Generate monthly summaries from daily data.""" start = monthly_data.before(datetime.max) if start is None: start = datetime.min start = daily_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start to noon on start of first day of month (local time) local_start = time_zone.utc_to_local(start).replace(tzinfo=None) local_start = local_start.replace(day=1, hour=12, minute=0, second=0) # go back to UTC and get start of day (which might be previous day) start = time_zone.local_to_utc(local_start) start = time_zone.day_start(start, day_end_hour, use_dst=use_dst) del monthly_data[start:] stop = daily_data.before(datetime.max) if stop is None: return None def monthlygen(inputdata, start, local_start): """Internal generator function""" acc = MonthAcc(rain_day_threshold) month_start = start count = 0 while month_start <= stop: count += 1 if count % 12 == 0: logger.info("monthly: %s", month_start.isoformat(' ')) else: logger.debug("monthly: %s", month_start.isoformat(' ')) if local_start.month < 12: local_start = local_start.replace(month=local_start.month+1) else: local_start = local_start.replace( month=1, year=local_start.year+1) month_end = time_zone.local_to_utc(local_start) month_end = time_zone.day_start( month_end, day_end_hour, use_dst=use_dst) acc.reset() for data in inputdata[month_start:month_end]: acc.add_daily(data) new_data = 
acc.result() if new_data: new_data['start'] = month_start yield new_data month_start = month_end monthly_data.update(monthlygen(daily_data, start, local_start)) return start def get_day_end_hour(params): # get daytime end hour (in local time) day_end_hour, use_dst = literal_eval( params.get('config', 'day end hour', '9, False')) day_end_hour = day_end_hour % 24 return day_end_hour, use_dst def process_data(context): """Generate summaries from raw weather station data. The meteorological day end (typically 2100 or 0900 local time) is set in the preferences file ``weather.ini``. The default value is 2100 (2200 during DST), following the historical convention for weather station readings. """ logger.info('Generating summary data') # get time of last record last_raw = context.raw_data.before(datetime.max) if last_raw is None: raise IOError('No data found. Check data directory parameter.') # get daytime end hour (in local time) day_end_hour, use_dst = get_day_end_hour(context.params) # get other config rain_day_threshold = float( context.params.get('config', 'rain day threshold', '0.2')) # calibrate raw data start = calibrate_data(context.params, context.raw_data, context.calib_data) # generate hourly data start = generate_hourly(context.calib_data, context.hourly_data, start) # generate daily data start = generate_daily(day_end_hour, use_dst, context.calib_data, context.hourly_data, context.daily_data, start) # generate monthly data generate_monthly(rain_day_threshold, day_end_hour, use_dst, context.daily_data, context.monthly_data, start) return 0 def main(argv=None): if argv is None: argv = sys.argv try: opts, args = getopt.getopt(argv[1:], "hv", ['help', 'verbose']) except getopt.error as msg: print('Error: %s\n' % msg, file=sys.stderr) print(__usage__.strip(), file=sys.stderr) return 1 # process options verbose = 0 for o, a in opts: if o in ('-h', '--help'): print(__usage__.strip()) return 0 elif o in ('-v', '--verbose'): verbose += 1 # check arguments if len(args) 
!= 1: print('Error: 1 argument required\n', file=sys.stderr) print(__usage__.strip(), file=sys.stderr) return 2 pywws.logger.setup_handler(verbose) data_dir = args[0] with pywws.storage.pywws_context(data_dir) as context: return process_data(context) if __name__ == "__main__": sys.exit(main())
gpl-2.0
WillGuan105/django
django/db/backends/mysql/features.py
176
2682
from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property from .base import Database try: import pytz except ImportError: pytz = None class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () update_can_self_select = False allows_group_by_pk = True related_fields_match_type = True allow_sliced_subqueries = False has_bulk_insert = True has_select_for_update = True has_select_for_update_nowait = False supports_forward_references = False supports_regex_backreferencing = False supports_date_lookup_using_string = False can_introspect_autofield = True can_introspect_binary_field = False can_introspect_small_integer_field = True supports_timezones = False requires_explicit_null_ordering_when_grouping = True allows_auto_pk_0 = False uses_savepoints = True can_release_savepoints = True atomic_transactions = False supports_column_check_constraints = False can_clone_databases = True @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" with self.connection.cursor() as cursor: cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'") result = cursor.fetchone() return result[0] @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != 'MyISAM' @cached_property def supports_microsecond_precision(self): # See https://github.com/farcepest/MySQLdb1/issues/24 for the reason # about requiring MySQLdb 1.2.5 return self.connection.mysql_version >= (5, 6, 4) and Database.version_info >= (1, 2, 5) @cached_property def has_zoneinfo_database(self): # MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects # abbreviations (eg. EAT). When pytz isn't installed and the current # time zone is LocalTimezone (the only sensible value in this # context), the current time zone name will be an abbreviation. 
As a # consequence, MySQL cannot perform time zone conversions reliably. if pytz is None: return False # Test if the time zone definitions are installed. with self.connection.cursor() as cursor: cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1") return cursor.fetchone() is not None def introspected_boolean_field_type(self, *args, **kwargs): return 'IntegerField'
bsd-3-clause
krichter722/binutils-gdb
gdb/testsuite/gdb.python/py-typeprint.py
46
1128
# Copyright (C) 2012-2015 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import gdb class Recognizer(object): def __init__(self): self.enabled = True def recognize(self, type_obj): if type_obj.tag == 'basic_string': return 'string' return None class StringTypePrinter(object): def __init__(self): self.name = 'string' self.enabled = True def instantiate(self): return Recognizer() gdb.type_printers.append(StringTypePrinter())
gpl-2.0
XiaosongWei/chromium-crosswalk
third_party/closure_linter/closure_linter/error_check.py
95
3900
#!/usr/bin/env python # # Copyright 2011 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Specific JSLint errors checker.""" import gflags as flags FLAGS = flags.FLAGS class Rule(object): """Different rules to check.""" # Documentations for specific rules goes in flag definition. BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level' INDENTATION = 'indentation' WELL_FORMED_AUTHOR = 'well_formed_author' NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc' BRACES_AROUND_TYPE = 'braces_around_type' OPTIONAL_TYPE_MARKER = 'optional_type_marker' VARIABLE_ARG_MARKER = 'variable_arg_marker' UNUSED_PRIVATE_MEMBERS = 'unused_private_members' UNUSED_LOCAL_VARIABLES = 'unused_local_variables' # Rule to raise all known errors. ALL = 'all' # All rules that are to be checked when using the strict flag. E.g. the rules # that are specific to the stricter Closure style. CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL, INDENTATION, WELL_FORMED_AUTHOR, NO_BRACES_AROUND_INHERIT_DOC, BRACES_AROUND_TYPE, OPTIONAL_TYPE_MARKER, VARIABLE_ARG_MARKER]) flags.DEFINE_boolean('strict', False, 'Whether to validate against the stricter Closure style. ' 'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.') flags.DEFINE_multistring('jslint_error', [], 'List of specific lint errors to check. 
Here is a list' ' of accepted values:\n' ' - ' + Rule.ALL + ': enables all following errors.\n' ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates' 'number of blank lines between blocks at top level.\n' ' - ' + Rule.INDENTATION + ': checks correct ' 'indentation of code.\n' ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the ' '@author JsDoc tags.\n' ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': ' 'forbids braces around @inheritdoc JsDoc tags.\n' ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces ' 'around types in JsDoc tags.\n' ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct ' 'use of optional marker = in param types.\n' ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for ' 'unused private variables.\n' ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for ' 'unused local variables.\n') def ShouldCheck(rule): """Returns whether the optional rule should be checked. Computes different flags (strict, jslint_error, jslint_noerror) to find out if this specific rule should be checked. Args: rule: Name of the rule (see Rule). Returns: True if the rule should be checked according to the flags, otherwise False. """ if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error: return True # Checks strict rules. return FLAGS.strict and rule in Rule.CLOSURE_RULES
bsd-3-clause
brickfiestastem/brickfiesta
shop/views.py
1
11431
import datetime import json import urllib.error import urllib.parse import urllib.request import uuid from django.conf import settings from django.contrib import messages from django.contrib.auth.models import User from django.core.mail import EmailMessage from django.core.mail import send_mail from django.shortcuts import render, redirect from django.template import loader from django.urls import reverse from django.utils.html import format_html from django.views import View from django.views.generic import DetailView, FormView from django.views.generic.detail import SingleObjectMixin from django.views.generic.list import ListView from event.models import Event from shop.utils import check_recaptcha from .cart import ShoppingCart from .forms import CartItemForm from .models import Product, Order, OrderItem from .utils import add_attendee_fan_badge_shirt # Create your views here. class EventListView(ListView): queryset = Event.objects.all().order_by('start_date').filter( start_date__gt=datetime.date.today()) template_name = 'shop/event_list.html' class EventProductView(View): def get(self, request, event_id): obj_products = Product.objects.filter( event__id__exact=event_id, is_public=True).order_by('product_type').extra( select={'is_top': "product_type = '" + Product.EXHIBITION + "'"}) date_two_weeks = datetime.date.today() + datetime.timedelta(days=14) if obj_products.first().event.start_date <= date_two_weeks: obj_products = obj_products.extra( order_by=['-is_top', 'product_type']) return render(request, 'shop/product_list.html', {'object_list': obj_products, 'first': obj_products.first()}) class CartTestView(View): def get(self, request): str_checkout_id = request.GET.get('checkoutId', None) str_reference_id = request.GET.get('referenceId', None) if str_reference_id: request.session['cart_id'] = str_reference_id if str_checkout_id: request.session['checkout_id'] = str_checkout_id obj_cart = ShoppingCart(request) return render(request, 'shop/cart_contents.html', 
{'error_message': obj_cart.get_debug(request), 'cart': obj_cart.get_basket(), 'cart_total': obj_cart.total()}) class CartCheckoutView(View): def get(self, request): list_message = list() obj_cart = ShoppingCart(request) str_checkout_id = request.GET.get('checkoutId', "INVALID") str_reference_id = request.GET.get('referenceId', "INVALID") str_transaction_id = request.GET.get('transactionId', "INVALID") if obj_cart.check_checkout_id(str_checkout_id): # valid save everything in the users obj_order = None obj_basket = obj_cart.get_basket() for obj_item in obj_basket: obj_user = None try: obj_user = User.objects.get(email=obj_item.email) list_message.append( "Found existing customer information " + obj_item.email + ".") except User.DoesNotExist: obj_user = User.objects.create_user(username=obj_item.email, email=obj_item.email, first_name=obj_item.first_name, last_name=obj_item.last_name, password=uuid.uuid4()) list_message.append( "Created a user for " + obj_item.email + ". Please check your email for password instructions.") send_mail(subject="Brick Fiesta - New Account Created", message=loader.render_to_string( "afol/new_account_email.html"), from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[obj_item.email]) if obj_order is None: if request.user.is_authenticated: obj_order = Order(user=request.user, transaction_id=str_transaction_id, reference_id=str_reference_id, guest="") else: obj_order = Order(user=obj_user, transaction_id=str_transaction_id, reference_id=str_reference_id, guest="") obj_order.save() list_message.append( "Order associated with " + obj_item.email + ".") obj_order_item = OrderItem(order=obj_order, user=obj_user, first_name=obj_item.first_name, last_name=obj_item.last_name, product=obj_item.product, price=obj_item.product.price) # if obj_item.product.quantity_available > 0: # obj_product = obj_item.product # obj_product.quantity_available -= 1 # obj_product.save() obj_order_item.save() list_message.append( "Order item " + 
obj_order_item.product.title + " associated with " + obj_item.email + ".") add_attendee_fan_badge_shirt(request, obj_order_item) obj_cart.clear() else: list_message.append( "It looks like there was an problem with your cart and processing it.") list_message.append( "We have gathered the data and have sent an email to look into the issue.") list_message.append( "If you do not hear back in a few days please contact us using the contact form.") str_body = "JSON: " + obj_cart.get_debug(request) + "\n\nReference: " + str_reference_id + \ "\n\nTransaction: " + str_transaction_id email = EmailMessage( 'Brick Fiesta - URGENT - Cart Error', str_body, to=[settings.DEFAULT_FROM_EMAIL]) email.send() obj_cart.clear() return render(request, 'shop/cart_complete.html', {'message': list_message, }) class CartView(View): def post(self, request, *args, **kwargs): str_error_message = False obj_cart = ShoppingCart(request) if 'cart_item' in request.POST: obj_cart.remove(request.POST['cart_item']) if 'cart' in request.POST: # generate json objects str_json = obj_cart.get_json() str_json = str_json.encode('utf-8') print(str_json) str_url = "https://connect.squareup.com/v2/locations/" + \ settings.SQUARE_LOCATION_KEY + "/checkouts" # send request for objects obj_request = urllib.request.Request(url=str_url) obj_request.add_header( 'Authorization', 'Bearer ' + settings.SQUARE_CART_KEY) obj_request.add_header( 'Content-Type', 'application/json; charset=utf-8') obj_request.add_header('Accept', 'application/json') # get response obj_response = "" try: obj_response = urllib.request.urlopen( obj_request, data=str_json) except urllib.error.URLError as obj_error: # print(obj_error.reason) str_error_message = "Unable to reach payment server. Please try again later." 
str_body = "URL: " + str_url + "\n\nJSON: " + \ str_json.decode('ascii') + "\n\nRESPONSE:" + obj_response email = EmailMessage( 'Brick Fiesta - Check Out URL Error', str_body, to=[settings.DEFAULT_FROM_EMAIL]) email.send() pass except urllib.error.HTTPError as obj_error: str_error_message = "Unable to process payment correctly. Error sent to event organizers." str_body = "URL: " + str_url + "\n\nJSON: " + \ str_json.decode('ascii') + "\n\nRESPONSE:" + obj_response email = EmailMessage( 'Brick Fiesta - Check Out HTTP Error', str_body, to=[settings.DEFAULT_FROM_EMAIL]) email.send() # print(obj_error.code) # print(obj_error.read()) pass else: result = json.loads(obj_response.read().decode()) # print(result) obj_cart.set_checkout_id(request, result['checkout']['id']) return redirect(result['checkout']['checkout_page_url']) return render(request, 'shop/cart_contents.html', {'error_message': str_error_message, 'cart': obj_cart.get_basket(), 'cart_total': obj_cart.total()}) def get(self, request, token=None): if token: request.session['cart'] = str(token) obj_cart = ShoppingCart(request) return render(request, 'shop/cart_contents.html', {'cart': obj_cart.get_basket(), 'cart_total': obj_cart.total()}) class ProductDetailView(DetailView): model = Product def get_context_data(self, **kwargs): context = super(ProductDetailView, self).get_context_data(**kwargs) context['form'] = CartItemForm() return context class ProductCartItemView(SingleObjectMixin, FormView): template_name = 'shop/product_detail.html' form_class = CartItemForm model = Product def post(self, request, *args, **kwargs): cart = ShoppingCart(request) self.object = self.get_object() form = CartItemForm(request.POST) if not check_recaptcha(request): form.add_error( None, 'You failed the human test. 
Try the reCAPTCHA again.') if form.is_valid(): cart.add(first_name=form.cleaned_data['first_name'], last_name=form.cleaned_data['last_name'], email=form.cleaned_data['email'], product=self.object) messages.info(request, format_html( 'Product added to <a href="{}">cart</a>.', reverse('shop:cart'))) return super(ProductCartItemView, self).post(request, *args, **kwargs) def get_success_url(self): return reverse('shop:event', kwargs={'event_id': self.object.event.id}) class ProductDetail(View): def get(self, request, *args, **kwargs): view = ProductDetailView.as_view() return view(request, *args, **kwargs) def post(self, request, *args, **kwargs): view = ProductCartItemView.as_view() return view(request, *args, **kwargs)
agpl-3.0
GdZ/scriptfile
software/googleAppEngine/lib/django_1_3/django/utils/translation/trans_real.py
54
22505
"""Translation helper functions.""" import locale import os import re import sys import warnings import gettext as gettext_module from cStringIO import StringIO from threading import local from django.utils.importlib import import_module from django.utils.safestring import mark_safe, SafeData # Translations are cached in a dictionary for every language+app tuple. # The active translations are stored by threadid to make them thread local. _translations = {} _active = local() # The default translation is based on the settings file. _default = None # This is a cache for normalized accept-header languages to prevent multiple # file lookups when checking the same locale on repeated requests. _accepted = {} # magic gettext number to separate context from message CONTEXT_SEPARATOR = u"\x04" # Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9. accept_language_re = re.compile(r''' ([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*" (?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8" (?:\s*,\s*|$) # Multiple accepts per header. ''', re.VERBOSE) def to_locale(language, to_lower=False): """ Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is True, the last component is lower-cased (en_us). """ p = language.find('-') if p >= 0: if to_lower: return language[:p].lower()+'_'+language[p+1:].lower() else: # Get correct locale for sr-latn if len(language[p+1:]) > 2: return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower() return language[:p].lower()+'_'+language[p+1:].upper() else: return language.lower() def to_language(locale): """Turns a locale name (en_US) into a language name (en-us).""" p = locale.find('_') if p >= 0: return locale[:p].lower()+'-'+locale[p+1:].lower() else: return locale.lower() class DjangoTranslation(gettext_module.GNUTranslations): """ This class sets up the GNUTranslations context with regard to output charset. 
Django uses a defined DEFAULT_CHARSET as the output charset on Python 2.4. """ def __init__(self, *args, **kw): gettext_module.GNUTranslations.__init__(self, *args, **kw) # Starting with Python 2.4, there's a function to define # the output charset. Before 2.4, the output charset is # identical with the translation file charset. try: self.set_output_charset('utf-8') except AttributeError: pass self.django_output_charset = 'utf-8' self.__language = '??' def merge(self, other): self._catalog.update(other._catalog) def set_language(self, language): self.__language = language self.__to_language = to_language(language) def language(self): return self.__language def to_language(self): return self.__to_language def __repr__(self): return "<DjangoTranslation lang:%s>" % self.__language def translation(language): """ Returns a translation object. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct a object for the requested language and add a fallback to the default language, if it's different from the requested language. 
""" global _translations t = _translations.get(language, None) if t is not None: return t from django.conf import settings globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale') if settings.SETTINGS_MODULE is not None: parts = settings.SETTINGS_MODULE.split('.') project = import_module(parts[0]) projectpath = os.path.join(os.path.dirname(project.__file__), 'locale') else: projectpath = None def _fetch(lang, fallback=None): global _translations res = _translations.get(lang, None) if res is not None: return res loc = to_locale(lang) def _translation(path): try: t = gettext_module.translation('django', path, [loc], DjangoTranslation) t.set_language(lang) return t except IOError: return None res = _translation(globalpath) # We want to ensure that, for example, "en-gb" and "en-us" don't share # the same translation object (thus, merging en-us with a local update # doesn't affect en-gb), even though they will both use the core "en" # translation. So we have to subvert Python's internal gettext caching. 
base_lang = lambda x: x.split('-', 1)[0] if base_lang(lang) in [base_lang(trans) for trans in _translations]: res._info = res._info.copy() res._catalog = res._catalog.copy() def _merge(path): t = _translation(path) if t is not None: if res is None: return t else: res.merge(t) return res for appname in reversed(settings.INSTALLED_APPS): app = import_module(appname) apppath = os.path.join(os.path.dirname(app.__file__), 'locale') if os.path.isdir(apppath): res = _merge(apppath) localepaths = [os.path.normpath(path) for path in settings.LOCALE_PATHS] if (projectpath and os.path.isdir(projectpath) and os.path.normpath(projectpath) not in localepaths): res = _merge(projectpath) for localepath in reversed(settings.LOCALE_PATHS): if os.path.isdir(localepath): res = _merge(localepath) if res is None: if fallback is not None: res = fallback else: return gettext_module.NullTranslations() _translations[lang] = res return res default_translation = _fetch(settings.LANGUAGE_CODE) current_translation = _fetch(language, fallback=default_translation) return current_translation def activate(language): """ Fetches the translation object for a given tuple of application name and language and installs it as the current translation object for the current thread. """ if isinstance(language, basestring) and language == 'no': warnings.warn( "The use of the language code 'no' is deprecated. " "Please use the 'nb' translation instead.", DeprecationWarning ) _active.value = translation(language) def deactivate(): """ Deinstalls the currently active translation object so that further _ calls will resolve against the default translation object, again. """ if hasattr(_active, "value"): del _active.value def deactivate_all(): """ Makes the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason. 
""" _active.value = gettext_module.NullTranslations() def get_language(): """Returns the currently selected language.""" t = getattr(_active, "value", None) if t is not None: try: return t.to_language() except AttributeError: pass # If we don't have a real translation object, assume it's the default language. from django.conf import settings return settings.LANGUAGE_CODE def get_language_bidi(): """ Returns selected language's BiDi layout. * False = left-to-right layout * True = right-to-left layout """ from django.conf import settings base_lang = get_language().split('-')[0] return base_lang in settings.LANGUAGES_BIDI def catalog(): """ Returns the current active catalog for further processing. This can be used if you need to modify the catalog or want to access the whole message catalog instead of just translating one string. """ global _default t = getattr(_active, "value", None) if t is not None: return t if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) return _default def do_translate(message, translation_function): """ Translates 'message' using the given 'translation_function' name -- which will be either gettext or ugettext. It uses the current thread to find the translation object to use. If no current translation is activated, the message will be run through the default translation object. 
""" global _default eol_message = message.replace('\r\n', '\n').replace('\r', '\n') t = getattr(_active, "value", None) if t is not None: result = getattr(t, translation_function)(eol_message) else: if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) result = getattr(_default, translation_function)(eol_message) if isinstance(message, SafeData): return mark_safe(result) return result def gettext(message): return do_translate(message, 'gettext') def ugettext(message): return do_translate(message, 'ugettext') def pgettext(context, message): result = do_translate( u"%s%s%s" % (context, CONTEXT_SEPARATOR, message), 'ugettext') if CONTEXT_SEPARATOR in result: # Translation not found result = message return result def gettext_noop(message): """ Marks strings for translation but doesn't translate them now. This can be used to store strings in global variables that should stay in the base language (because they might be used externally) and will be translated later. """ return message def do_ntranslate(singular, plural, number, translation_function): global _default t = getattr(_active, "value", None) if t is not None: return getattr(t, translation_function)(singular, plural, number) if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) return getattr(_default, translation_function)(singular, plural, number) def ngettext(singular, plural, number): """ Returns a UTF-8 bytestring of the translation of either the singular or plural, based on the number. """ return do_ntranslate(singular, plural, number, 'ngettext') def ungettext(singular, plural, number): """ Returns a unicode strings of the translation of either the singular or plural, based on the number. 
""" return do_ntranslate(singular, plural, number, 'ungettext') def npgettext(context, singular, plural, number): result = do_ntranslate(u"%s%s%s" % (context, CONTEXT_SEPARATOR, singular), u"%s%s%s" % (context, CONTEXT_SEPARATOR, plural), number, 'ungettext') if CONTEXT_SEPARATOR in result: # Translation not found result = do_ntranslate(singular, plural, number, 'ungettext') return result def all_locale_paths(): """ Returns a list of paths to user-provides languages files. """ from django.conf import settings globalpath = os.path.join( os.path.dirname(sys.modules[settings.__module__].__file__), 'locale') return [globalpath] + list(settings.LOCALE_PATHS) def check_for_language(lang_code): """ Checks whether there is a global language file for the given language code. This is used to decide whether a user-provided language is available. This is only used for language codes from either the cookies or session and during format localization. """ for path in all_locale_paths(): if gettext_module.find('django', path, [to_locale(lang_code)]) is not None: return True return False def get_language_from_request(request): """ Analyzes the request to find what language the user wants the system to show. Only languages listed in settings.LANGUAGES are taken into account. If the user requests a sublanguage where we have a main language, we send out the main language. """ global _accepted from django.conf import settings supported = dict(settings.LANGUAGES) if hasattr(request, 'session'): lang_code = request.session.get('django_language', None) if lang_code in supported and lang_code is not None and check_for_language(lang_code): return lang_code lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME) if lang_code and lang_code not in supported: lang_code = lang_code.split('-')[0] # e.g. 
if fr-ca is not supported fallback to fr if lang_code and lang_code in supported and check_for_language(lang_code): return lang_code accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == '*': break # We have a very restricted form for our language files (no encoding # specifier, since they all must be UTF-8 and only one possible # language each time. So we avoid the overhead of gettext.find() and # work out the MO file manually. # 'normalized' is the root name of the locale in POSIX format (which is # the format used for the directories holding the MO files). normalized = locale.locale_alias.get(to_locale(accept_lang, True)) if not normalized: continue # Remove the default encoding from locale_alias. normalized = normalized.split('.')[0] if normalized in _accepted: # We've seen this locale before and have an MO file for it, so no # need to check again. return _accepted[normalized] for lang, dirname in ((accept_lang, normalized), (accept_lang.split('-')[0], normalized.split('_')[0])): if lang.lower() not in supported: continue for path in all_locale_paths(): if os.path.exists(os.path.join(path, dirname, 'LC_MESSAGES', 'django.mo')): _accepted[normalized] = lang return lang return settings.LANGUAGE_CODE dot_re = re.compile(r'\S') def blankout(src, char): """ Changes every non-whitespace character to the given char. Used in the templatize function. """ return dot_re.sub(char, src) inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""") block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""") endblock_re = re.compile(r"""^\s*endblocktrans$""") plural_re = re.compile(r"""^\s*plural$""") constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""") def templatize(src, origin=None): """ Turns a Django template into something that is understood by xgettext. It does so by translating the Django translation tags into standard gettext function invocations. 
""" from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK) out = StringIO() intrans = False inplural = False singular = [] plural = [] incomment = False comment = [] for t in Lexer(src, origin).tokenize(): if incomment: if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment': content = ''.join(comment) translators_comment_start = None for lineno, line in enumerate(content.splitlines(True)): if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK): translators_comment_start = lineno for lineno, line in enumerate(content.splitlines(True)): if translators_comment_start is not None and lineno >= translators_comment_start: out.write(' # %s' % line) else: out.write(' #\n') incomment = False comment = [] else: comment.append(t.contents) elif intrans: if t.token_type == TOKEN_BLOCK: endbmatch = endblock_re.match(t.contents) pluralmatch = plural_re.match(t.contents) if endbmatch: if inplural: out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural))) for part in singular: out.write(blankout(part, 'S')) for part in plural: out.write(blankout(part, 'P')) else: out.write(' gettext(%r) ' % ''.join(singular)) for part in singular: out.write(blankout(part, 'S')) intrans = False inplural = False singular = [] plural = [] elif pluralmatch: inplural = True else: filemsg = '' if origin: filemsg = 'file %s, ' % origin raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno)) elif t.token_type == TOKEN_VAR: if inplural: plural.append('%%(%s)s' % t.contents) else: singular.append('%%(%s)s' % t.contents) elif t.token_type == TOKEN_TEXT: contents = t.contents.replace('%', '%%') if inplural: plural.append(contents) else: singular.append(contents) else: if t.token_type == TOKEN_BLOCK: imatch = inline_re.match(t.contents) bmatch = block_re.match(t.contents) cmatches = constant_re.findall(t.contents) if imatch: g = imatch.group(1) if g[0] == '"': g = 
g.strip('"') elif g[0] == "'": g = g.strip("'") out.write(' gettext(%r) ' % g) elif bmatch: for fmatch in constant_re.findall(t.contents): out.write(' _(%s) ' % fmatch) intrans = True inplural = False singular = [] plural = [] elif cmatches: for cmatch in cmatches: out.write(' _(%s) ' % cmatch) elif t.contents == 'comment': incomment = True else: out.write(blankout(t.contents, 'B')) elif t.token_type == TOKEN_VAR: parts = t.contents.split('|') cmatch = constant_re.match(parts[0]) if cmatch: out.write(' _(%s) ' % cmatch.group(1)) for p in parts[1:]: if p.find(':_(') >= 0: out.write(' %s ' % p.split(':',1)[1]) else: out.write(blankout(p, 'F')) elif t.token_type == TOKEN_COMMENT: out.write(' # %s' % t.contents) else: out.write(blankout(t.contents, 'X')) return out.getvalue() def parse_accept_lang_header(lang_string): """ Parses the lang_string, which is the body of an HTTP Accept-Language header, and returns a list of (lang, q-value), ordered by 'q' values. Any format errors in lang_string results in an empty list being returned. """ result = [] pieces = accept_language_re.split(lang_string) if pieces[-1]: return [] for i in range(0, len(pieces) - 1, 3): first, lang, priority = pieces[i : i + 3] if first: return [] priority = priority and float(priority) or 1.0 result.append((lang, priority)) result.sort(key=lambda k: k[1], reverse=True) return result # get_date_formats and get_partial_date_formats aren't used anymore by Django # and are kept for backward compatibility. # Note, it's also important to keep format names marked for translation. # For compatibility we still want to have formats on translation catalogs. # That makes template code like {{ my_date|date:_('DATE_FORMAT') }} still work def get_date_formats(): """ Checks whether translation files provide a translation for some technical message ID to store date and time formats. If it doesn't contain one, the formats provided in the settings will be used. 
""" warnings.warn( "'django.utils.translation.get_date_formats' is deprecated. " "Please update your code to use the new i18n aware formatting.", DeprecationWarning ) from django.conf import settings date_format = ugettext('DATE_FORMAT') datetime_format = ugettext('DATETIME_FORMAT') time_format = ugettext('TIME_FORMAT') if date_format == 'DATE_FORMAT': date_format = settings.DATE_FORMAT if datetime_format == 'DATETIME_FORMAT': datetime_format = settings.DATETIME_FORMAT if time_format == 'TIME_FORMAT': time_format = settings.TIME_FORMAT return date_format, datetime_format, time_format def get_partial_date_formats(): """ Checks whether translation files provide a translation for some technical message ID to store partial date formats. If it doesn't contain one, the formats provided in the settings will be used. """ warnings.warn( "'django.utils.translation.get_partial_date_formats' is deprecated. " "Please update your code to use the new i18n aware formatting.", DeprecationWarning ) from django.conf import settings year_month_format = ugettext('YEAR_MONTH_FORMAT') month_day_format = ugettext('MONTH_DAY_FORMAT') if year_month_format == 'YEAR_MONTH_FORMAT': year_month_format = settings.YEAR_MONTH_FORMAT if month_day_format == 'MONTH_DAY_FORMAT': month_day_format = settings.MONTH_DAY_FORMAT return year_month_format, month_day_format
mit
2014c2g3/w16b_test
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/locals.py
603
1141
## pygame - Python Game Library ## Copyright (C) 2000-2003 Pete Shinners ## ## This library is free software; you can redistribute it and/or ## modify it under the terms of the GNU Library General Public ## License as published by the Free Software Foundation; either ## version 2 of the License, or (at your option) any later version. ## ## This library is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## Library General Public License for more details. ## ## You should have received a copy of the GNU Library General Public ## License along with this library; if not, write to the Free ## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ## ## Pete Shinners ## pete@shinners.org """Set of functions from PyGame that are handy to have in the local namespace for your module""" from pygame.constants import * from pygame.rect import Rect import pygame.color as color Color = color.Color
agpl-3.0
markeTIC/OCB
addons/auth_crypt/__openerp__.py
310
2298
# -*- encoding: utf-8 -*- ############################################################################## # # Odoo, Open Source Management Solution # Copyright (C) 2004-2014 OpenERP S.A. (<http://odoo.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Password Encryption', 'version': '2.0', 'author': ['OpenERP SA', 'FS3'], 'maintainer': 'OpenERP SA', 'website': 'https://www.odoo.com', 'category': 'Tools', 'description': """ Encrypted passwords =================== Replaces the default password storage with a strong cryptographic hash. The key derivation function currently used is RSA Security LLC's industry-standard ``PKDF2``, in combination with ``SHA512``. This includes salting and key stretching with several thousands rounds. All passwords are encrypted as soon as the module is installed. This may take a few minutes if there are thousands of users. Past versions of encrypted passwords will be automatically upgraded to the current scheme whenever a user authenticates (``auth_crypt`` was previously using the weaker ``md5crypt`` key derivation function). Note: Installing this module permanently prevents user password recovery and cannot be undone. 
It is thus recommended to enable some password reset mechanism for users, such as the one provided by the ``auth_signup`` module (signup for new users does not necessarily have to be enabled). """, 'depends': ['base'], 'data': [], 'auto_install': True, 'installable': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
PeterWangIntel/chromium-crosswalk
chrome/browser/resources/PRESUBMIT_test.py
42
4389
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys import imp import tempfile import unittest import PRESUBMIT sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( os.path.abspath(__file__)))))) from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi from PRESUBMIT_test_mocks import MockFile, MockChange class HTMLActionAdditionTest(unittest.TestCase): def testActionXMLChanged(self): mock_input_api = MockInputApi() lines = ['<input id="testinput" pref="testpref"', 'metric="validaction" type="checkbox" dialog-pref>'] mock_input_api.files = [MockFile('path/valid.html', lines)] mock_input_api.change = MockChange(['path/valid.html','actions.xml']) action_xml_path = self._createActionXMLFile() self.assertEqual([], PRESUBMIT.CheckUserActionUpdate(mock_input_api, MockOutputApi(), action_xml_path)) def testValidChange_StartOfLine(self): lines = ['<input id="testinput" pref="testpref"', 'metric="validaction" type="checkbox" dialog-pref>'] self.assertEqual([], self._testChange(lines)) def testValidChange_StartsWithSpace(self): lines = ['<input id="testinput" pref="testpref"', ' metric="validaction" type="checkbox" dialog-pref>'] self.assertEqual([], self._testChange(lines)) def testValidChange_Radio(self): lines = ['<input id="testinput" pref="testpref"', ' metric="validaction" type="radio" dialog-pref value="true">'] self.assertEqual([], self._testChange(lines)) def testValidChange_UsingDatatype(self): lines = ['<input id="testinput" pref="testpref"', ' metric="validaction" datatype="boolean" dialog-pref>'] self.assertEqual([], self._testChange(lines)) def testValidChange_NotBoolean(self): lines = ['<input id="testinput" pref="testpref"', ' metric="notboolean_validaction" dialog-pref>'] self.assertEqual([], self._testChange(lines)) def testInvalidChange(self): lines = ['<input id="testinput" pref="testpref"', 
'metric="invalidaction" type="checkbox" dialog-pref>'] warnings = self._testChange(lines) self.assertEqual(1, len(warnings), warnings) def testInValidChange_Radio(self): lines = ['<input id="testinput" pref="testpref"', ' metric="validaction" type="radio" dialog-pref value="string">'] warnings = self._testChange(lines) self.assertEqual(1, len(warnings), warnings) def testValidChange_MultilineType(self): lines = ['<input id="testinput" pref="testpref"\n' ' metric="validaction" type=\n' ' "radio" dialog-pref value=\n' ' "false">'] warnings = self._testChange(lines) self.assertEqual([], self._testChange(lines)) def _testChange(self, lines): mock_input_api = MockInputApi() mock_input_api.files = [MockFile('path/test.html', lines)] action_xml_path = self._createActionXMLFile() return PRESUBMIT.CheckUserActionUpdate(mock_input_api, MockOutputApi(), action_xml_path) def _createActionXMLFile(self): content = ('<actions>' '<action name="validaction_Disable">' ' <owner>Please list the metric\'s owners.</owner>' ' <description>Enter the description of this user action.</description>' '</action>' '<action name="validaction_Enable">' ' <owner>Please list the metric\'s owners. </owner>' ' <description>Enter the description of this user action.</description>' '</action>' '<action name="notboolean_validaction">' ' <owner>Please list the metric\'s owners.</owner>' ' <description>Enter the description of this user action.</description>' '</action>' '</actions>') sys_temp = tempfile.gettempdir() action_xml_path = os.path.join(sys_temp, 'actions_test.xml') if not os.path.exists(action_xml_path): with open(action_xml_path, 'w+') as action_file: action_file.write(content) return action_xml_path if __name__ == '__main__': unittest.main()
bsd-3-clause
megraf/asuswrt-merlin
release/src/router/samba36/source4/lib/policy/tests/python/bindings.py
20
1188
#!/usr/bin/env python # Unix SMB/CIFS implementation. # Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2010 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """Tests for the libpolicy Python bindings. """ import unittest from samba import policy class PolicyTests(unittest.TestCase): def test_get_gpo_flags(self): self.assertEquals(["GPO_FLAG_USER_DISABLE"], policy.get_gpo_flags(policy.GPO_FLAG_USER_DISABLE)) def test_get_gplink_options(self): self.assertEquals(["GPLINK_OPT_DISABLE"], policy.get_gplink_options(policy.GPLINK_OPT_DISABLE))
gpl-2.0
MoritzS/django
tests/template_tests/filter_tests/test_make_list.py
85
1494
from django.template.defaultfilters import make_list from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class MakeListTests(SimpleTestCase): """ The make_list filter can destroy existing escaping, so the results are escaped. """ @setup({'make_list01': '{% autoescape off %}{{ a|make_list }}{% endautoescape %}'}) def test_make_list01(self): output = self.engine.render_to_string('make_list01', {"a": mark_safe("&")}) self.assertEqual(output, "['&']") @setup({'make_list02': '{{ a|make_list }}'}) def test_make_list02(self): output = self.engine.render_to_string('make_list02', {"a": mark_safe("&")}) self.assertEqual(output, "[&#39;&amp;&#39;]") @setup({'make_list03': '{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}'}) def test_make_list03(self): output = self.engine.render_to_string('make_list03', {"a": mark_safe("&")}) self.assertEqual(output, "['&']") @setup({'make_list04': '{{ a|make_list|stringformat:"s"|safe }}'}) def test_make_list04(self): output = self.engine.render_to_string('make_list04', {"a": mark_safe("&")}) self.assertEqual(output, "['&']") class FunctionTests(SimpleTestCase): def test_string(self): self.assertEqual(make_list('abc'), ['a', 'b', 'c']) def test_integer(self): self.assertEqual(make_list(1234), ['1', '2', '3', '4'])
bsd-3-clause
ryfeus/lambda-packs
Sklearn_scipy_numpy/source/sklearn/preprocessing/__init__.py
268
1319
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from ._function_transformer import FunctionTransformer from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import minmax_scale from .data import OneHotEncoder from .data import PolynomialFeatures from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from .imputation import Imputer __all__ = [ 'Binarizer', 'FunctionTransformer', 'Imputer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'label_binarize', ]
mit
acabey/flash-dump-tool
test/test_xecrypt.py
1
35801
from unittest import TestCase from Crypto.PublicKey import RSA from lib.xecrypt import XeCryptBnQw_SwapLeBe, XeCryptBnDw_SwapLeBe, XeCryptBnQw, XeCryptBnQw_toInt, XeCryptBnQwNeMod from lib.xecrypt_rsa import XeCrypt_RSA from lib.xecrypt import XeCryptRotSum, RotSumCtx from lib.xecrypt import XeCryptRotSumSha class TestXeCryptRotSumSha(TestCase): def test_normal(self): buffer1 = bytearray(0x40) for i in range(len(buffer1)): buffer1[i] = i % 255 buffer2 = bytearray(0x40) for i in range(len(buffer2)): buffer2[i] = 255 - (i % 255) digest = bytes( [0x31, 0xf0, 0x0b, 0x77, 0x68, 0xf9, 0x57, 0x71, 0x8c, 0x4d, 0x62, 0x84, 0xb3, 0x45, 0xcd, 0xdd, 0x37, 0x9c, 0x6e, 0xe9]) self.assertEqual(XeCryptRotSumSha(buffer1, buffer2, 0x20), digest) def test_input_2_zero(self): buffer1 = bytearray(0x40) for i in range(len(buffer1)): buffer1[i] = i % 255 buffer2 = bytearray(0x40) digest = bytes( [0x2a, 0xdb, 0xc2, 0x8c, 0xda, 0xca, 0x48, 0x8e, 0xa5, 0x01, 0x74, 0xa5, 0xd9, 0x80, 0x60, 0xc5, 0xf9, 0x0f, 0x5d, 0x54]) self.assertEqual(XeCryptRotSumSha(buffer1, buffer2, 0x20), digest) def test_input_all_zero(self): buffer1 = bytearray(0x40) buffer2 = bytearray(0x40) digest = bytes( [0x37, 0xd3, 0xb6, 0xb8, 0x4e, 0x35, 0x08, 0x0a, 0xe5, 0xf9, 0x60, 0xfd, 0xbf, 0x44, 0x26, 0x06, 0x54, 0x02, 0x7e, 0x5b]) self.assertEqual(XeCryptRotSumSha(buffer1, buffer2, 0x20), digest) class TestXeCryptRotSum(TestCase): def test_launch_xex(self): input_1_size = 0x70 input_1 = bytearray( [0x00, 0x00, 0x01, 0x74, 0x00, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00, 0xfc, 0xa0, 0x61, 0xd4, 0xc2, 0xa1, 0x71, 0xc0, 0xec, 0x98, 0x00, 0x86, 0xe6, 0x8b, 0xa8, 0x9f, 0xb0, 0x9c, 0x83, 0xd4, 0x00, 0x00, 0x00, 0x02, 0x47, 0x88, 0x8f, 0x6a, 0x94, 0x36, 0x58, 0xc0, 0x7a, 0xdf, 0xeb, 0xda, 0x44, 0x13, 0x4a, 0x7f, 0xe0, 0xd2, 0x3f, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xd8, 0x0b, 0xfa, 0xf3, 0xdc, 0x53, 0xb0, 0x89, 0x7d, 0x5e, 0xb5, 0x08, 0xd0, 0x10, 0x63, 
0x00, 0x00, 0x00, 0x00, 0xca, 0xe2, 0xc7, 0x20, 0xef, 0xb2, 0x9a, 0x1d, 0x2e, 0x90, 0xe4, 0x7e, 0xb9, 0xdf, 0x41, 0xa0, 0xf5, 0xf5, 0xb7, 0xa4]) rotsum_ctx = RotSumCtx([0] * 4) output = bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb2, 0x51, 0xdc, 0xa2, 0xc4, 0x60, 0xa3, 0x5d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xd8, 0x2d, 0xc6, 0x4e, 0xec, 0xd3, 0xea, 0x24]) XeCryptRotSum(rotsum_ctx, input_1, input_1_size >> 3) self.assertEqual(bytes(rotsum_ctx), output) def test_launch_xex_zero_all(self): input_1_size = 0x0 input_1 = [] output = bytes([0x0] * 0x20) rotsum_ctx = RotSumCtx.from_bytes(output) XeCryptRotSum(rotsum_ctx, input_1, input_1_size >> 3) self.assertEqual(bytes(rotsum_ctx), output) def test_launch_xex_zero_input(self): input_1_size = 0x0 input_1 = [] output = bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb2, 0x51, 0xdc, 0xa2, 0xc4, 0x60, 0xa3, 0x5d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xd8, 0x2d, 0xc6, 0x4e, 0xec, 0xd3, 0xea, 0x24]) rotsum_ctx = RotSumCtx.from_bytes(output) XeCryptRotSum(rotsum_ctx, input_1, input_1_size >> 3) self.assertEqual(bytes(rotsum_ctx), output) def test_normal(self): input_1_size = 0x40 input_1_bytes = bytearray( [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f]) input_1 = bytearray(input_1_size) for i in range(input_1_size): input_1[i] = i % 255 self.assertEqual(input_1_bytes, input_1) rotsum_ctx = RotSumCtx([0] * 4) output = bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x83, 0x62, 0xe0, 0xd8, 0xd7, 0xd3, 0xc3, 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x6d, 0x14, 0x14, 0x14, 0x19, 0x69, 0x69, 0x69]) XeCryptRotSum(rotsum_ctx, input_1, 
input_1_size >> 3) self.assertEqual(bytes(rotsum_ctx), output) def test_normal_2(self): input_2_size = 0x40 input_2_bytes = bytearray( [0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, 0xdf, 0xde, 0xdd, 0xdc, 0xdb, 0xda, 0xd9, 0xd8, 0xd7, 0xd6, 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xd0, 0xcf, 0xce, 0xcd, 0xcc, 0xcb, 0xca, 0xc9, 0xc8, 0xc7, 0xc6, 0xc5, 0xc4, 0xc3, 0xc2, 0xc1, 0xc0]) input_2 = bytearray(input_2_size) for i in range(input_2_size): input_2[i] = 255 - (i % 255) self.assertEqual(input_2_bytes, input_2) output_1 = bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x83, 0x62, 0xe0, 0xd8, 0xd7, 0xd3, 0xc3, 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x6d, 0x14, 0x14, 0x14, 0x19, 0x69, 0x69, 0x69]) rotsum_ctx = RotSumCtx.from_bytes(output_1) output_2 = bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x50, 0x60, 0x61, 0xaa, 0x6a, 0x8b, 0x0d, 0x54, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x52, 0x59, 0x00, 0x00, 0xa4, 0xb0, 0x00, 0x00]) XeCryptRotSum(rotsum_ctx, input_2, input_2_size >> 3) self.assertEqual(bytes(rotsum_ctx), output_2) class TestXeCryptBnQw_SwapLeBe(TestCase): def test_normal_1(self): self.assertEqual(XeCryptBnQw_SwapLeBe( bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), 1), bytes([0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01])) def test_normal_2(self): self.assertEqual(XeCryptBnQw_SwapLeBe( bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), 3), bytes([0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01])) def test_zero(self): self.assertEqual(XeCryptBnQw_SwapLeBe( bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), 1), bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) 
def test_one(self): self.assertEqual(XeCryptBnQw_SwapLeBe( bytes([0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), 1), bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01])) class TestXeCryptBnDw_SwapLeBe(TestCase): def test_normal_1(self): self.assertEqual(XeCryptBnDw_SwapLeBe( bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), 2), bytes([0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05, ])) def test_normal_2(self): self.assertEqual(XeCryptBnDw_SwapLeBe( bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), 6), bytes([0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05])) def test_zero(self): self.assertEqual(XeCryptBnDw_SwapLeBe( bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), 2), bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) def test_one(self): self.assertEqual(XeCryptBnDw_SwapLeBe( bytes([0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), 2), bytes([0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00])) class TestXeCryptBnQw(TestCase): def test_rsa(self): rsa_obj = RSA.generate(1024) p_old_bn = XeCryptBnQw(rsa_obj.p, 64) self.assertEqual(XeCryptBnQw_toInt(p_old_bn), rsa_obj.p) n_old_bn = XeCryptBnQw(rsa_obj.n, 128) self.assertEqual(XeCryptBnQw_toInt(n_old_bn), rsa_obj.n) def test_xersa(self): rsa_obj = RSA.generate(1024) xecrypt_obj = XeCrypt_RSA.from_rsa_obj(rsa_obj) p_old_bn = XeCryptBnQw(xecrypt_obj.p, 64) self.assertEqual(XeCryptBnQw_toInt(p_old_bn), xecrypt_obj.p) n_old_bn = XeCryptBnQw(xecrypt_obj.n, 128) self.assertEqual(XeCryptBnQw_toInt(n_old_bn), xecrypt_obj.n) class TestXeCryptBnQwNeMod(TestCase): def test_normal(self): r3 = bytes([ 0xf5, 0xbf, 0x46, 0xe4, 0x34, 0xf1, 0x73, 0x3c, 0xef, 0x6d, 0x46, 0x91, 0x3d, 0x75, 0x08, 0xcf, 0x3f, 0x09, 0x57, 0xcd, 0x7e, 0xe5, 0x1a, 0xa9, 0x7d, 0x83, 0x99, 0xf8, 0xf6, 0xe9, 0x58, 0x39, 0xa5, 0x37, 0xf0, 
0x6c, 0x56, 0xa7, 0xef, 0x33, 0xe8, 0x11, 0x23, 0x26, 0x1b, 0x94, 0x9e, 0xc8, 0xcc, 0x30, 0x81, 0x7b, 0x21, 0x9f, 0x6c, 0x54, 0x39, 0x20, 0x35, 0x88, 0xbb, 0xe1, 0x98, 0xf6, 0x62, 0x6c, 0xa9, 0x28, 0x65, 0xd4, 0x70, 0xe8, 0x40, 0xb2, 0xd5, 0x3d, 0x94, 0x9d, 0x45, 0x27, 0x0c, 0xc9, 0xf4, 0x09, 0x24, 0x12, 0x08, 0x59, 0xaf, 0x7c, 0xd2, 0xf3, 0xc4, 0x9a, 0xcd, 0x3d, 0x6c, 0xe7, 0xe9, 0x6f, 0x22, 0x49, 0x18, 0x1e, 0x40, 0x72, 0x0b, 0x80, 0x47, 0x49, 0x52, 0x5f, 0x33, 0x70, 0x77, 0xff, 0xbb, 0xc4, 0xf4, 0xb5, 0xdb, 0xbb, 0x3f, 0x02, 0x55, 0x73, 0xe9, 0x34, 0xa1, 0x99, 0x04, 0xc0, 0x43, 0xef, 0x73, 0x4e, 0xc6, 0xaa, 0xc7, 0xe1, 0x07, 0x92, 0xb7, 0x18, 0x84, 0x2f, 0x0d, 0x44, 0xa1, 0x5f, 0xe6, 0x7c, 0x15, 0x43, 0xbe, 0xa4, 0x16, 0x15, 0x03, 0xd0, 0x12, 0x7a, 0x3a, 0xeb, 0xdc, 0xf1, 0x7e, 0x2d, 0x05, 0x5e, 0xb9, 0x21, 0xbe, 0x4e, 0xeb, 0xf9, 0x78, 0x69, 0x40, 0x7f, 0x38, 0xe8, 0x19, 0xe2, 0x41, 0x45, 0xbf, 0x0c, 0x5f, 0xa8, 0x8c, 0xdc, 0x1e, 0x52, 0x0e, 0x37, 0x7a, 0x39, 0xd7, 0x6a, 0x5e, 0x65, 0x06, 0x8c, 0x85, 0x5d, 0x0a, 0xc9, 0xb4, 0xfd, 0xfe, 0x31, 0x6e, 0x8c, 0x0d, 0x7d, 0xa1, 0xad, 0x20, 0x6e, 0xe9, 0x57, 0xa0, 0xd7, 0x36, 0x0b, 0x2a, 0x0b, 0x2e, 0x92, 0xb3, 0xdf, 0x18, 0x30, 0x6a, 0x93, 0xd5, 0x9e, 0x0c, 0xa9, 0xf1, 0xfe, 0x6a, 0x2d, 0x8b, 0x92, 0x7d, 0x39, 0x3c, 0xd0, 0x23, 0x2f, 0xd7, 0x70, 0x37, 0xf7, 0x4a, 0xf1, 0xee, 0x3e, 0xe1, 0x2c, 0xbc, 0x97, 0xd8, 0x2d, 0xf1, 0x5b, 0xb0, 0x2e, 0xea, 0xaf, ]) # Unchanged r4 = bytes([ 0x4a, 0xf1, 0xee, 0x3e, 0xe1, 0x2c, 0xbc, 0x97, 0xd8, 0x2d, 0xf1, 0x5b, 0xb0, 0x2e, 0xea, 0xaf, 0x0e, 0xec, 0x36, 0xd2, 0x0a, 0xf8, 0x57, 0x65, 0x2b, 0x49, 0x76, 0x1c, 0x40, 0x3e, 0x45, 0x2e, 0x30, 0x27, 0x90, 0xf4, 0x26, 0x42, 0x4a, 0x4c, 0xaf, 0x75, 0xe8, 0x98, 0x79, 0xa7, 0x3e, 0x44, 0x03, 0x03, 0xd9, 0x86, 0xb3, 0xef, 0x13, 0xdf, 0xcd, 0x10, 0xf1, 0xe5, 0x63, 0x6b, 0xcd, 0x2a, 0x30, 0xe4, 0xa2, 0x16, 0xcf, 0xb7, 0xd5, 0x7c, 0x1b, 0xb0, 0x49, 0xb0, 0x4f, 0xc4, 0xd5, 0x8c, 0x8d, 0xd8, 0x3c, 0x81, 0xe8, 0xa1, 0x73, 0xee, 0x76, 0xb8, 0xb8, 
0x49, 0x4d, 0x0b, 0x37, 0x42, 0x70, 0x4e, 0x08, 0x43, 0x2d, 0xdb, 0xf2, 0x37, 0xfe, 0x82, 0xcf, 0xc2, 0x99, 0xc4, 0xa1, 0xa9, 0xbd, 0x8b, 0xc0, 0x0e, 0x88, 0x87, 0xcd, 0x82, 0x1c, 0x75, 0x82, 0x62, 0x16, 0x77, 0xc6, 0xfa, 0x19, 0x5c, 0x04, 0xa7, 0x98, 0x40, 0x05, 0xbf, 0xf1, 0x98, 0x9b, 0xd8, 0x00, 0x95, 0x4a, 0x2a, 0xea, 0x45, 0x5b, 0xb9, 0x89, 0x94, 0x9e, 0x07, 0xb8, 0xb5, 0x86, 0x6c, 0x9d, 0xb9, 0xc6, 0xef, 0xc0, 0x45, 0x7b, 0x3b, 0xaf, 0x32, 0x53, 0x63, 0xd1, 0x49, 0x58, 0xe0, 0xda, 0x9b, 0x96, 0x86, 0x1a, 0xfa, 0xac, 0xaf, 0x66, 0xee, 0x49, 0x0c, 0x37, 0x01, 0x60, 0x8f, 0xca, 0x80, 0x71, 0xfd, 0x2c, 0x35, 0x59, 0xd8, 0x3d, 0xfe, 0xf8, 0xef, 0x0f, 0x3d, 0x12, 0x28, 0xef, 0x6c, 0xf3, 0xad, 0x76, 0x33, 0x37, 0x4f, 0xae, 0x1d, 0x59, 0xe3, 0xdc, 0x14, 0x88, 0x46, 0xa6, 0x96, 0x4a, 0xb8, 0xee, 0x6b, 0x74, 0xf4, 0x46, 0x13, 0x04, 0x45, 0xb1, 0x96, 0xc3, 0xca, 0x98, 0xcb, 0xe0, 0x9b, 0x53, 0x5f, 0xd1, 0xf9, 0x78, 0x0a, 0x90, 0x46, 0xd7, 0xb3, 0x49, 0x2a, 0x5b, 0x89, 0x91, 0x1f, 0x65, 0x49, 0x51, 0xf7, 0xea, 0x7b, 0x30, 0x57, 0x01, 0x72, 0xe2, 0x45, 0xe8, 0x42, 0xea, 0xd3 ]) r5 = bytes([ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]) r5out = bytes([ 0xf5, 0xbf, 0x46, 0xe4, 0x34, 0xf1, 0x73, 0x3c, 0xef, 0x6d, 0x46, 0x91, 0x3d, 0x75, 0x08, 0xcf, 0x3f, 0x09, 0x57, 0xcd, 0x7e, 0xe5, 0x1a, 0xa9, 0x7d, 0x83, 0x99, 0xf8, 0xf6, 0xe9, 0x58, 0x39, 0xa5, 0x37, 0xf0, 0x6c, 0x56, 0xa7, 0xef, 0x33, 0xe8, 0x11, 0x23, 0x26, 0x1b, 0x94, 0x9e, 0xc8, 0xcc, 0x30, 0x81, 0x7b, 0x21, 0x9f, 0x6c, 0x54, 0x39, 0x20, 0x35, 0x88, 0xbb, 0xe1, 0x98, 0xf6, 0x62, 0x6c, 0xa9, 0x28, 0x65, 0xd4, 0x70, 0xe8, 0x40, 0xb2, 0xd5, 0x3d, 0x94, 0x9d, 0x45, 0x27, 0x0c, 0xc9, 0xf4, 0x09, 0x24, 0x12, 0x08, 0x59, 0xaf, 0x7c, 0xd2, 0xf3, 0xc4, 0x9a, 0xcd, 0x3d, 0x6c, 0xe7, 0xe9, 0x6f, 0x22, 0x49, 0x18, 0x1e, 0x40, 0x72, 0x0b, 0x80, 0x47, 0x49, 0x52, 0x5f, 0x33, 0x70, 0x77, 0xff, 0xbb, 0xc4, 0xf4, 0xb5, 0xdb, 0xbb, 0x3f, 0x02, 0x55, 0x73, 0xe9, 0x34, 0xa1, 0x99, 0x04, 0xc0, 0x43, 0xef, 0x73, 0x4e, 0xc6, 0xaa, 0xc7, 0xe1, 0x07, 0x92, 0xb7, 0x18, 0x84, 0x2f, 0x0d, 0x44, 0xa1, 0x5f, 0xe6, 0x7c, 0x15, 0x43, 0xbe, 0xa4, 0x16, 0x15, 0x03, 0xd0, 0x12, 0x7a, 0x3a, 0xeb, 0xdc, 0xf1, 0x7e, 0x2d, 0x05, 0x5e, 0xb9, 0x21, 0xbe, 0x4e, 0xeb, 0xf9, 0x78, 0x69, 0x40, 0x7f, 0x38, 0xe8, 0x19, 0xe2, 0x41, 0x45, 0xbf, 0x0c, 0x5f, 0xa8, 0x8c, 0xdc, 0x1e, 0x52, 0x0e, 0x37, 0x7a, 0x39, 0xd7, 0x6a, 0x5e, 0x65, 0x06, 0x8c, 0x85, 0x5d, 0x0a, 0xc9, 0xb4, 0xfd, 0xfe, 0x31, 0x6e, 0x8c, 0x0d, 0x7d, 0xa1, 0xad, 0x20, 0x6e, 0xe9, 0x57, 0xa0, 0xd7, 0x36, 0x0b, 0x2a, 0x0b, 0x2e, 0x92, 0xb3, 0xdf, 0x18, 0x30, 0x6a, 0x93, 0xd5, 0x9e, 0x0c, 0xa9, 0xf1, 0xfe, 0x6a, 0x2d, 0x8b, 0x92, 0x7d, 0x39, 0x3c, 0xd0, 0x23, 0x2f, 0xd7, 0x70, 0x37, 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]) self.assertEqual(XeCryptBnQwNeMod(r3, r4, 0x20, 0x20), r5out) def test_normal_2(self): r3 = bytes([ 0x93, 0x25, 0x50, 0x3e, 0xa5, 0xd4, 0x78, 0x10, 0x49, 0x8b, 0x32, 0x53, 0x33, 0x49, 0xf6, 0xbf, 0x4d, 0xf5, 0x1e, 0xaf, 0x72, 0xa8, 0x0b, 0x35, 0xc2, 0x53, 0xcd, 0xd8, 0x67, 0x71, 0xd2, 0x5a, 0x38, 0xf9, 0x65, 0xdd, 0x8f, 0x2c, 0xfd, 0xf9, 0x02, 0x2b, 0x3e, 0xeb, 0x9d, 0xc5, 0x0d, 0x9c, 0x72, 0x5f, 0x53, 0x7f, 0xf6, 0x84, 0x6d, 0x25, 0x88, 0x27, 0x55, 0x47, 0xad, 0x90, 0x03, 0x84, 0xb4, 0xdc, 0xe8, 0x16, 0x70, 0x8f, 0xdb, 0xeb, 0xc5, 0x7a, 0xa7, 0xc2, 0xcd, 0xe8, 0xf1, 0xc8, 0x7e, 0x4f, 0xea, 0xaf, 0xdf, 0xe8, 0x1b, 0xa8, 0xbe, 0xb4, 0xb1, 0xba, 0x00, 0x3e, 0xb5, 0x94, 0x6c, 0x1c, 0xff, 0x71, 0x53, 0x8f, 0x1a, 0x31, 
0x32, 0xab, 0x34, 0xa7, 0x13, 0xce, 0x74, 0x37, 0x6b, 0xf8, 0x2a, 0xa3, 0xde, 0xe7, 0x6d, 0x3d, 0x22, 0x8c, 0x88, 0xf4, 0xc5, 0x74, 0x39, 0x92, 0x0c, 0xed, 0x7f, 0x75, 0x65, 0xc2, 0xcb, 0x75, 0xbf, 0x85, 0x20, 0x33, 0x70, 0xeb, 0x37, 0x80, 0x43, 0xc8, 0x17, 0xd9, 0x37, 0xe6, 0x08, 0x0c, 0xfd, 0xb1, 0x7d, 0x94, 0x87, 0x36, 0x15, 0xf8, 0x3c, 0xeb, 0x73, 0x45, 0x31, 0xa5, 0xde, 0xca, 0xf3, 0xaa, 0x02, 0xed, 0xfa, 0x9f, 0xb1, 0xb9, 0x9e, 0xc4, 0xb1, 0x2b, 0x4f, 0x5e, 0x6f, 0xda, 0xc6, 0xe9, 0x29, 0x8f, 0xc0, 0x73, 0xee, 0x7d, 0xf3, 0x05, 0x9b, 0xbb, 0xc9, 0x9f, 0x1e, 0x8c, 0x85, 0x8a, 0x39, 0xa3, 0xd6, 0xc7, 0xe7, 0x33, 0xe4, 0xfb, 0x5a, 0x29, 0xd4, 0x2e, 0x5c, 0x0e, 0xf5, 0x72, 0x9a, 0xe1, 0x2e, 0xb4, 0x86, 0xaf, 0xae, 0x07, 0x81, 0xa7, 0x0e, 0xb3, 0x76, 0xd2, 0xa9, 0xcf, 0xc3, 0xe6, 0x65, 0x89, 0x27, 0x4b, 0xf0, 0x7f, 0x2f, 0xe1, 0x2d, 0x72, 0x96, 0xc0, 0xeb, 0xdf, 0xbf, 0x66, 0x98, 0xb4, 0x9e, 0x3e, 0x44, 0x2b, 0x95, 0x32, 0x0d, 0x2a, 0x7b, 0x5f, 0x63, 0x94, 0x84, 0x6e, 0x71, 0x7a, 0xed, 0x76, 0x10, 0xe0, 0xd6, 0xba, 0x49, 0x9c, 0x59, 0x1e, 0x62, 0xbf, 0x29, 0xfb, 0xca, 0xc2, 0x39, 0x42, 0xff, 0x76, 0x5f, 0x04, 0x47, 0xbf, 0x60, 0x43, 0x8d, 0x6d, 0x09, 0x92, 0x51, 0x2f, 0x63, 0x9f, 0x62, 0xe4, 0xd2, 0x95, 0x25, 0x19, 0xa4, 0xd8, 0x93, 0xe1, 0xf2, 0x91, 0x9b, 0xdd, 0x45, 0x90, 0xe6, 0x4a, 0xe0, 0x85, 0x01, 0x89, 0x05, 0xde, 0xcf, 0xeb, 0xe0, 0x62, 0x31, 0xf1, 0xeb, 0xc4, 0x00, 0xf4, 0x85, 0xae, 0xee, 0xa5, 0x9b, 0x3d, 0xbf, 0x32, 0xaa, 0x51, 0xaa, 0x5c, 0xf7, 0xf3, 0x27, 0x47, 0xa9, 0x29, 0xa6, 0x87, 0xa8, 0x15, 0xe9, 0xe2, 0x2f, 0x5a, 0xaf, 0xd3, 0x4f, 0xb1, 0x6f, 0x47, 0x3c, 0x24, 0x3d, 0xc9, 0x83, 0x4e, 0x27, 0x51, 0x41, 0x0f, 0x44, 0xb8, 0xf5, 0x88, 0xfa, 0x1e, 0xeb, 0x26, 0x80, 0x99, 0x08, 0x24, 0x37, 0xed, 0x77, 0x75, 0xa6, 0x8b, 0x45, 0x25, 0x85, 0x27, 0x85, 0x7b, 0xa5, 0x3c, 0x41, 0x84, 0xb6, 0x42, 0xc7, 0x01, 0xeb, 0x40, 0x2e, 0x3c, 0x3a, 0xf4, 0xf2, 0x27, 0xfe, 0x2c, 0x40, 0x6f, 0x79, 0x17, 0xd8, 0x47, 0x6e, 0x87, 0x34, 0x55, 0x14, 0xc1, 0x47, 0x24, 0x0b, 
0x2b, 0xa1, 0x20, 0x1f, 0x35, 0x4e, 0x5e, 0x37, 0xff, 0x6a, 0x3e, 0x3e, 0x67, 0xe4, 0xa1, 0xb0, 0x0b, 0xd4, 0x26, 0xe6, 0x45, 0x7c, 0xee, 0x68, 0xa9, 0x87, 0xd5, 0x74, 0xbf, 0x16, 0x21, 0xb7, 0xc0, 0x71, 0xea, 0x8c, 0xf3, 0x19, 0x1e, 0x52, 0x7a, 0x69, 0xf4, 0xd0, 0xeb, 0x45, 0xa8, 0xa4, 0x7a, 0x22, 0x8f, 0x24, 0xc0, 0x2a, 0x80, 0x38, 0xd2, 0x88, 0x82, 0x4b, 0x5f, 0xa6, 0x8b, 0xf2, 0x11, 0x40, 0x07, 0x15, 0xcb, 0x22, 0xa1, 0xd7, 0x0e, 0x1c, 0x25, ]) # Unchanged r4 = bytes([ 0x4a, 0xf1, 0xee, 0x3e, 0xe1, 0x2c, 0xbc, 0x97, 0xd8, 0x2d, 0xf1, 0x5b, 0xb0, 0x2e, 0xea, 0xaf, 0x0e, 0xec, 0x36, 0xd2, 0x0a, 0xf8, 0x57, 0x65, 0x2b, 0x49, 0x76, 0x1c, 0x40, 0x3e, 0x45, 0x2e, 0x30, 0x27, 0x90, 0xf4, 0x26, 0x42, 0x4a, 0x4c, 0xaf, 0x75, 0xe8, 0x98, 0x79, 0xa7, 0x3e, 0x44, 0x03, 0x03, 0xd9, 0x86, 0xb3, 0xef, 0x13, 0xdf, 0xcd, 0x10, 0xf1, 0xe5, 0x63, 0x6b, 0xcd, 0x2a, 0x30, 0xe4, 0xa2, 0x16, 0xcf, 0xb7, 0xd5, 0x7c, 0x1b, 0xb0, 0x49, 0xb0, 0x4f, 0xc4, 0xd5, 0x8c, 0x8d, 0xd8, 0x3c, 0x81, 0xe8, 0xa1, 0x73, 0xee, 0x76, 0xb8, 0xb8, 0x49, 0x4d, 0x0b, 0x37, 0x42, 0x70, 0x4e, 0x08, 0x43, 0x2d, 0xdb, 0xf2, 0x37, 0xfe, 0x82, 0xcf, 0xc2, 0x99, 0xc4, 0xa1, 0xa9, 0xbd, 0x8b, 0xc0, 0x0e, 0x88, 0x87, 0xcd, 0x82, 0x1c, 0x75, 0x82, 0x62, 0x16, 0x77, 0xc6, 0xfa, 0x19, 0x5c, 0x04, 0xa7, 0x98, 0x40, 0x05, 0xbf, 0xf1, 0x98, 0x9b, 0xd8, 0x00, 0x95, 0x4a, 0x2a, 0xea, 0x45, 0x5b, 0xb9, 0x89, 0x94, 0x9e, 0x07, 0xb8, 0xb5, 0x86, 0x6c, 0x9d, 0xb9, 0xc6, 0xef, 0xc0, 0x45, 0x7b, 0x3b, 0xaf, 0x32, 0x53, 0x63, 0xd1, 0x49, 0x58, 0xe0, 0xda, 0x9b, 0x96, 0x86, 0x1a, 0xfa, 0xac, 0xaf, 0x66, 0xee, 0x49, 0x0c, 0x37, 0x01, 0x60, 0x8f, 0xca, 0x80, 0x71, 0xfd, 0x2c, 0x35, 0x59, 0xd8, 0x3d, 0xfe, 0xf8, 0xef, 0x0f, 0x3d, 0x12, 0x28, 0xef, 0x6c, 0xf3, 0xad, 0x76, 0x33, 0x37, 0x4f, 0xae, 0x1d, 0x59, 0xe3, 0xdc, 0x14, 0x88, 0x46, 0xa6, 0x96, 0x4a, 0xb8, 0xee, 0x6b, 0x74, 0xf4, 0x46, 0x13, 0x04, 0x45, 0xb1, 0x96, 0xc3, 0xca, 0x98, 0xcb, 0xe0, 0x9b, 0x53, 0x5f, 0xd1, 0xf9, 0x78, 0x0a, 0x90, 0x46, 0xd7, 0xb3, 0x49, 0x2a, 0x5b, 
0x89, 0x91, 0x1f, 0x65, 0x49, 0x51, 0xf7, 0xea, 0x7b, 0x30, 0x57, 0x01, 0x72, 0xe2, 0x45, 0xe8, 0x42, 0xea, 0xd3 ]) r5 = bytes([ 0xe8, 0x23, 0x02, 0xdc, 0xb7, 0x69, 0x2a, 0xbc, 0x4d, 0x8c, 0xae, 0x8b, 0x45, 0x1f, 0x2f, 0xa9, 0xc7, 0x5e, 0xfe, 0xb7, 0x15, 0x7b, 0x09, 0xac, 0x84, 0x92, 0x21, 0x5a, 0xfa, 0x22, 0x51, 0x87, 0xe4, 0xf0, 0xa2, 0x38, 0x87, 0x86, 0x28, 0x50, 0x29, 0xd2, 0x38, 0x47, 0x4f, 0x97, 0x37, 0x8c, 0x34, 0xb3, 0x0b, 0x79, 0xa9, 0xc2, 0x86, 0x8f, 0xfb, 0xb5, 0xfe, 0x0e, 0x51, 0x0e, 0x1d, 0x9b, 0xfb, 0xb7, 0x5a, 0x45, 0x10, 0x1a, 0xdb, 0x25, 0x46, 0x9d, 0x7b, 0x0a, 0xc0, 0x53, 0x25, 0xec, 0x7e, 0x18, 0x1a, 0xbe, 0xa4, 0xe6, 0xf3, 0xdb, 0x63, 0x58, 0x5b, 0xeb, 0xc0, 0xc0, 0xfd, 0x1f, 0x74, 0x5c, 0x31, 0x78, 0x7e, 0xbe, 0x17, 0xc2, 0xe5, 0x54, 0x0b, 0x1f, 0x75, 0x70, 0xba, 0x17, 0x67, 0x5b, 0x0b, 0x16, 0xa1, 0x66, 0x5d, 0x30, 0x41, 0x65, 0x03, 0x62, 0xac, 0x3e, 0x84, 0x5b, 0xef, 0x7f, 0xf8, 0x0a, 0x90, 0x7c, 0xdb, 0xb9, 0x34, 0x67, 0xf2, 0xaf, 0x00, 0xef, 0x49, 0x33, 0x6b, 0xf0, 0x07, 0xf0, 0x8d, 0x6e, 0x66, 0x30, 0x18, 0xa8, 0x9e, 0x53, 0xb4, 0x3b, 0xc5, 0xfb, 0xbc, 0x31, 0x00, 0xb9, 0x0b, 0x6d, 0x9e, 0x44, 0xbd, 0x6f, 0x20, 0x6f, 0x29, 0x84, 0xb5, 0x15, 0x31, 0x02, 0x24, 0x39, 0x17, 0xf7, 0x1c, 0x35, 0xc7, 0x0b, 0x7a, 0x70, 0x21, 0xe6, 0x53, 0x80, 0x56, 0x78, 0x27, 0x3c, 0xa0, 0xaa, 0x86, 0x4f, 0x6d, 0xfe, 0x5b, 0x3e, 0x09, 0x8c, 0xb3, 0x9e, 0x5d, 0x0a, 0x0a, 0x17, 0xaf, 0xf1, 0x19, 0x60, 0x6b, 0xff, 0x2f, 0xc8, 0x55, 0x9c, 0x6c, 0x08, 0xe2, 0x72, 0x7b, 0xc9, 0x23, 0x6a, 0xb6, 0x78, 0xa2, 0xb4, 0x9d, 0x3f, 0xee, 0x60, 0xb7, 0xad, 0x35, 0xcc, 0xbb, 0x6c, 0x91, 0xb0, 0xc4, 0xe9, 0x5b, 0xbe, 0x0f, 0x5a, 0x9a, 0x9f, 0xb9, 0x90, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x80, 0x23, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x04, 0xfd, 0x60, 0x82, 0x01, 0x94, 0x20, 0x78, 0x1e, 0x12, 0xc0, 0x70, 0x04, 0xff, 0x50, 0x81, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x01, 0x80, 0x28, 0x00, 0x00, 0x82, 0x80, 0x09, 0xad, 
0xbc, 0x80, 0x09, 0x88, 0x74, 0x80, 0x0a, 0x18, 0xe0, 0x70, 0x04, 0xfe, 0x00, 0x78, 0x1a, 0xe0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x82, 0x01, 0x02, 0xd0, 0x70, 0x04, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x23, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x78, 0x1e, 0x12, 0xd0, 0xff, 0xff, 0xff, 0xff, 0x81, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x1e, 0x0f, 0x80, 0x81, 0x75, 0x93, 0xa0, 0x80, 0x0a, 0x18, 0xe0, 0x70, 0x04, 0xfe, 0x60, 0x81, 0x75, 0xdc, 0x30, 0x00, 0x00, 0x00, 0x00, 0x82, 0x01, 0x02, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x78, 0x1e, 0x29, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x23, 0x01, 0x48, ]) r5out = bytes([ 0x79, 0x67, 0x0d, 0x33, 0x3b, 0x82, 0x4d, 0x68, 0x66, 0x5a, 0x26, 0x47, 0x5c, 0x06, 0x13, 0xf6, 0x36, 0x1a, 0x0a, 0x85, 0x3b, 0xa8, 0x16, 0x1f, 0xef, 0x06, 0xe2, 0x26, 0x3b, 0x28, 0x45, 0x8c, 0xe3, 0xab, 0x11, 0x58, 0x43, 0x1f, 0xe5, 0x05, 0x9e, 0x27, 0x8a, 0xad, 0xc0, 0xc7, 0x66, 0x5f, 0x17, 0xea, 0x78, 0x90, 0x9c, 0xb6, 0xea, 0x09, 0xce, 0xe5, 0x22, 0xa2, 0xca, 0xe0, 0x72, 0xe1, 0xd8, 0xe7, 0x04, 0xc0, 0x1f, 0x57, 0x6c, 0x5e, 0xb2, 0x5d, 0xe0, 0x06, 0x16, 0x5b, 0xf7, 0x42, 0x77, 0x57, 0x20, 0x5b, 0xb4, 0x05, 0x26, 0xef, 0x07, 0x52, 0x35, 0xc8, 0xe8, 0x06, 0x93, 0xaf, 0xca, 0x04, 0xd7, 0x04, 0xe6, 0x0d, 0xd4, 0xf6, 0x6a, 0x97, 0x6f, 0x95, 0x17, 0x48, 0x63, 0xe9, 0xb8, 0x57, 0xa8, 0x1e, 0x88, 0x68, 0xa6, 0x0c, 0xbd, 0x5a, 0xc5, 0x40, 0x8b, 0x67, 0x4f, 0xef, 0x90, 
0x61, 0x3b, 0x15, 0xc3, 0xb2, 0xd8, 0x4d, 0x2c, 0x46, 0x93, 0x0e, 0x11, 0xa4, 0x73, 0x03, 0x69, 0x69, 0xa4, 0x8a, 0xa8, 0x0a, 0x99, 0x88, 0x97, 0xdc, 0xcd, 0xbe, 0x75, 0xdb, 0xce, 0xb9, 0x15, 0x47, 0x43, 0x2d, 0x05, 0x55, 0xab, 0x94, 0xd1, 0x7d, 0xda, 0x52, 0x2a, 0x02, 0x01, 0x69, 0x2d, 0xfd, 0xa6, 0x2c, 0x2f, 0xbb, 0xba, 0x7d, 0x39, 0xf8, 0x86, 0x26, 0xcd, 0x4a, 0xb5, 0x03, 0x2c, 0xa6, 0x87, 0x99, 0xca, 0x8f, 0xc3, 0x58, 0x9a, 0x06, 0xb7, 0x80, 0xad, 0xbb, 0x21, 0x55, 0x49, 0x66, 0xb2, 0x38, 0xc8, 0x96, 0xfc, 0xbf, 0x74, 0x2f, 0x95, 0x9d, 0xbb, 0xf7, 0x58, 0x32, 0x69, 0x3d, 0x87, 0x57, 0xb2, 0x76, 0xef, 0x25, 0x42, 0xf1, 0x2d, 0x6e, 0x27, 0x57, 0x6e, 0xa0, 0x1f, 0x5e, 0xd7, 0xfb, 0x08, 0x79, 0xd3, 0x28, 0x97, 0xa3, 0x1b, 0xaa, 0xd0, 0x1e, 0x24, 0x60, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x80, 0x23, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x04, 0xfd, 0x60, 0x82, 0x01, 0x94, 0x20, 0x78, 0x1e, 0x12, 0xc0, 0x70, 0x04, 0xff, 0x50, 0x81, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x01, 0x80, 0x28, 0x00, 0x00, 0x82, 0x80, 0x09, 0xad, 0xbc, 0x80, 0x09, 0x88, 0x74, 0x80, 0x0a, 0x18, 0xe0, 0x70, 0x04, 0xfe, 0x00, 0x78, 0x1a, 0xe0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x82, 0x01, 0x02, 0xd0, 0x70, 0x04, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x23, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x78, 0x1e, 0x12, 0xd0, 0xff, 0xff, 0xff, 0xff, 0x81, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x1e, 0x0f, 0x80, 0x81, 0x75, 0x93, 0xa0, 0x80, 0x0a, 0x18, 0xe0, 0x70, 0x04, 0xfe, 0x60, 0x81, 0x75, 0xdc, 0x30, 0x00, 0x00, 0x00, 0x00, 0x82, 0x01, 
0x02, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x78, 0x1e, 0x29, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x23, 0x01, 0x48, ]) self.assertEqual(XeCryptBnQwNeMod(r3, r4, 0x40, 0x20), r5out) def test_normal_3(self): r3 = bytes([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, ]) r4 = bytes([ ]) r5 = bytes([ 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 
0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, 0x6c, 0x6f, 0x6f, 0x50, ]) r5out = bytes([ ]) self.assertEqual(XeCryptBnQwNeMod(r3, r4, r5, 0x12, 0x10)) def test_normal_4(self): r3 = bytes([ ]) r4 = bytes([ ]) r5 = bytes([ ]) r5out = bytes([ ]) self.assertEqual(XeCryptBnQwNeMod(r3, r4, r5, 0x12, 0x10))
gpl-3.0
haveal/googleads-python-lib
examples/dfp/v201411/line_item_creative_association_service/create_licas.py
4
1879
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This code example creates new line item creative associations (LICAs) for an existing line item and a set of creative ids. To determine which LICAs exist, run get_all_licas.py.""" # Import appropriate modules from the client library. from googleads import dfp # Set the line item ID and creative IDs to associate. LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE' CREATIVE_IDS = ['INSERT_CREATIVE_IDS_HERE'] def main(client, line_item_id, creative_ids): # Initialize appropriate service. lica_service = client.GetService( 'LineItemCreativeAssociationService', version='v201411') licas = [] for creative_id in creative_ids: licas.append({'creativeId': creative_id, 'lineItemId': line_item_id}) # Create the LICAs remotely. licas = lica_service.createLineItemCreativeAssociations(licas) # Display results. if licas: for lica in licas: print ('LICA with line item id \'%s\', creative id \'%s\', and ' 'status \'%s\' was created.' % (lica['lineItemId'], lica['creativeId'], lica['status'])) else: print 'No LICAs created.' if __name__ == '__main__': # Initialize client object. dfp_client = dfp.DfpClient.LoadFromStorage() main(dfp_client, LINE_ITEM_ID, CREATIVE_IDS)
apache-2.0
jspraul/bite-project
deps/gdata-python-client/tests/gdata_tests/calendar_test.py
41
39080
#!/usr/bin/python # # Copyright (C) 2006 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __author__ = 'api.jscudder@gmail.com (Jeff Scudder)' import unittest try: from xml.etree import ElementTree except ImportError: from elementtree import ElementTree import atom import gdata from gdata import test_data import gdata.calendar class CalendarFeedTest(unittest.TestCase): def setUp(self): self.calendar_feed = gdata.calendar.CalendarListFeedFromString( test_data.CALENDAR_FEED) def testEntryCount(self): # Assert the number of items in the feed of calendars self.assertEquals(len(self.calendar_feed.entry),2) def testToAndFromString(self): # Assert the appropriate type for each entry for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry), 'Entry must be an instance of CalendarListEntry') # Regenerate feed from xml text new_calendar_feed = ( gdata.calendar.CalendarListFeedFromString(str(self.calendar_feed))) for an_entry in new_calendar_feed.entry: self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry), 'Entry in regenerated feed must be an instance of CalendarListEntry') def testAuthor(self): """Tests the existence of a <atom:author> and verifies the name and email""" # Assert that each element in the feed author list is an atom.Author for an_author in self.calendar_feed.author: self.assert_(isinstance(an_author, atom.Author), "Calendar feed <atom:author> element must be an instance of " + "atom.Author: %s" % 
an_author) # Assert the feed author name is as expected self.assertEquals(self.calendar_feed.author[0].name.text, 'GData Ops Demo') # Assert the feed author name is as expected self.assertEquals(self.calendar_feed.author[0].email.text, 'gdata.ops.demo@gmail.com') # Assert one of the values for an entry author self.assertEquals(self.calendar_feed.entry[0].author[0].name.text, 'GData Ops Demo') self.assertEquals(self.calendar_feed.entry[0].author[0].email.text, 'gdata.ops.demo@gmail.com') def testId(self): """Tests the existence of a <atom:id> in the feed and entries and verifies the value""" # Assert the feed id exists and is an atom.Id self.assert_(isinstance(self.calendar_feed.id, atom.Id), "Calendar feed <atom:id> element must be an instance of atom.Id: %s" % ( self.calendar_feed.id)) # Assert the feed id value is as expected self.assertEquals(self.calendar_feed.id.text, 'http://www.google.com/calendar/feeds/default') # Assert that each entry has an id which is an atom.Id for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.id, atom.Id), "Calendar entry <atom:id> element must be an instance of " + "atom.Id: %s" % an_entry.id) # Assert one of the values for an id self.assertEquals(self.calendar_feed.entry[1].id.text, 'http://www.google.com/calendar/feeds/default/' + 'jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com') def testPublished(self): """Tests the existence of a <atom:published> in the entries and verifies the value""" # Assert that each entry has a published value which is an atom.Published for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.published, atom.Published), "Calendar entry <atom:published> element must be an instance of " + "atom.Published: %s" % an_entry.published) # Assert one of the values for published is as expected self.assertEquals(self.calendar_feed.entry[1].published.text, '2007-03-20T22:48:57.837Z') def testUpdated(self): """Tests the existence of a <atom:updated> in the feed and the 
entries and verifies the value""" # Assert that the feed updated element exists and is an atom.Updated self.assert_(isinstance(self.calendar_feed.updated, atom.Updated), "Calendar feed <atom:updated> element must be an instance of " + "atom.Updated: %s" % self.calendar_feed.updated) # Assert that each entry has a updated value which is an atom.Updated for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.updated, atom.Updated), "Calendar entry <atom:updated> element must be an instance of" + "atom.Updated: %s" % an_entry.updated) # Assert the feed updated value is as expected self.assertEquals(self.calendar_feed.updated.text, '2007-03-20T22:48:57.833Z') # Assert one of the values for updated self.assertEquals(self.calendar_feed.entry[0].updated.text, '2007-03-20T22:48:52.000Z') def testTitle(self): """Tests the existence of a <atom:title> in the feed and the entries and verifies the value""" # Assert that the feed title element exists and is an atom.Title self.assert_(isinstance(self.calendar_feed.title, atom.Title), "Calendar feed <atom:title> element must be an instance of " + "atom.Title: %s" % self.calendar_feed.title) # Assert that each entry has a title value which is an atom.Title for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.title, atom.Title), "Calendar entry <atom:title> element must be an instance of " + "atom.Title: %s" % an_entry.title) # Assert the feed title value is as expected self.assertEquals(self.calendar_feed.title.text, 'GData Ops Demo\'s Calendar List') # Assert one of the values for title self.assertEquals(self.calendar_feed.entry[0].title.text, 'GData Ops Demo') def testColor(self): """Tests the existence of a <gCal:color> and verifies the value""" # Assert the color is present and is a gdata.calendar.Color for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.color, gdata.calendar.Color), "Calendar feed <gCal:color> element must be an instance of " + 
"gdata.calendar.Color: %s" % an_entry.color) # Assert the color value is as expected self.assertEquals(self.calendar_feed.entry[0].color.value, '#2952A3') def testAccessLevel(self): """Tests the existence of a <gCal:accesslevel> element and verifies the value""" # Assert the access_level is present and is a gdata.calendar.AccessLevel for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.access_level, gdata.calendar.AccessLevel), "Calendar feed <gCal:accesslevel> element must be an instance of " + "gdata.calendar.AccessLevel: %s" % an_entry.access_level) # Assert the access_level value is as expected self.assertEquals(self.calendar_feed.entry[0].access_level.value, 'owner') def testTimezone(self): """Tests the existence of a <gCal:timezone> element and verifies the value""" # Assert the timezone is present and is a gdata.calendar.Timezone for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.timezone, gdata.calendar.Timezone), "Calendar feed <gCal:timezone> element must be an instance of " + "gdata.calendar.Timezone: %s" % an_entry.timezone) # Assert the timezone value is as expected self.assertEquals(self.calendar_feed.entry[0].timezone.value, 'America/Los_Angeles') def testHidden(self): """Tests the existence of a <gCal:hidden> element and verifies the value""" # Assert the hidden is present and is a gdata.calendar.Hidden for an_entry in self.calendar_feed.entry: self.assert_(isinstance(an_entry.hidden, gdata.calendar.Hidden), "Calendar feed <gCal:hidden> element must be an instance of " + "gdata.calendar.Hidden: %s" % an_entry.hidden) # Assert the hidden value is as expected self.assertEquals(self.calendar_feed.entry[0].hidden.value, 'false') def testOpenSearch(self): """Tests the existence of <openSearch:startIndex>""" # Assert that the elements exist and are the appropriate type self.assert_(isinstance(self.calendar_feed.start_index, gdata.StartIndex), "Calendar feed <openSearch:startIndex> element must be an " + 
"instance of gdata.StartIndex: %s" % self.calendar_feed.start_index) # Assert the values for each openSearch element are as expected self.assertEquals(self.calendar_feed.start_index.text, '1') def testGenerator(self): """Tests the existence of <atom:generator> and verifies the value""" # Assert that the element exists and is of the appropriate type self.assert_(isinstance(self.calendar_feed.generator, atom.Generator), "Calendar feed <atom:generator> element must be an instance of " + "atom.Generator: %s" % self.calendar_feed.generator) # Assert the generator version, uri and text are as expected self.assertEquals(self.calendar_feed.generator.text, 'Google Calendar') self.assertEquals(self.calendar_feed.generator.version, '1.0') self.assertEquals(self.calendar_feed.generator.uri, 'http://www.google.com/calendar') def testEntryLink(self): """Makes sure entry links in the private composite feed are parsed.""" entry = gdata.calendar.CalendarEventEntryFromString( test_data.RECURRENCE_EXCEPTION_ENTRY) self.assert_(isinstance(entry.recurrence_exception, list)) self.assert_(isinstance(entry.recurrence_exception[0].entry_link, gdata.EntryLink)) self.assert_(isinstance(entry.recurrence_exception[0].entry_link.entry, gdata.calendar.CalendarEventEntry)) self.assertEquals( entry.recurrence_exception[0].entry_link.entry.author[0].name.text, 'gdata ops') def testSequence(self): entry = gdata.calendar.CalendarEventEntry( sequence=gdata.calendar.Sequence(value='1')) entry2 = gdata.calendar.CalendarEventEntryFromString(str(entry)) self.assertEqual(entry.sequence.value, entry2.sequence.value) entry = gdata.calendar.CalendarEventEntryFromString( '<entry xmlns="%s"><sequence xmlns="%s" value="7" /></entry>' % ( atom.ATOM_NAMESPACE, gdata.calendar.GCAL_NAMESPACE)) self.assertEqual(entry.sequence.value, '7') def testOriginalEntry(self): """Make sure original entry in the private composite feed are parsed.""" entry = gdata.calendar.CalendarEventEntryFromString( 
test_data.RECURRENCE_EXCEPTION_ENTRY) self.assertEquals( entry.recurrence_exception[0].entry_link.entry.original_event.id, 'i7lgfj69mjqjgnodklif3vbm7g') class CalendarFeedTestRegenerated(CalendarFeedTest): def setUp(self): old_calendar_feed = ( gdata.calendar.CalendarListFeedFromString(test_data.CALENDAR_FEED)) self.calendar_feed = ( gdata.calendar.CalendarListFeedFromString(str(old_calendar_feed))) tree = ElementTree.fromstring(str(old_calendar_feed)) class CalendarEventFeedTest(unittest.TestCase): def setUp(self): self.calendar_event_feed = ( gdata.calendar.CalendarEventFeedFromString( test_data.CALENDAR_FULL_EVENT_FEED)) def testEntryCount(self): # Assert the number of items in the feed of events self.assertEquals(len(self.calendar_event_feed.entry),11) def testToAndFromString(self): # Assert the appropriate type for each entry for an_entry in self.calendar_event_feed.entry: self.assert_(isinstance(an_entry, gdata.calendar.CalendarEventEntry), "Entry must be an instance of a CalendarEventEntry") # Regenerate feed from xml text new_calendar_event_feed = gdata.calendar.CalendarEventFeedFromString( str(self.calendar_event_feed)) for an_entry in new_calendar_event_feed.entry: self.assert_(isinstance(an_entry, gdata.calendar.CalendarEventEntry), "Entry in regenerated feed must be an instance of CalendarEventEntry") def testAuthor(self): """Tests the existence of a <atom:author> and verifies the name and email""" # Assert that each element in the feed author list is an atom.Author for an_author in self.calendar_event_feed.author: self.assert_(isinstance(an_author, atom.Author), "Calendar event feed <atom:author> element must be an instance of " + "atom.Author: %s" % an_author) # Assert the feed author name is as expected self.assertEquals(self.calendar_event_feed.author[0].name.text, 'GData Ops Demo') # Assert the feed author name is as expected self.assertEquals(self.calendar_event_feed.author[0].email.text, 'gdata.ops.demo@gmail.com') # Assert one of the values for 
an entry author self.assertEquals(self.calendar_event_feed.entry[0].author[0].name.text, 'GData Ops Demo') self.assertEquals(self.calendar_event_feed.entry[0].author[0].email.text, 'gdata.ops.demo@gmail.com') def testId(self): """Tests the existence of a <atom:id> in the feed and entries and verifies the value""" # Assert the feed id exists and is an atom.Id self.assert_(isinstance(self.calendar_event_feed.id, atom.Id), "Calendar event feed <atom:id> element must be an instance of " + "atom.Id: %s" % self.calendar_event_feed.id) # Assert the feed id value is as expected self.assertEquals(self.calendar_event_feed.id.text, 'http://www.google.com/calendar/feeds/default/private/full') # Assert that each entry has an id which is an atom.Id for an_entry in self.calendar_event_feed.entry: self.assert_(isinstance(an_entry.id, atom.Id), "Calendar event entry <atom:id> element must be an " + "instance of atom.Id: %s" % an_entry.id) # Assert one of the values for an id self.assertEquals(self.calendar_event_feed.entry[1].id.text, 'http://www.google.com/calendar/feeds/default/private/full/' + '2qt3ao5hbaq7m9igr5ak9esjo0') def testPublished(self): """Tests the existence of a <atom:published> in the entries and verifies the value""" # Assert that each entry has a published value which is an atom.Published for an_entry in self.calendar_event_feed.entry: self.assert_(isinstance(an_entry.published, atom.Published), "Calendar event entry <atom:published> element must be an instance " + "of atom.Published: %s" % an_entry.published) # Assert one of the values for published is as expected self.assertEquals(self.calendar_event_feed.entry[1].published.text, '2007-03-20T21:26:04.000Z') def testUpdated(self): """Tests the existence of a <atom:updated> in the feed and the entries and verifies the value""" # Assert that the feed updated element exists and is an atom.Updated self.assert_(isinstance(self.calendar_event_feed.updated, atom.Updated), "Calendar feed <atom:updated> element must be 
an instance of " + "atom.Updated: %s" % self.calendar_event_feed.updated) # Assert that each entry has a updated value which is an atom.Updated for an_entry in self.calendar_event_feed.entry: self.assert_(isinstance(an_entry.updated, atom.Updated), "Calendar event entry <atom:updated> element must be an instance " + "of atom.Updated: %s" % an_entry.updated) # Assert the feed updated value is as expected self.assertEquals(self.calendar_event_feed.updated.text, '2007-03-20T21:29:57.000Z') # Assert one of the values for updated self.assertEquals(self.calendar_event_feed.entry[3].updated.text, '2007-03-20T21:25:46.000Z') def testTitle(self): """Tests the existence of a <atom:title> in the feed and the entries and verifies the value""" # Assert that the feed title element exists and is an atom.Title self.assert_(isinstance(self.calendar_event_feed.title, atom.Title), "Calendar feed <atom:title> element must be an instance of " + "atom.Title: %s" % self.calendar_event_feed.title) # Assert that each entry has a title value which is an atom.Title for an_entry in self.calendar_event_feed.entry: self.assert_(isinstance(an_entry.title, atom.Title), "Calendar event entry <atom:title> element must be an instance of " + "atom.Title: %s" % an_entry.title) # Assert the feed title value is as expected self.assertEquals(self.calendar_event_feed.title.text, 'GData Ops Demo') # Assert one of the values for title self.assertEquals(self.calendar_event_feed.entry[0].title.text, 'test deleted') def testPostLink(self): """Tests the existence of a <atom:link> with a rel='...#post' and verifies the value""" # Assert that each link in the feed is an atom.Link for a_link in self.calendar_event_feed.link: self.assert_(isinstance(a_link, atom.Link), "Calendar event entry <atom:link> element must be an instance of " + "atom.Link: %s" % a_link) # Assert post link exists self.assert_(self.calendar_event_feed.GetPostLink() is not None) # Assert the post link value is as expected 
self.assertEquals(self.calendar_event_feed.GetPostLink().href, 'http://www.google.com/calendar/feeds/default/private/full') def testEditLink(self): """Tests the existence of a <atom:link> with a rel='edit' in each entry and verifies the value""" # Assert that each link in the feed is an atom.Link for a_link in self.calendar_event_feed.link: self.assert_(isinstance(a_link, atom.Link), "Calendar event entry <atom:link> element must be an instance of " + "atom.Link: %s" % a_link) # Assert edit link exists for a_entry in self.calendar_event_feed.entry: self.assert_(a_entry.GetEditLink() is not None) # Assert the edit link value is as expected self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().href, 'http://www.google.com/calendar/feeds/default/private/full/o99flmgm' + 'kfkfrr8u745ghr3100/63310109397') self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().type, 'application/atom+xml') def testOpenSearch(self): """Tests the existence of <openSearch:totalResults>, <openSearch:startIndex>, <openSearch:itemsPerPage>""" # Assert that the elements exist and are the appropriate type self.assert_(isinstance(self.calendar_event_feed.total_results, gdata.TotalResults), "Calendar event feed <openSearch:totalResults> element must be an " + "instance of gdata.TotalResults: %s" % ( self.calendar_event_feed.total_results)) self.assert_( isinstance(self.calendar_event_feed.start_index, gdata.StartIndex), "Calendar event feed <openSearch:startIndex> element must be an " + "instance of gdata.StartIndex: %s" % ( self.calendar_event_feed.start_index)) self.assert_( isinstance(self.calendar_event_feed.items_per_page, gdata.ItemsPerPage), "Calendar event feed <openSearch:itemsPerPage> element must be an " + "instance of gdata.ItemsPerPage: %s" % ( self.calendar_event_feed.items_per_page)) # Assert the values for each openSearch element are as expected self.assertEquals(self.calendar_event_feed.total_results.text, '10') 
self.assertEquals(self.calendar_event_feed.start_index.text, '1') self.assertEquals(self.calendar_event_feed.items_per_page.text, '25') def testGenerator(self): """Tests the existence of <atom:generator> and verifies the value""" # Assert that the element exists and is of the appropriate type self.assert_(isinstance(self.calendar_event_feed.generator, atom.Generator), "Calendar event feed <atom:generator> element must be an instance " + "of atom.Generator: %s" % self.calendar_event_feed.generator) # Assert the generator version, uri and text are as expected self.assertEquals(self.calendar_event_feed.generator.text, 'Google Calendar') self.assertEquals(self.calendar_event_feed.generator.version, '1.0') self.assertEquals(self.calendar_event_feed.generator.uri, 'http://www.google.com/calendar') def testCategory(self): """Tests the existence of <atom:category> and verifies the value""" # Assert that the element exists and is of the appropriate type and value for a_category in self.calendar_event_feed.category: self.assert_(isinstance(a_category, atom.Category), "Calendar event feed <atom:category> element must be an instance " + "of atom.Category: %s" % a_category) self.assertEquals(a_category.scheme, 'http://schemas.google.com/g/2005#kind') self.assertEquals(a_category.term, 'http://schemas.google.com/g/2005#event') for an_event in self.calendar_event_feed.entry: for a_category in an_event.category: self.assert_(isinstance(a_category, atom.Category), "Calendar event feed entry <atom:category> element must be an " + "instance of atom.Category: %s" % a_category) self.assertEquals(a_category.scheme, 'http://schemas.google.com/g/2005#kind') self.assertEquals(a_category.term, 'http://schemas.google.com/g/2005#event') def testSendEventNotifications(self): """Test the existence of <gCal:sendEventNotifications> and verifies the value""" # Assert that the element exists and is of the appropriate type and value for an_event in self.calendar_event_feed.entry: 
self.assert_(isinstance(an_event.send_event_notifications, gdata.calendar.SendEventNotifications), ("Calendar event feed entry <gCal:sendEventNotifications> element " + "must be an instance of gdata.calendar.SendEventNotifications: %s") % ( an_event.send_event_notifications,)) # Assert the <gCal:sendEventNotifications> are as expected self.assertEquals( self.calendar_event_feed.entry[0].send_event_notifications.value, 'false') self.assertEquals( self.calendar_event_feed.entry[2].send_event_notifications.value, 'true') def testQuickAdd(self): """Test the existence of <gCal:quickadd> and verifies the value""" entry = gdata.calendar.CalendarEventEntry() entry.quick_add = gdata.calendar.QuickAdd(value='true') unmarshalled_entry = entry.ToString() tag = '{%s}quickadd' % (gdata.calendar.GCAL_NAMESPACE) marshalled_entry = ElementTree.fromstring(unmarshalled_entry).find(tag) self.assert_(marshalled_entry.attrib['value'],'true') self.assert_(marshalled_entry.tag,tag) def testEventStatus(self): """Test the existence of <gd:eventStatus> and verifies the value""" # Assert that the element exists and is of the appropriate type and value for an_event in self.calendar_event_feed.entry: self.assert_(isinstance(an_event.event_status, gdata.calendar.EventStatus), ("Calendar event feed entry <gd:eventStatus> element " + "must be an instance of gdata.calendar.EventStatus: %s") % ( an_event.event_status,)) # Assert the <gd:eventStatus> are as expected self.assertEquals( self.calendar_event_feed.entry[0].event_status.value, 'CANCELED') self.assertEquals( self.calendar_event_feed.entry[1].event_status.value, 'CONFIRMED') def testComments(self): """Tests the existence of <atom:comments> and verifies the value""" # Assert that the element exists and is of the appropriate type and value for an_event in self.calendar_event_feed.entry: self.assert_(an_event.comments is None or isinstance(an_event.comments, gdata.calendar.Comments), ("Calendar event feed entry <gd:comments> element " + "must 
be an instance of gdata.calendar.Comments: %s") % ( an_event.comments,)) def testVisibility(self): """Test the existence of <gd:visibility> and verifies the value""" # Assert that the element exists and is of the appropriate type and value for an_event in self.calendar_event_feed.entry: self.assert_(isinstance(an_event.visibility, gdata.calendar.Visibility), ("Calendar event feed entry <gd:visibility> element " + "must be an instance of gdata.calendar.Visibility: %s") % ( an_event.visibility,)) # Assert the <gd:visibility> are as expected self.assertEquals( self.calendar_event_feed.entry[0].visibility.value, 'DEFAULT') self.assertEquals( self.calendar_event_feed.entry[1].visibility.value, 'PRIVATE') self.assertEquals( self.calendar_event_feed.entry[2].visibility.value, 'PUBLIC') def testTransparency(self): """Test the existence of <gd:transparency> and verifies the value""" # Assert that the element exists and is of the appropriate type and value for an_event in self.calendar_event_feed.entry: self.assert_(isinstance(an_event.transparency, gdata.calendar.Transparency), ("Calendar event feed entry <gd:transparency> element " + "must be an instance of gdata.calendar.Transparency: %s") % ( an_event.transparency,)) # Assert the <gd:transparency> are as expected self.assertEquals( self.calendar_event_feed.entry[0].transparency.value, 'OPAQUE') self.assertEquals( self.calendar_event_feed.entry[1].transparency.value, 'OPAQUE') self.assertEquals( self.calendar_event_feed.entry[2].transparency.value, 'OPAQUE') # TODO: TEST VALUES OF VISIBILITY OTHER THAN OPAQUE def testWhere(self): """Tests the existence of a <gd:where> in the entries and verifies the value""" # Assert that each entry has a where value which is an gdata.calendar.Where for an_entry in self.calendar_event_feed.entry: for a_where in an_entry.where: self.assert_(isinstance(a_where, gdata.calendar.Where), "Calendar event entry <gd:where> element must be an instance of " + "gdata.calendar.Where: %s" % a_where) # 
Assert one of the values for where is as expected self.assertEquals(self.calendar_event_feed.entry[1].where[0].value_string, 'Dolores Park with Kim') def testWhenAndReminder(self): """Tests the existence of a <gd:when> and <gd:reminder> in the entries and verifies the values""" # Assert that each entry's when value is a gdata.calendar.When # Assert that each reminder is a gdata.calendar.Reminder for an_entry in self.calendar_event_feed.entry: for a_when in an_entry.when: self.assert_(isinstance(a_when, gdata.calendar.When), "Calendar event entry <gd:when> element must be an instance " + "of gdata.calendar.When: %s" % a_when) for a_reminder in a_when.reminder: self.assert_(isinstance(a_reminder, gdata.calendar.Reminder), "Calendar event entry <gd:reminder> element must be an " + "instance of gdata.calendar.Reminder: %s" % a_reminder) # Assert one of the values for when is as expected self.assertEquals(self.calendar_event_feed.entry[0].when[0].start_time, '2007-03-23T12:00:00.000-07:00') self.assertEquals(self.calendar_event_feed.entry[0].when[0].end_time, '2007-03-23T13:00:00.000-07:00') # Assert the reminder child of when is as expected self.assertEquals( self.calendar_event_feed.entry[0].when[0].reminder[0].minutes, '10') self.assertEquals( self.calendar_event_feed.entry[1].when[0].reminder[0].minutes, '20') def testBatchRequestParsing(self): batch_request = gdata.calendar.CalendarEventFeedFromString( test_data.CALENDAR_BATCH_REQUEST) self.assertEquals(len(batch_request.entry), 4) # Iterate over the batch request entries and match the operation with # the batch id. These values are hard coded to match the test data. 
for entry in batch_request.entry: if entry.batch_id.text == '1': self.assertEquals(entry.batch_operation.type, 'insert') if entry.batch_id.text == '2': self.assertEquals(entry.batch_operation.type, 'query') if entry.batch_id.text == '3': self.assertEquals(entry.batch_operation.type, 'update') self.assertEquals(entry.title.text, 'Event updated via batch') if entry.batch_id.text == '4': self.assertEquals(entry.batch_operation.type, 'delete') self.assertEquals(entry.id.text, 'http://www.google.com/calendar/feeds/default/' 'private/full/d8qbg9egk1n6lhsgq1sjbqffqc') self.assertEquals(entry.GetEditLink().href, 'http://www.google.com/calendar/feeds/default/' 'private/full/d8qbg9egk1n6lhsgq1sjbqffqc/' '63326018324') def testBatchResponseParsing(self): batch_response = gdata.calendar.CalendarEventFeedFromString( test_data.CALENDAR_BATCH_RESPONSE) self.assertEquals(len(batch_response.entry), 4) for entry in batch_response.entry: if entry.batch_id.text == '1': self.assertEquals(entry.batch_operation.type, 'insert') self.assertEquals(entry.batch_status.code, '201') self.assertEquals(entry.batch_status.reason, 'Created') self.assertEquals(entry.id.text, 'http://www.google.com/calendar/' 'feeds/default/private/full/' 'n9ug78gd9tv53ppn4hdjvk68ek') if entry.batch_id.text == '2': self.assertEquals(entry.batch_operation.type, 'query') if entry.batch_id.text == '3': self.assertEquals(entry.batch_operation.type, 'update') if entry.batch_id.text == '4': self.assertEquals(entry.batch_operation.type, 'delete') self.assertEquals(entry.id.text, 'http://www.google.com/calendar/' 'feeds/default/private/full/' 'd8qbg9egk1n6lhsgq1sjbqffqc') # TODO add reminder tests for absolute_time and hours/seconds (if possible) # TODO test recurrence and recurrenceexception # TODO test originalEvent class CalendarWebContentTest(unittest.TestCase): def setUp(self): self.calendar_event_feed = ( gdata.calendar.CalendarEventFeedFromString( test_data.CALENDAR_FULL_EVENT_FEED)) def 
testAddSimpleWebContentEventEntry(self): """Verifies that we can add a web content link to an event entry.""" title = "Al Einstein's Birthday!" href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif' type = 'image/jpeg' url = 'http://gdata.ops.demo.googlepages.com/einstein.jpg' width = '300' height = '225' # Create a web content event event = gdata.calendar.CalendarEventEntry() web_content = gdata.calendar.WebContent(url=url, width=width, height=height) web_content_link = gdata.calendar.WebContentLink(title=title, href=href, link_type=type, web_content=web_content) event.link.append(web_content_link) # Verify the web content link exists and contains the expected data web_content_link = event.GetWebContentLink() self.assertValidWebContentLink(title, href, type, web_content_link) # Verify the web content element exists and contains the expected data web_content_element = web_content_link.web_content self.assertValidSimpleWebContent(url, width, height, web_content_element) def testAddWebContentGadgetEventEntry(self): """Verifies that we can add a web content gadget link to an event entry.""" title = "Date and Time Gadget" href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif' url = 'http://google.com/ig/modules/datetime.xml' type = 'application/x-google-gadgets+xml' width = '300' height = '200' pref_name = 'color' pref_value = 'green' # Create a web content event event = gdata.calendar.CalendarEventEntry() web_content = gdata.calendar.WebContent(url=url, width=width, height=height) web_content.gadget_pref.append( gdata.calendar.WebContentGadgetPref(name=pref_name, value=pref_value)) web_content_link = gdata.calendar.WebContentLink(title=title, href=href, web_content=web_content, link_type=type) event.link.append(web_content_link) # Verify the web content link exists and contains the expected data web_content_link = event.GetWebContentLink() self.assertValidWebContentLink(title, href, type, web_content_link) # Verify the web content element exists and 
contains the expected data web_content_element = web_content_link.web_content self.assertValidWebContentGadget(url, width, height, pref_name, pref_value, web_content_element) def testFromXmlToSimpleWebContent(self): """Verifies that we can read a web content link from an event entry.""" # Expected values (from test_data.py file) title = 'World Cup' href = 'http://www.google.com/calendar/images/google-holiday.gif' type = 'image/gif' url = 'http://www.google.com/logos/worldcup06.gif' width = '276' height = '120' # Note: The tenth event entry contains web content web_content_event = self.calendar_event_feed.entry[9] # Verify the web content link exists and contains the expected data web_content_link = web_content_event.GetWebContentLink() self.assertValidWebContentLink(title, href, type, web_content_link) # Verify the web content element exists and contains the expected data web_content_element = web_content_link.web_content self.assertValidSimpleWebContent(url, width, height, web_content_element) def testFromXmlToWebContentGadget(self): """Verifies that we can read a web content link from an event entry.""" # Expected values (from test_data.py file) title = 'Date and Time Gadget' href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif' url = 'http://google.com/ig/modules/datetime.xml' type = 'application/x-google-gadgets+xml' width = '300' height = '136' pref_name = 'color' pref_value = 'green' # Note: The eleventh event entry contains web content web_content_event = self.calendar_event_feed.entry[10] # Verify the web content link exists and contains the expected data web_content_link = web_content_event.GetWebContentLink() self.assertValidWebContentLink(title, href, type, web_content_link) # Verify the web content element exists and contains the expected data web_content_element = web_content_link.web_content self.assertValidWebContentGadget(url, width, height, pref_name, pref_value, web_content_element) def assertValidWebContentLink(self, 
expected_title=None, expected_href=None, expected_type=None, web_content_link=None): """Asserts that the web content link is the correct type and contains the expected values""" self.assert_(isinstance(web_content_link, gdata.calendar.WebContentLink), "Web content link element must be an " + "instance of gdata.calendar.WebContentLink: %s" % web_content_link) expected_rel = '%s/%s' % (gdata.calendar.GCAL_NAMESPACE, 'webContent') self.assertEquals(expected_rel, web_content_link.rel) self.assertEqual(expected_title, web_content_link.title) self.assertEqual(expected_href, web_content_link.href) self.assertEqual(expected_type, web_content_link.type) def assertValidSimpleWebContent(self, expected_url=None, expected_width=None, expected_height=None, web_content_element=None): """Asserts that the web content element is the correct type and contains the expected values""" self.assert_(isinstance(web_content_element, gdata.calendar.WebContent), "Calendar event entry <gCal:webContent> element must be an " + "instance of gdata.calendar.WebContent: %s" % web_content_element) self.assertEquals(expected_width, web_content_element.width) self.assertEquals(expected_height, web_content_element.height) self.assertEquals(expected_url, web_content_element.url) def assertValidWebContentGadget(self, expected_url=None, expected_width=None, expected_height=None, expected_pref_name=None, expected_pref_value=None, web_content_element=None): """Asserts that the web content element is the correct type and contains the expected values""" self.assert_(isinstance(web_content_element, gdata.calendar.WebContent), "Calendar event entry <gCal:webContent> element must be an " + "instance of gdata.calendar.WebContent: %s" % web_content_element) self.assertEquals(expected_width, web_content_element.width) self.assertEquals(expected_height, web_content_element.height) self.assertEquals(expected_url, web_content_element.url) self.assertEquals(expected_pref_name, web_content_element.gadget_pref[0].name) 
self.assertEquals(expected_pref_value, web_content_element.gadget_pref[0].value) def testSampleCode(self): # From http://code.google.com/apis/calendar/gadgets/event/ wc = gdata.calendar.WebContent() wc.url = 'http://www.thefreedictionary.com/_/WoD/wod-module.xml' wc.width = '300' wc.height = '136' wc.gadget_pref.append(gdata.calendar.WebContentGadgetPref(name='Days', value='1')) wc.gadget_pref.append(gdata.calendar.WebContentGadgetPref(name='Format', value='0')) wcl = gdata.calendar.WebContentLink() wcl.title = 'Word of the Day' wcl.href = 'http://www.thefreedictionary.com/favicon.ico' wcl.type = 'application/x-google-gadgets+xml' wcl.web_content = wc self.assertEqual(wcl.web_content.url, 'http://www.thefreedictionary.com/_/WoD/wod-module.xml') self.assertEqual(wcl.type, 'application/x-google-gadgets+xml') self.assertEqual(wcl.web_content.height, '136') class ExtendedPropertyTest(unittest.TestCase): def testExtendedPropertyToAndFromXml(self): ep = gdata.calendar.ExtendedProperty(name='test') ep.value = 'val' xml_string = ep.ToString() ep2 = gdata.ExtendedPropertyFromString(xml_string) self.assertEquals(ep.name, ep2.name) self.assertEquals(ep.value, ep2.value) if __name__ == '__main__': unittest.main()
apache-2.0
shsingh/ansible
lib/ansible/modules/cloud/azure/azure_rm_servicebus.py
24
6521
#!/usr/bin/python # # Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_servicebus version_added: "2.8" short_description: Manage Azure Service Bus description: - Create, update or delete an Azure Service Bus namespaces. options: resource_group: description: - Name of resource group. required: true name: description: - Name of the servicebus namespace. required: true state: description: - Assert the state of the servicebus. Use C(present) to create or update and use C(absen) to delete. default: present choices: - absent - present location: description: - The servicebus's location. sku: description: - Namespace SKU. choices: - standard - basic - premium default: standard extends_documentation_fragment: - azure - azure_tags author: - Yuwei Zhou (@yuwzho) ''' EXAMPLES = ''' - name: Create a namespace azure_rm_servicebus: name: deadbeef location: eastus ''' RETURN = ''' id: description: - Current state of the service bus. 
returned: success type: str sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/namespaces/myServicebus" ''' try: from msrestazure.azure_exceptions import CloudError except ImportError: # This is handled in azure_rm_common pass from ansible.module_utils.azure_rm_common import AzureRMModuleBase from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake from ansible.module_utils._text import to_native from datetime import datetime, timedelta class AzureRMServiceBus(AzureRMModuleBase): def __init__(self): self.module_arg_spec = dict( resource_group=dict(type='str', required=True), name=dict(type='str', required=True), location=dict(type='str'), state=dict(type='str', default='present', choices=['present', 'absent']), sku=dict(type='str', choices=['basic', 'standard', 'premium'], default='standard') ) self.resource_group = None self.name = None self.state = None self.sku = None self.location = None self.results = dict( changed=False, id=None ) super(AzureRMServiceBus, self).__init__(self.module_arg_spec, supports_check_mode=True) def exec_module(self, **kwargs): for key in list(self.module_arg_spec.keys()): setattr(self, key, kwargs[key]) changed = False if not self.location: resource_group = self.get_resource_group(self.resource_group) self.location = resource_group.location original = self.get() if self.state == 'present' and not original: self.check_name() changed = True if not self.check_mode: original = self.create() elif self.state == 'absent' and original: changed = True original = None if not self.check_mode: self.delete() self.results['deleted'] = True if original: self.results = self.to_dict(original) self.results['changed'] = changed return self.results def check_name(self): try: check_name = self.servicebus_client.namespaces.check_name_availability_method(self.name) if not check_name or not check_name.name_available: self.fail("Error creating namespace {0} - 
{1}".format(self.name, check_name.message or str(check_name))) except Exception as exc: self.fail("Error creating namespace {0} - {1}".format(self.name, exc.message or str(exc))) def create(self): self.log('Cannot find namespace, creating a one') try: sku = self.servicebus_models.SBSku(name=str.capitalize(self.sku)) poller = self.servicebus_client.namespaces.create_or_update(self.resource_group, self.name, self.servicebus_models.SBNamespace(location=self.location, sku=sku)) ns = self.get_poller_result(poller) except Exception as exc: self.fail('Error creating namespace {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc))) return ns def delete(self): try: self.servicebus_client.namespaces.delete(self.resource_group, self.name) return True except Exception as exc: self.fail("Error deleting route {0} - {1}".format(self.name, str(exc))) def get(self): try: return self.servicebus_client.namespaces.get(self.resource_group, self.name) except Exception: return None def to_dict(self, instance): result = dict() attribute_map = self.servicebus_models.SBNamespace._attribute_map for attribute in attribute_map.keys(): value = getattr(instance, attribute) if not value: continue if isinstance(value, self.servicebus_models.SBSku): result[attribute] = value.name.lower() elif isinstance(value, datetime): result[attribute] = str(value) elif isinstance(value, str): result[attribute] = to_native(value) elif attribute == 'max_size_in_megabytes': result['max_size_in_mb'] = value else: result[attribute] = value return result def is_valid_timedelta(value): if value == timedelta(10675199, 10085, 477581): return None return value def main(): AzureRMServiceBus() if __name__ == '__main__': main()
gpl-3.0
joseamaya/tambox
contabilidad/urls.py
1
4452
from django.conf.urls import url from django.contrib.auth.decorators import login_required from contabilidad.views import Tablero, ListadoCuentasContables, \ CargarCuentasContables, ListadoTiposDocumentos, CrearTipoDocumento, \ EliminarTipoDocumento, DetalleTipoDocumento, ModificarTipoDocumento, \ ReporteExcelCuentasContables, ModificarCuentaContable, CrearCuentaContable, \ DetalleCuentaContable, CrearImpuesto, DetalleImpuesto, ListadoImpuestos, \ ModificarImpuesto, CrearConfiguracion, ModificarConfiguracion, \ ListadoFormasPago, CrearFormaPago, ModificarFormaPago, DetalleFormaPago, \ ReporteExcelFormasPago, EliminarFormaPago, ReporteExcelTiposDocumentos, \ CargarTiposDocumentos, ListadoTiposCambio, CrearTipoCambio, DetalleTipoCambio, ModificarTipoCambio, \ ObtenerTipoCambio, ListadoTiposExistencias, CargarTiposExistencias urlpatterns = [ url(r'^tablero/$', login_required(Tablero.as_view()), name="tablero"), url(r'^formas_pago/$', login_required(ListadoFormasPago.as_view()), name="formas_pago"), url(r'^crear_forma_pago/$', login_required(CrearFormaPago.as_view()), name="crear_forma_pago"), url(r'^crear_tipo_cambio/$', login_required(CrearTipoCambio.as_view()), name="crear_tipo_cambio"), url(r'^modificar_forma_pago/(?P<pk>.+)/$', login_required(ModificarFormaPago.as_view()), name="modificar_forma_pago"), url(r'^detalle_forma_pago/(?P<pk>.+)/$', login_required(DetalleFormaPago.as_view()), name="detalle_forma_pago"), url(r'^detalle_tipo_cambio/(?P<pk>.+)/$', login_required(DetalleTipoCambio.as_view()), name="detalle_tipo_cambio"), url(r'^maestro_formas_pago_excel/$', login_required(ReporteExcelFormasPago.as_view()), name="maestro_formas_pago_excel"), url(r'^eliminar_forma_pago/$', login_required(EliminarFormaPago.as_view()), name="eliminar_forma_pago"), url(r'^cuentas_contables/$', (ListadoCuentasContables.as_view()), name="cuentas_contables"), url(r'^tipos_existencias/$', (ListadoTiposExistencias.as_view()), name="tipos_existencias"), url(r'^configuracion/$', 
(CrearConfiguracion.as_view()), name="configuracion"), url(r'^tipos_documentos/$', (ListadoTiposDocumentos.as_view()), name="tipos_documentos"), url(r'^tipos_cambio/$', (ListadoTiposCambio.as_view()), name="tipos_cambio"), url(r'^impuestos/$', (ListadoImpuestos.as_view()), name="impuestos"), url(r'^detalle_tipo_documento/(?P<pk>.+)/$', (DetalleTipoDocumento.as_view()), name="detalle_tipo_documento"), url(r'^detalle_cuenta_contable/(?P<pk>.+)/$', (DetalleCuentaContable.as_view()), name="detalle_cuenta_contable"), url(r'^detalle_impuesto/(?P<pk>.+)/$', (DetalleImpuesto.as_view()), name="detalle_impuesto"), url(r'^cargar_cuentas_contables/$', (CargarCuentasContables.as_view()), name="cargar_cuentas_contables"), url(r'^cargar_tipos_documento/$', (CargarTiposDocumentos.as_view()), name="cargar_tipos_documento"), url(r'^crear_tipo_documento/$', (CrearTipoDocumento.as_view()), name="crear_tipo_documento"), url(r'^crear_impuesto/$', (CrearImpuesto.as_view()), name="crear_impuesto"), url(r'^crear_cuenta_contable/$', (CrearCuentaContable.as_view()), name="crear_cuenta_contable"), url(r'^cargar_tipos_existencias/$', (CargarTiposExistencias.as_view()), name="cargar_tipos_existencias"), url(r'^eliminar_tipo_documento/$', (EliminarTipoDocumento.as_view()), name="eliminar_tipo_documento"), url(r'^modificar_tipo_documento/(?P<pk>.+)/$', (ModificarTipoDocumento.as_view()), name="modificar_tipo_documento"), url(r'^modificar_tipo_cambio/(?P<pk>.+)/$', (ModificarTipoCambio.as_view()), name="modificar_tipo_cambio"), url(r'^modificar_cuenta_contable/(?P<pk>.+)/$', (ModificarCuentaContable.as_view()), name="modificar_cuenta_contable"), url(r'^modificar_impuesto/(?P<pk>.+)/$', (ModificarImpuesto.as_view()), name="modificar_impuesto"), url(r'^modificar_configuracion/(?P<pk>.+)/$', (ModificarConfiguracion.as_view()), name="modificar_configuracion"), url(r'^maestro_cuentas_contables_excel/$', (ReporteExcelCuentasContables.as_view()), name="maestro_cuentas_contables_excel"), 
url(r'^maestro_tipos_documentos_excel/$', (ReporteExcelTiposDocumentos.as_view()), name="maestro_tipos_documentos_excel"), url(r'^obtener_tipo_cambio/$', (ObtenerTipoCambio.as_view()), name="obtener_tipo_cambio"), ]
gpl-3.0
kyubifire/softlayer-python
SoftLayer/CLI/user/permissions.py
1
1893
"""List A users permissions.""" import click import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import formatting from SoftLayer.CLI import helpers @click.command() @click.argument('identifier') @environment.pass_env def cli(env, identifier): """User Permissions. TODO change to list all permissions, and which users have them""" mgr = SoftLayer.UserManager(env.client) user_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'username') object_mask = "mask[id, permissions, isMasterUserFlag, roles]" user = mgr.get_user(user_id, object_mask) all_permissions = mgr.get_all_permissions() user_permissions = perms_to_dict(user['permissions']) if user['isMasterUserFlag']: click.secho('This account is the Master User and has all permissions enabled', fg='green') env.fout(roles_table(user)) env.fout(permission_table(user_permissions, all_permissions)) def perms_to_dict(perms): """Takes a list of permissions and transforms it into a dictionary for better searching""" permission_dict = {} for perm in perms: permission_dict[perm['keyName']] = True return permission_dict def permission_table(user_permissions, all_permissions): """Creates a table of available permissions""" table = formatting.Table(['Description', 'KeyName', 'Assigned']) table.align['KeyName'] = 'l' table.align['Description'] = 'l' table.align['Assigned'] = 'l' for perm in all_permissions: assigned = user_permissions.get(perm['keyName'], False) table.add_row([perm['name'], perm['keyName'], assigned]) return table def roles_table(user): """Creates a table for a users roles""" table = formatting.Table(['id', 'Role Name', 'Description']) for role in user['roles']: table.add_row([role['id'], role['name'], role['description']]) return table
mit
koparasy/gemfi
src/sim/Root.py
39
3341
# Copyright (c) 2005-2007 The Regents of The University of Michigan # Copyright (c) 2010-2013 Advanced Micro Devices, Inc. # Copyright (c) 2013 Mark D. Hill and David A. Wood # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Authors: Nathan Binkert from m5.SimObject import SimObject from m5.defines import buildEnv from m5.params import * from m5.util import fatal class Root(SimObject): _the_instance = None def __new__(cls, **kwargs): if Root._the_instance: fatal("Attempt to allocate multiple instances of Root.") return None # first call: allocate the unique instance # # If SimObject ever implements __new__, we may want to pass # kwargs here, but for now this goes straight to # object.__new__ which prints an ugly warning if you pass it # args. Seems like a bad design but that's the way it is. Root._the_instance = SimObject.__new__(cls) return Root._the_instance @classmethod def getInstance(cls): return Root._the_instance def path(self): return 'root' type = 'Root' cxx_header = "sim/root.hh" # By default, root sim object and hence all other sim objects schedule # event on the eventq with index 0. eventq_index = 0 # Simulation Quantum for multiple main event queue simulation. # Needs to be set explicitly for a multi-eventq simulation. sim_quantum = Param.Tick(0, "simulation quantum") full_system = Param.Bool("if this is a full system simulation") # Time syncing prevents the simulation from running faster than real time. time_sync_enable = Param.Bool(False, "whether time syncing is enabled") time_sync_period = Param.Clock("100ms", "how often to sync with real time") time_sync_spin_threshold = \ Param.Clock("100us", "when less than this much time is left, spin")
bsd-3-clause
staslev/beam
sdks/python/apache_beam/examples/wordcount_fnapi.py
3
5575
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A word-counting workflow using the experimental FnApi. For the stable wordcount example see wordcount.py. """ # TODO(BEAM-2887): Merge with wordcount.py. from __future__ import absolute_import import argparse import logging import apache_beam as beam from apache_beam.io import ReadFromText # TODO(BEAM-2887): Enable after the issue is fixed. # from apache_beam.io import WriteToText from apache_beam.metrics import Metrics from apache_beam.metrics.metric import MetricsFilter from apache_beam.options.pipeline_options import DebugOptions from apache_beam.options.pipeline_options import PipelineOptions class WordExtractingDoFn(beam.DoFn): """Parse each line of input text into words.""" def __init__(self): super(WordExtractingDoFn, self).__init__() self.words_counter = Metrics.counter(self.__class__, 'words') self.word_lengths_counter = Metrics.counter(self.__class__, 'word_lengths') self.word_lengths_dist = Metrics.distribution( self.__class__, 'word_len_dist') self.empty_line_counter = Metrics.counter(self.__class__, 'empty_lines') def process(self, element): """Returns an iterator over the words of this element. The element is a line of text. If the line is blank, note that, too. 
Args: element: the element being processed Returns: The processed element. """ # TODO(BEAM-3041): Move this import to top of the file after the fix. # Portable containers does not support save main session, and importing here # is required. This is only needed for running experimental jobs with FnApi. import re text_line = element.strip() if not text_line: self.empty_line_counter.inc(1) words = re.findall(r'[A-Za-z\']+', text_line) for w in words: self.words_counter.inc() self.word_lengths_counter.inc(len(w)) self.word_lengths_dist.update(len(w)) return words def run(argv=None): """Main entry point; defines and runs the wordcount pipeline.""" parser = argparse.ArgumentParser() parser.add_argument('--input', dest='input', default='gs://dataflow-samples/shakespeare/kinglear.txt', help='Input file to process.') parser.add_argument('--output', dest='output', required=True, help='Output file to write results to.') known_args, pipeline_args = parser.parse_known_args(argv) pipeline_options = PipelineOptions(pipeline_args) p = beam.Pipeline(options=pipeline_options) # Ensure that the experiment flag is set explicitly by the user. debug_options = pipeline_options.view_as(DebugOptions) use_fn_api = ( debug_options.experiments and 'beam_fn_api' in debug_options.experiments) assert use_fn_api, 'Enable beam_fn_api experiment, in order run this example.' # Read the text file[pattern] into a PCollection. lines = p | 'read' >> ReadFromText(known_args.input) # Count the occurrences of each word. def count_ones(word_ones): (word, ones) = word_ones return (word, sum(ones)) counts = (lines | 'split' >> (beam.ParDo(WordExtractingDoFn()) .with_output_types(unicode)) | 'pair_with_one' >> beam.Map(lambda x: (x, 1)) | 'group' >> beam.GroupByKey() | 'count' >> beam.Map(count_ones)) # Format the counts into a PCollection of strings. 
def format_result(word_count): (word, count) = word_count return '%s: %s' % (word, count) # pylint: disable=unused-variable output = counts | 'format' >> beam.Map(format_result) # Write the output using a "Write" transform that has side effects. # pylint: disable=expression-not-assigned # TODO(BEAM-2887): Enable after the issue is fixed. # output | 'write' >> WriteToText(known_args.output) result = p.run() result.wait_until_finish() # Do not query metrics when creating a template which doesn't run if (not hasattr(result, 'has_job') # direct runner or result.has_job): # not just a template creation empty_lines_filter = MetricsFilter().with_name('empty_lines') query_result = result.metrics().query(empty_lines_filter) if query_result['counters']: empty_lines_counter = query_result['counters'][0] logging.info('number of empty lines: %d', empty_lines_counter.committed) word_lengths_filter = MetricsFilter().with_name('word_len_dist') query_result = result.metrics().query(word_lengths_filter) if query_result['distributions']: word_lengths_dist = query_result['distributions'][0] logging.info('average word length: %d', word_lengths_dist.committed.mean) if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) run()
apache-2.0
nicobustillos/odoo
openerp/tools/yaml_import.py
35
43133
# -*- coding: utf-8 -*- import threading import types import time # used to eval time.strftime expressions from datetime import datetime, timedelta import logging import openerp import openerp.sql_db as sql_db import openerp.workflow import misc from config import config import yaml_tag import yaml import re from lxml import etree from openerp import SUPERUSER_ID # YAML import needs both safe and unsafe eval, but let's # default to /safe/. unsafe_eval = eval from safe_eval import safe_eval as eval import assertion_report _logger = logging.getLogger(__name__) class YamlImportException(Exception): pass class YamlImportAbortion(Exception): pass def _is_yaml_mapping(node, tag_constructor): value = isinstance(node, types.DictionaryType) \ and len(node.keys()) == 1 \ and isinstance(node.keys()[0], tag_constructor) return value def is_comment(node): return isinstance(node, types.StringTypes) def is_assert(node): return isinstance(node, yaml_tag.Assert) \ or _is_yaml_mapping(node, yaml_tag.Assert) def is_record(node): return _is_yaml_mapping(node, yaml_tag.Record) def is_python(node): return _is_yaml_mapping(node, yaml_tag.Python) def is_menuitem(node): return isinstance(node, yaml_tag.Menuitem) \ or _is_yaml_mapping(node, yaml_tag.Menuitem) def is_function(node): return isinstance(node, yaml_tag.Function) \ or _is_yaml_mapping(node, yaml_tag.Function) def is_report(node): return isinstance(node, yaml_tag.Report) def is_workflow(node): return isinstance(node, yaml_tag.Workflow) def is_act_window(node): return isinstance(node, yaml_tag.ActWindow) def is_delete(node): return isinstance(node, yaml_tag.Delete) def is_context(node): return isinstance(node, yaml_tag.Context) def is_url(node): return isinstance(node, yaml_tag.Url) def is_eval(node): return isinstance(node, yaml_tag.Eval) def is_ref(node): return isinstance(node, yaml_tag.Ref) \ or _is_yaml_mapping(node, yaml_tag.Ref) def is_ir_set(node): return _is_yaml_mapping(node, yaml_tag.IrSet) def is_string(node): return 
isinstance(node, basestring) class RecordDictWrapper(dict): """ Used to pass a record as locals in eval: records do not strictly behave like dict, so we force them to. """ def __init__(self, record): self.record = record def __getitem__(self, key): if key in self.record: return self.record[key] return dict.__getitem__(self, key) class YamlInterpreter(object): def __init__(self, cr, module, id_map, mode, filename, report=None, noupdate=False, loglevel=logging.DEBUG): self.cr = cr self.module = module self.id_map = id_map self.mode = mode self.filename = filename if report is None: report = assertion_report.assertion_report() self.assertion_report = report self.noupdate = noupdate self.loglevel = loglevel self.pool = openerp.registry(cr.dbname) self.uid = 1 self.context = {} # opererp context self.eval_context = {'ref': self._ref(), '_ref': self._ref(), # added '_ref' so that record['ref'] is possible 'time': time, 'datetime': datetime, 'timedelta': timedelta} self.env = openerp.api.Environment(self.cr, self.uid, self.context) def _log(self, *args, **kwargs): _logger.log(self.loglevel, *args, **kwargs) def _ref(self): return lambda xml_id: self.get_id(xml_id) def get_model(self, model_name): return self.pool[model_name] def validate_xml_id(self, xml_id): id = xml_id if '.' in xml_id: module, id = xml_id.split('.', 1) assert '.' not in id, "The ID reference '%s' must contains maximum one dot.\n" \ "It is used to refer to other modules ID, in the form: module.record_id" \ % (xml_id,) if module != self.module: module_count = self.pool['ir.module.module'].search_count(self.cr, self.uid, \ ['&', ('name', '=', module), ('state', 'in', ['installed'])]) assert module_count == 1, 'The ID "%s" refers to an uninstalled module.' % (xml_id,) if len(id) > 64: # TODO where does 64 come from (DB is 128)? 
should be a constant or loaded form DB _logger.error('id: %s is to long (max: 64)', id) def get_id(self, xml_id): if xml_id is False or xml_id is None: return False #if not xml_id: # raise YamlImportException("The xml_id should be a non empty string.") elif isinstance(xml_id, types.IntType): id = xml_id elif xml_id in self.id_map: id = self.id_map[xml_id] else: if '.' in xml_id: module, checked_xml_id = xml_id.split('.', 1) else: module = self.module checked_xml_id = xml_id try: _, id = self.pool['ir.model.data'].get_object_reference(self.cr, self.uid, module, checked_xml_id) self.id_map[xml_id] = id except ValueError: raise ValueError("""%s not found when processing %s. This Yaml file appears to depend on missing data. This often happens for tests that belong to a module's test suite and depend on each other.""" % (checked_xml_id, self.filename)) return id def get_record(self, xml_id): if '.' not in xml_id: xml_id = "%s.%s" % (self.module, xml_id) return self.env.ref(xml_id) def get_context(self, node, eval_dict): context = self.context.copy() if node.context: context.update(eval(node.context, eval_dict)) return context def isnoupdate(self, node): return self.noupdate or node.noupdate or False def _get_first_result(self, results, default=False): if len(results): value = results[0] if isinstance(value, types.TupleType): value = value[0] else: value = default return value def process_comment(self, node): return node def _log_assert_failure(self, msg, *args): self.assertion_report.record_failure() _logger.error(msg, *args) def _get_assertion_id(self, assertion): if assertion.id: ids = [self.get_id(assertion.id)] elif assertion.search: q = eval(assertion.search, self.eval_context) ids = self.pool[assertion.model].search(self.cr, self.uid, q, context=assertion.context) else: raise YamlImportException('Nothing to assert: you must give either an id or a search criteria.') return ids def process_assert(self, node): if isinstance(node, dict): assertion, expressions = 
node.items()[0] else: assertion, expressions = node, [] if self.isnoupdate(assertion) and self.mode != 'init': _logger.warning('This assertion was not evaluated ("%s").', assertion.string) return model = self.get_model(assertion.model) ids = self._get_assertion_id(assertion) if assertion.count is not None and len(ids) != assertion.count: msg = 'assertion "%s" failed!\n' \ ' Incorrect search count:\n' \ ' expected count: %d\n' \ ' obtained count: %d\n' args = (assertion.string, assertion.count, len(ids)) self._log_assert_failure(msg, *args) else: context = self.get_context(assertion, self.eval_context) for id in ids: record = model.browse(self.cr, self.uid, id, context) for test in expressions: try: success = unsafe_eval(test, self.eval_context, RecordDictWrapper(record)) except Exception, e: _logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True) raise YamlImportAbortion(e) if not success: msg = 'Assertion "%s" FAILED\ntest: %s\n' args = (assertion.string, test) for aop in ('==', '!=', '<>', 'in', 'not in', '>=', '<=', '>', '<'): if aop in test: left, right = test.split(aop,1) lmsg = '' rmsg = '' try: lmsg = unsafe_eval(left, self.eval_context, RecordDictWrapper(record)) except Exception, e: lmsg = '<exc>' try: rmsg = unsafe_eval(right, self.eval_context, RecordDictWrapper(record)) except Exception, e: rmsg = '<exc>' msg += 'values: ! 
%s %s %s' args += ( lmsg, aop, rmsg ) break self._log_assert_failure(msg, *args) return else: # all tests were successful for this assertion tag (no break) self.assertion_report.record_success() def _coerce_bool(self, value, default=False): if isinstance(value, types.BooleanType): b = value if isinstance(value, types.StringTypes): b = value.strip().lower() not in ('0', 'false', 'off', 'no') elif isinstance(value, types.IntType): b = bool(value) else: b = default return b def create_osv_memory_record(self, record, fields): model = self.get_model(record.model) context = self.get_context(record, self.eval_context) record_dict = self._create_record(model, fields) id_new = model.create(self.cr, self.uid, record_dict, context=context) self.id_map[record.id] = int(id_new) return record_dict def process_record(self, node): record, fields = node.items()[0] model = self.get_model(record.model) view_id = record.view if view_id and (view_id is not True) and isinstance(view_id, basestring): module = self.module if '.' in view_id: module, view_id = view_id.split('.',1) view_id = self.pool['ir.model.data'].get_object_reference(self.cr, SUPERUSER_ID, module, view_id)[1] if model.is_transient(): record_dict=self.create_osv_memory_record(record, fields) else: self.validate_xml_id(record.id) try: self.pool['ir.model.data']._get_id(self.cr, SUPERUSER_ID, self.module, record.id) default = False except ValueError: default = True if self.isnoupdate(record) and self.mode != 'init': id = self.pool['ir.model.data']._update_dummy(self.cr, SUPERUSER_ID, record.model, self.module, record.id) # check if the resource already existed at the last update if id: self.id_map[record] = int(id) return None else: if not self._coerce_bool(record.forcecreate): return None #context = self.get_context(record, self.eval_context) #TOFIX: record.context like {'withoutemployee':True} should pass from self.eval_context. 
example: test_project.yml in project module context = record.context view_info = False if view_id: varg = view_id if view_id is True: varg = False view_info = model.fields_view_get(self.cr, SUPERUSER_ID, varg, 'form', context) record_dict = self._create_record(model, fields, view_info, default=default) id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, record.model, \ self.module, record_dict, record.id, noupdate=self.isnoupdate(record), mode=self.mode, context=context) self.id_map[record.id] = int(id) if config.get('import_partial'): self.cr.commit() def _create_record(self, model, fields, view_info=None, parent={}, default=True): """This function processes the !record tag in yalm files. It simulates the record creation through an xml view (either specified on the !record tag or the default one for this object), including the calls to on_change() functions, and sending only values for fields that aren't set as readonly. :param model: model instance :param fields: dictonary mapping the field names and their values :param view_info: result of fields_view_get() called on the object :param parent: dictionary containing the values already computed for the parent, in case of one2many fields :param default: if True, the default values must be processed too or not :return: dictionary mapping the field names and their values, ready to use when calling the create() function :rtype: dict """ def _get_right_one2many_view(fg, field_name, view_type): one2many_view = fg[field_name]['views'].get(view_type) # if the view is not defined inline, we call fields_view_get() if not one2many_view: one2many_view = self.pool[fg[field_name]['relation']].fields_view_get(self.cr, SUPERUSER_ID, False, view_type, self.context) return one2many_view def process_val(key, val): if fg[key]['type'] == 'many2one': if type(val) in (tuple,list): val = val[0] elif fg[key]['type'] == 'one2many': if val and isinstance(val, (list,tuple)) and isinstance(val[0], dict): # we want to return only the 
fields that aren't readonly # For that, we need to first get the right tree view to consider for the field `key´ one2many_tree_view = _get_right_one2many_view(fg, key, 'tree') arch = etree.fromstring(one2many_tree_view['arch'].encode('utf-8')) for rec in val: # make a copy for the iteration, as we will alter `rec´ rec_copy = rec.copy() for field_key in rec_copy: # if field is missing in view or has a readonly modifier, drop it field_elem = arch.xpath("//field[@name='%s']" % field_key) if field_elem and (field_elem[0].get('modifiers', '{}').find('"readonly": true') >= 0): # TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in # order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]} del rec[field_key] # now that unwanted values have been removed from val, we can encapsulate it in a tuple as returned value val = map(lambda x: (0,0,x), val) elif fg[key]['type'] == 'many2many': if val and isinstance(val,(list,tuple)) and isinstance(val[0], (int,long)): val = [(6,0,val)] # we want to return only the fields that aren't readonly if el.get('modifiers', '{}').find('"readonly": true') >= 0: # TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in # order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]} return False return val if view_info: arch = etree.fromstring(view_info['arch'].decode('utf-8')) view = arch if len(arch) else False else: view = False fields = fields or {} if view is not False: fg = view_info['fields'] onchange_spec = model._onchange_spec(self.cr, SUPERUSER_ID, view_info, context=self.context) # gather the default values on the object. 
(Can't use `fields´ as parameter instead of {} because we may # have references like `base.main_company´ in the yaml file and it's not compatible with the function) defaults = default and model._add_missing_default_values(self.cr, SUPERUSER_ID, {}, context=self.context) or {} # copy the default values in record_dict, only if they are in the view (because that's what the client does) # the other default values will be added later on by the create(). record_dict = dict([(key, val) for key, val in defaults.items() if key in fg]) # Process all on_change calls nodes = [view] while nodes: el = nodes.pop(0) if el.tag=='field': field_name = el.attrib['name'] assert field_name in fg, "The field '%s' is defined in the form view but not on the object '%s'!" % (field_name, model._name) if field_name in fields: one2many_form_view = None if (view is not False) and (fg[field_name]['type']=='one2many'): # for one2many fields, we want to eval them using the inline form view defined on the parent one2many_form_view = _get_right_one2many_view(fg, field_name, 'form') field_value = self._eval_field(model, field_name, fields[field_name], one2many_form_view or view_info, parent=record_dict, default=default) #call process_val to not update record_dict if values were given for readonly fields val = process_val(field_name, field_value) if val: record_dict[field_name] = val #if (field_name in defaults) and defaults[field_name] == field_value: # print '*** You can remove these lines:', field_name, field_value #if field_name has a default value or a value is given in the yaml file, we must call its on_change() elif field_name not in defaults: continue if not el.attrib.get('on_change', False): continue if el.attrib['on_change'] in ('1', 'true'): # New-style on_change recs = model.browse(self.cr, SUPERUSER_ID, [], self.context) result = recs.onchange(record_dict, field_name, onchange_spec) else: match = re.match("([a-z_1-9A-Z]+)\((.*)\)", el.attrib['on_change']) assert match, "Unable to parse 
the on_change '%s'!" % (el.attrib['on_change'], ) # creating the context class parent2(object): def __init__(self, d): self.d = d def __getattr__(self, name): return self.d.get(name, False) ctx = record_dict.copy() ctx['context'] = self.context ctx['uid'] = SUPERUSER_ID ctx['parent'] = parent2(parent) for a in fg: if a not in ctx: ctx[a] = process_val(a, defaults.get(a, False)) # Evaluation args args = map(lambda x: eval(x, ctx), match.group(2).split(',')) result = getattr(model, match.group(1))(self.cr, SUPERUSER_ID, [], *args) for key, val in (result or {}).get('value', {}).items(): if key in fg: if key not in fields: # do not shadow values explicitly set in yaml. record_dict[key] = process_val(key, val) else: _logger.debug("The returning field '%s' from your on_change call '%s'" " does not exist either on the object '%s', either in" " the view '%s'", key, match.group(1), model._name, view_info['name']) else: nodes = list(el) + nodes else: record_dict = {} for field_name, expression in fields.items(): if field_name in record_dict: continue field_value = self._eval_field(model, field_name, expression, default=False) record_dict[field_name] = field_value return record_dict def process_ref(self, node, column=None): assert node.search or node.id, '!ref node should have a `search` attribute or `id` attribute' if node.search: if node.model: model_name = node.model elif column: model_name = column._obj else: raise YamlImportException('You need to give a model for the search, or a column to infer it.') model = self.get_model(model_name) q = eval(node.search, self.eval_context) ids = model.search(self.cr, self.uid, q) if node.use: instances = model.browse(self.cr, self.uid, ids) value = [inst[node.use] for inst in instances] else: value = ids elif node.id: value = self.get_id(node.id) else: value = None return value def process_eval(self, node): return eval(node.expression, self.eval_context) def _eval_field(self, model, field_name, expression, view_info=False, parent={}, 
default=True): # TODO this should be refactored as something like model.get_field() in bin/osv if field_name in model._columns: column = model._columns[field_name] elif field_name in model._inherit_fields: column = model._inherit_fields[field_name][2] else: raise KeyError("Object '%s' does not contain field '%s'" % (model, field_name)) if is_ref(expression): elements = self.process_ref(expression, column) if column._type in ("many2many", "one2many"): value = [(6, 0, elements)] else: # many2one if isinstance(elements, (list,tuple)): value = self._get_first_result(elements) else: value = elements elif column._type == "many2one": value = self.get_id(expression) elif column._type == "one2many": other_model = self.get_model(column._obj) value = [(0, 0, self._create_record(other_model, fields, view_info, parent, default=default)) for fields in expression] elif column._type == "many2many": ids = [self.get_id(xml_id) for xml_id in expression] value = [(6, 0, ids)] elif column._type == "date" and is_string(expression): # enforce ISO format for string date values, to be locale-agnostic during tests time.strptime(expression, misc.DEFAULT_SERVER_DATE_FORMAT) value = expression elif column._type == "datetime" and is_string(expression): # enforce ISO format for string datetime values, to be locale-agnostic during tests time.strptime(expression, misc.DEFAULT_SERVER_DATETIME_FORMAT) value = expression else: # scalar field if is_eval(expression): value = self.process_eval(expression) else: value = expression # raise YamlImportException('Unsupported column "%s" or value %s:%s' % (field_name, type(expression), expression)) return value def process_context(self, node): self.context = node.__dict__ if node.uid: self.uid = self.get_id(node.uid) if node.noupdate: self.noupdate = node.noupdate self.env = openerp.api.Environment(self.cr, self.uid, self.context) def process_python(self, node): python, statements = node.items()[0] assert python.model or python.id, "!python node must have 
attribute `model` or `id`" if python.id is None: record = self.pool[python.model] elif isinstance(python.id, basestring): record = self.get_record(python.id) else: record = self.env[python.model].browse(python.id) if python.model: assert record._name == python.model, "`id` is not consistent with `model`" statements = "\n" * python.first_line + statements.replace("\r\n", "\n") code_context = { 'self': record, 'model': record._model, 'cr': self.cr, 'uid': self.uid, 'log': self._log, 'context': self.context, 'openerp': openerp, } try: code_obj = compile(statements, self.filename, 'exec') unsafe_eval(code_obj, {'ref': self.get_id}, code_context) except AssertionError, e: self._log_assert_failure('AssertionError in Python code %s (line %d): %s', python.name, python.first_line, e) return except Exception, e: _logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True) raise else: self.assertion_report.record_success() def process_workflow(self, node): workflow, values = node.items()[0] if self.isnoupdate(workflow) and self.mode != 'init': return if workflow.ref: id = self.get_id(workflow.ref) else: if not values: raise YamlImportException('You must define a child node if you do not give a ref.') if not len(values) == 1: raise YamlImportException('Only one child node is accepted (%d given).' 
% len(values)) value = values[0] if not 'model' in value and (not 'eval' in value or not 'search' in value): raise YamlImportException('You must provide a "model" and an "eval" or "search" to evaluate.') value_model = self.get_model(value['model']) local_context = {'obj': lambda x: value_model.browse(self.cr, self.uid, x, context=self.context)} local_context.update(self.id_map) id = eval(value['eval'], self.eval_context, local_context) if workflow.uid is not None: uid = workflow.uid else: uid = self.uid self.cr.execute('select distinct signal, sequence, id from wkf_transition ORDER BY sequence,id') signals=[x['signal'] for x in self.cr.dictfetchall()] if workflow.action not in signals: raise YamlImportException('Incorrect action %s. No such action defined' % workflow.action) openerp.workflow.trg_validate(uid, workflow.model, id, workflow.action, self.cr) def _eval_params(self, model, params): args = [] for i, param in enumerate(params): if isinstance(param, types.ListType): value = self._eval_params(model, param) elif is_ref(param): value = self.process_ref(param) elif is_eval(param): value = self.process_eval(param) elif isinstance(param, types.DictionaryType): # supports XML syntax param_model = self.get_model(param.get('model', model)) if 'search' in param: q = eval(param['search'], self.eval_context) ids = param_model.search(self.cr, self.uid, q) value = self._get_first_result(ids) elif 'eval' in param: local_context = {'obj': lambda x: param_model.browse(self.cr, self.uid, x, self.context)} local_context.update(self.id_map) value = eval(param['eval'], self.eval_context, local_context) else: raise YamlImportException('You must provide either a !ref or at least a "eval" or a "search" to function parameter #%d.' 
% i) else: value = param # scalar value args.append(value) return args def process_function(self, node): function, params = node.items()[0] if self.isnoupdate(function) and self.mode != 'init': return model = self.get_model(function.model) if function.eval: args = self.process_eval(function.eval) else: args = self._eval_params(function.model, params) method = function.name getattr(model, method)(self.cr, self.uid, *args) def _set_group_values(self, node, values): if node.groups: group_names = node.groups.split(',') groups_value = [] for group in group_names: if group.startswith('-'): group_id = self.get_id(group[1:]) groups_value.append((3, group_id)) else: group_id = self.get_id(group) groups_value.append((4, group_id)) values['groups_id'] = groups_value def process_menuitem(self, node): self.validate_xml_id(node.id) if not node.parent: parent_id = False self.cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (node.name,)) res = self.cr.fetchone() values = {'parent_id': parent_id, 'name': node.name} else: parent_id = self.get_id(node.parent) values = {'parent_id': parent_id} if node.name: values['name'] = node.name try: res = [ self.get_id(node.id) ] except: # which exception ? res = None if node.action: action_type = node.type or 'act_window' icons = { "act_window": 'STOCK_NEW', "report.xml": 'STOCK_PASTE', "wizard": 'STOCK_EXECUTE', "url": 'STOCK_JUMP_TO', } values['icon'] = icons.get(action_type, 'STOCK_NEW') if action_type == 'act_window': action_id = self.get_id(node.action) self.cr.execute('select view_type,view_mode,name,view_id,target from ir_act_window where id=%s', (action_id,)) ir_act_window_result = self.cr.fetchone() assert ir_act_window_result, "No window action defined for this id %s !\n" \ "Verify that this is a window action or add a type argument." 
% (node.action,) action_type, action_mode, action_name, view_id, target = ir_act_window_result if view_id: self.cr.execute('SELECT type FROM ir_ui_view WHERE id=%s', (view_id,)) # TODO guess why action_mode is ir_act_window.view_mode above and ir_ui_view.type here action_mode = self.cr.fetchone() self.cr.execute('SELECT view_mode FROM ir_act_window_view WHERE act_window_id=%s ORDER BY sequence LIMIT 1', (action_id,)) if self.cr.rowcount: action_mode = self.cr.fetchone() if action_type == 'tree': values['icon'] = 'STOCK_INDENT' elif action_mode and action_mode.startswith('tree'): values['icon'] = 'STOCK_JUSTIFY_FILL' elif action_mode and action_mode.startswith('graph'): values['icon'] = 'terp-graph' elif action_mode and action_mode.startswith('calendar'): values['icon'] = 'terp-calendar' if target == 'new': values['icon'] = 'STOCK_EXECUTE' if not values.get('name', False): values['name'] = action_name elif action_type == 'wizard': action_id = self.get_id(node.action) self.cr.execute('select name from ir_act_wizard where id=%s', (action_id,)) ir_act_wizard_result = self.cr.fetchone() if (not values.get('name', False)) and ir_act_wizard_result: values['name'] = ir_act_wizard_result[0] else: raise YamlImportException("Unsupported type '%s' in menuitem tag." 
% action_type) if node.sequence: values['sequence'] = node.sequence if node.icon: values['icon'] = node.icon self._set_group_values(node, values) pid = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, \ 'ir.ui.menu', self.module, values, node.id, mode=self.mode, \ noupdate=self.isnoupdate(node), res_id=res and res[0] or False) if node.id and parent_id: self.id_map[node.id] = int(parent_id) if node.action and pid: action_type = node.type or 'act_window' action_id = self.get_id(node.action) action = "ir.actions.%s,%d" % (action_type, action_id) self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', \ 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(parent_id))], action, True, True, xml_id=node.id) def process_act_window(self, node): assert getattr(node, 'id'), "Attribute %s of act_window is empty !" % ('id',) assert getattr(node, 'name'), "Attribute %s of act_window is empty !" % ('name',) assert getattr(node, 'res_model'), "Attribute %s of act_window is empty !" % ('res_model',) self.validate_xml_id(node.id) view_id = False if node.view: view_id = self.get_id(node.view) if not node.context: node.context={} context = eval(str(node.context), self.eval_context) values = { 'name': node.name, 'type': node.type or 'ir.actions.act_window', 'view_id': view_id, 'domain': node.domain, 'context': context, 'res_model': node.res_model, 'src_model': node.src_model, 'view_type': node.view_type or 'form', 'view_mode': node.view_mode or 'tree,form', 'usage': node.usage, 'limit': node.limit, 'auto_refresh': node.auto_refresh, 'multi': getattr(node, 'multi', False), } self._set_group_values(node, values) if node.target: values['target'] = node.target id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, \ 'ir.actions.act_window', self.module, values, node.id, mode=self.mode) self.id_map[node.id] = int(id) if node.src_model: keyword = 'client_action_relate' value = 'ir.actions.act_window,%s' % id replace = node.replace or True 
self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', keyword, \ node.id, [node.src_model], value, replace=replace, noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id) # TODO add remove ir.model.data def process_delete(self, node): assert getattr(node, 'model'), "Attribute %s of delete tag is empty !" % ('model',) if node.model in self.pool: if node.search: ids = self.pool[node.model].search(self.cr, self.uid, eval(node.search, self.eval_context)) else: ids = [self.get_id(node.id)] if len(ids): self.pool[node.model].unlink(self.cr, self.uid, ids) else: self._log("Record not deleted.") def process_url(self, node): self.validate_xml_id(node.id) res = {'name': node.name, 'url': node.url, 'target': node.target} id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, \ "ir.actions.act_url", self.module, res, node.id, mode=self.mode) self.id_map[node.id] = int(id) # ir_set if (not node.menu or eval(node.menu)) and id: keyword = node.keyword or 'client_action_multi' value = 'ir.actions.act_url,%s' % id replace = node.replace or True self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', \ keyword, node.url, ["ir.actions.act_url"], value, replace=replace, \ noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id) def process_ir_set(self, node): if not self.mode == 'init': return False _, fields = node.items()[0] res = {} for fieldname, expression in fields.items(): if is_eval(expression): value = eval(expression.expression, self.eval_context) else: value = expression res[fieldname] = value self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, res['key'], res['key2'], \ res['name'], res['models'], res['value'], replace=res.get('replace',True), \ isobject=res.get('isobject', False), meta=res.get('meta',None)) def process_report(self, node): values = {} for dest, f in (('name','string'), ('model','model'), ('report_name','name')): values[dest] = getattr(node, f) assert values[dest], "Attribute %s of report is empty !" 
% (f,) for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')): if getattr(node, field): values[dest] = getattr(node, field) if node.auto: values['auto'] = eval(node.auto) if node.sxw: sxw_file = misc.file_open(node.sxw) try: sxw_content = sxw_file.read() values['report_sxw_content'] = sxw_content finally: sxw_file.close() if node.header: values['header'] = eval(node.header) values['multi'] = node.multi and eval(node.multi) xml_id = node.id self.validate_xml_id(xml_id) self._set_group_values(node, values) id = self.pool['ir.model.data']._update(self.cr, SUPERUSER_ID, "ir.actions.report.xml", \ self.module, values, xml_id, noupdate=self.isnoupdate(node), mode=self.mode) self.id_map[xml_id] = int(id) if not node.menu or eval(node.menu): keyword = node.keyword or 'client_print_multi' value = 'ir.actions.report.xml,%s' % id replace = node.replace or True self.pool['ir.model.data'].ir_set(self.cr, SUPERUSER_ID, 'action', \ keyword, values['name'], [values['model']], value, replace=replace, isobject=True, xml_id=xml_id) def process_none(self): """ Empty node or commented node should not pass silently. """ self._log_assert_failure("You have an empty block in your tests.") def process(self, yaml_string): """ Processes a Yaml string. Custom tags are interpreted by 'process_' instance methods. 
""" yaml_tag.add_constructors() is_preceded_by_comment = False for node in yaml.load(yaml_string): is_preceded_by_comment = self._log_node(node, is_preceded_by_comment) try: self._process_node(node) except Exception, e: _logger.exception(e) raise def _process_node(self, node): if is_comment(node): self.process_comment(node) elif is_assert(node): self.process_assert(node) elif is_record(node): self.process_record(node) elif is_python(node): self.process_python(node) elif is_menuitem(node): self.process_menuitem(node) elif is_delete(node): self.process_delete(node) elif is_url(node): self.process_url(node) elif is_context(node): self.process_context(node) elif is_ir_set(node): self.process_ir_set(node) elif is_act_window(node): self.process_act_window(node) elif is_report(node): self.process_report(node) elif is_workflow(node): if isinstance(node, types.DictionaryType): self.process_workflow(node) else: self.process_workflow({node: []}) elif is_function(node): if isinstance(node, types.DictionaryType): self.process_function(node) else: self.process_function({node: []}) elif node is None: self.process_none() else: raise YamlImportException("Can not process YAML block: %s" % node) def _log_node(self, node, is_preceded_by_comment): if is_comment(node): is_preceded_by_comment = True self._log(node) elif not is_preceded_by_comment: if isinstance(node, types.DictionaryType): msg = "Creating %s\n with %s" args = node.items()[0] self._log(msg, *args) else: self._log(node) else: is_preceded_by_comment = False return is_preceded_by_comment def yaml_import(cr, module, yamlfile, kind, idref=None, mode='init', noupdate=False, report=None): if idref is None: idref = {} loglevel = logging.DEBUG yaml_string = yamlfile.read() yaml_interpreter = YamlInterpreter(cr, module, idref, mode, filename=yamlfile.name, report=report, noupdate=noupdate, loglevel=loglevel) yaml_interpreter.process(yaml_string) # keeps convention of convert.py convert_yaml_import = yaml_import # 
vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
shi2wei3/virt-test
tools/run_unittests_nose.py
22
2211
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'Lucas Meneghel Rodrigues <lmr@redhat.com>' from nose.selector import Selector from nose.plugins import Plugin from nose.plugins.attrib import AttributeSelector from nose.plugins.xunit import Xunit from nose.plugins.cover import Coverage import logging import os import nose import sys logger = logging.getLogger(__name__) class VirtTestSelector(Selector): def wantDirectory(self, dirname): return True def wantModule(self, module): if module.__name__ == 'virttest.utils_test': return False return True def wantFile(self, filename): if not filename.endswith('_unittest.py'): return False skip_tests = [] if self.config.options.skip_tests: skip_tests = self.config.options.skip_tests.split() if os.path.basename(filename)[:-3] in skip_tests: logger.debug('Skipping test: %s' % filename) return False if self.config.options.debug: logger.debug('Adding %s as a valid test' % filename) return True class VirtTestRunner(Plugin): enabled = True name = 'virt_test_runner' def configure(self, options, config): self.result_stream = sys.stdout config.logStream = self.result_stream self.testrunner = nose.core.TextTestRunner(stream=self.result_stream, descriptions=True, verbosity=2, config=config) def options(self, parser, env): parser.add_option("--virttest-skip-tests", dest="skip_tests", default=[], help='A space separated list of tests to skip') def prepareTestLoader(self, loader): loader.selector = VirtTestSelector(loader.config) def run_test(): nose.main(addplugins=[VirtTestRunner(), AttributeSelector(), Xunit(), Coverage()]) def main(): run_test() if __name__ == '__main__': main()
gpl-2.0
dracidoupe/graveyard
ddcz/migrations/0018_auto_20180617_1740.py
1
2511
# Generated by Django 2.0.2 on 2018-06-17 15:40 import ddcz.models.magic from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("ddcz", "0017_auto_20180617_1604"), ] operations = [ migrations.AddField( model_name="gallerypicture", name="hodnota_hlasovani", field=models.IntegerField(blank=True, null=True), ), migrations.AddField( model_name="gallerypicture", name="pocet_hlasujicich", field=models.IntegerField(blank=True, null=True), ), migrations.AddField( model_name="gallerypicture", name="precteno", field=models.IntegerField(default=0), ), migrations.AddField( model_name="gallerypicture", name="tisknuto", field=models.IntegerField(default=0), ), migrations.AlterField( model_name="gallerypicture", name="autmail", field=ddcz.models.magic.MisencodedCharField( blank=True, max_length=50, null=True ), ), migrations.AlterField( model_name="gallerypicture", name="autor", field=ddcz.models.magic.MisencodedCharField( blank=True, max_length=50, null=True ), ), migrations.AlterField( model_name="gallerypicture", name="datum", field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name="gallerypicture", name="id", field=models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID" ), ), migrations.AlterField( model_name="gallerypicture", name="schvaleno", field=ddcz.models.magic.MisencodedCharField( choices=[("a", "Schváleno"), ("n", "Neschváleno")], max_length=1 ), ), migrations.AlterField( model_name="gallerypicture", name="zdrojmail", field=ddcz.models.magic.MisencodedCharField( blank=True, max_length=30, null=True ), ), migrations.AlterField( model_name="gallerypicture", name="pochvez", field=ddcz.models.magic.MisencodedIntegerField(max_length=5), ), ]
mit
actuino/unicorn-display
display-client/client.py
1
3700
#!/usr/bin/env python # Unicorn Jauge Display Client import json import os import sys, getopt # pip install socketIO-client # https://github.com/invisibleroads/socketIO-client from socketIO_client import SocketIO, LoggingNamespace current_page = 0; # The socker server Hostname DISPLAY_SERVER_HOST = 'localhost' if 'DISPLAY_SERVER_HOST' in os.environ: DISPLAY_SERVER_HOST = os.environ['DISPLAY_SERVER_HOST'] # The socker server Port DISPLAY_SERVER_PORT = 80 if 'DISPLAY_SERVER_PORT' in os.environ: DISPLAY_SERVER_PORT = os.environ['DISPLAY_SERVER_PORT'] # The Physical display name DISPLAY_NAME = 'Astra' # Default unicorn name http://www.myangelcardreadings.com/unicornnames.html # TODO : random unicorn name according to serial. if 'DISPLAY_NAME' in os.environ: DISPLAY_NAME = os.environ['DISPLAY_NAME'] CONFIG_FILE_NAME = 'res/config.json' if 'CONFIG_FILE_NAME' in os.environ: CONFIG_FILE_NAME = os.environ['CONFIG_FILE_NAME'] import unicorndisplay def main(argv): global CONFIG_FILE_NAME try: opts, args = getopt.getopt(argv,"hc:",["help","config="]) except getopt.GetoptError: print 'client.py -c <configfile> ' sys.exit(2) for opt, arg in opts: if opt in ("-h", "--help"): print 'client.py -c <configfile> ' sys.exit() elif opt in ("-c", "--config"): CONFIG_FILE_NAME = arg print 'Config file is "', CONFIG_FILE_NAME unicorndisplay.init(CONFIG_FILE_NAME) if __name__ == "__main__": main(sys.argv[1:]) # From http://raspberrypi.stackexchange.com/questions/2086/how-do-i-get-the-serial-number def getserial(): # Extract serial from cpuinfo file cpuserial = "0000000000000000" try: f = open('/proc/cpuinfo','r') for line in f: if line[0:6]=='Serial': cpuserial = line[10:26] f.close() except: cpuserial = "ERROR000000000" return cpuserial def on_connect(): print "Connected" socketIO.emit('name','{"Serial":"'+getserial()+'", "Name":"'+DISPLAY_NAME+'"}') def on_file(*args): unicorndisplay.receive_file(args[0]) def send_current_page(): page = 
{'Page':current_page,'Serial':getserial(),'Name':DISPLAY_NAME,'Channel':unicorndisplay.get_current_channel()} socketIO.emit('page',json.dumps(page)) def on_command(*args): global current_page try: # ? message for us ? print "receive command: ",args[0]["Command"] if DISPLAY_NAME != args[0]["Name"]: print "Ignored Command" return command = args[0]["Command"] if command == "NextPage" or command == 'LeftGesture': current_page = unicorndisplay.next_page() send_current_page() #socketIO.emit('page','{"Page":"'+str(current_page)+'","Serial":"'+getserial()+'", "Name":"'+DISPLAY_NAME+'"}') elif command == "PreviousPage" or command == 'RightGesture': current_page = unicorndisplay.previous_page() send_current_page() #socketIO.emit('page','{"Page":"'+str(current_page)+'","Serial":"'+getserial()+'", "Name":"'+DISPLAY_NAME+'"}') else: print 'Unknown Command' except Exception as e: s = str(e) print "Bad message" ,s print DISPLAY_NAME,getserial(),"Connecting to",DISPLAY_SERVER_HOST, DISPLAY_SERVER_PORT socketIO = SocketIO(DISPLAY_SERVER_HOST, DISPLAY_SERVER_PORT) socketIO.on('connect', on_connect) socketIO.on('file', on_file) socketIO.on('command', on_command) send_current_page() # TODO : Manage exceptions and reconnect while 1: socketIO.wait(60) socketIO.emit('ping')
mit
mathspace/libcloud
libcloud/loadbalancer/types.py
9
2298
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ "Provider", "State", "LibcloudLBError", "LibcloudLBImmutableError", "OLD_CONSTANT_TO_NEW_MAPPING" ] from libcloud.common.types import LibcloudError class LibcloudLBError(LibcloudError): pass class LibcloudLBImmutableError(LibcloudLBError): pass class Provider(object): """ Defines for each of the supported providers Non-Dummy drivers are sorted in alphabetical order. Please preserve this ordering when adding new drivers. 
:cvar ALIYUN_SLB: Aliyun SLB loadbalancer driver """ ALIYUN_SLB = 'aliyun_slb' BRIGHTBOX = 'brightbox' CLOUDSTACK = 'cloudstack' DIMENSIONDATA = 'dimensiondata' ELB = 'elb' GCE = 'gce' GOGRID = 'gogrid' NINEFOLD = 'ninefold' RACKSPACE = 'rackspace' SOFTLAYER = 'softlayer' # Deprecated RACKSPACE_US = 'rackspace_us' RACKSPACE_UK = 'rackspace_uk' OLD_CONSTANT_TO_NEW_MAPPING = { Provider.RACKSPACE_US: Provider.RACKSPACE, Provider.RACKSPACE_UK: Provider.RACKSPACE, } class State(object): """ Standard states for a loadbalancer :cvar RUNNING: loadbalancer is running and ready to use :cvar UNKNOWN: loabalancer state is unknown """ RUNNING = 0 PENDING = 1 UNKNOWN = 2 ERROR = 3 DELETED = 4 class MemberCondition(object): """ Each member of a load balancer can have an associated condition which determines its role within the load balancer. """ ENABLED = 0 DISABLED = 1 DRAINING = 2
apache-2.0
austinvernsonger/metagoofil
hachoir_metadata/filter.py
85
1664
from hachoir_metadata.timezone import UTC from datetime import date, datetime # Year in 1850..2030 MIN_YEAR = 1850 MAX_YEAR = 2030 class Filter: def __init__(self, valid_types, min=None, max=None): self.types = valid_types self.min = min self.max = max def __call__(self, value): if not isinstance(value, self.types): return True if self.min is not None and value < self.min: return False if self.max is not None and self.max < value: return False return True class NumberFilter(Filter): def __init__(self, min=None, max=None): Filter.__init__(self, (int, long, float), min, max) class DatetimeFilter(Filter): def __init__(self, min=None, max=None): Filter.__init__(self, (date, datetime), datetime(MIN_YEAR, 1, 1), datetime(MAX_YEAR, 12, 31)) self.min_date = date(MIN_YEAR, 1, 1) self.max_date = date(MAX_YEAR, 12, 31) self.min_tz = datetime(MIN_YEAR, 1, 1, tzinfo=UTC) self.max_tz = datetime(MAX_YEAR, 12, 31, tzinfo=UTC) def __call__(self, value): """ Use different min/max values depending on value type (datetime with timezone, datetime or date). """ if not isinstance(value, self.types): return True if hasattr(value, "tzinfo") and value.tzinfo: return (self.min_tz <= value <= self.max_tz) elif isinstance(value, datetime): return (self.min <= value <= self.max) else: return (self.min_date <= value <= self.max_date) DATETIME_FILTER = DatetimeFilter()
gpl-2.0
magenta-aps/mox
oio_rest/oio_rest/validate.py
1
13865
# Copyright (C) 2015-2019 Magenta ApS, https://magenta.dk. # Contact: info@magenta.dk. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import copy import jsonschema from . import settings # A very nice reference explaining the JSON schema syntax can be found # here: https://spacetelescope.github.io/understanding-json-schema/ # JSON schema types BOOLEAN = {'type': 'boolean'} INTEGER = {'type': 'integer'} STRING = {'type': 'string'} def _generate_schema_array(items, maxItems=None): schema_array = { 'type': 'array', 'items': items } if maxItems: schema_array['maxItems'] = maxItems return schema_array def _generate_schema_object(properties, required, kwargs=None): schema_obj = { 'type': 'object', 'properties': properties, 'additionalProperties': False } # passing an empty array causes the schema to fail validation... if required: schema_obj['required'] = required if kwargs: schema_obj.update(kwargs) return schema_obj # Mapping from DATABASE_STRUCTURE types to JSON schema types TYPE_MAP = { 'aktoerattr': _generate_schema_object( { 'accepteret': STRING, 'obligatorisk': STRING, 'repraesentation_uuid': {'$ref': '#/definitions/uuid'}, }, ['accepteret', 'obligatorisk', 'repraesentation_uuid'] ), 'boolean': BOOLEAN, 'date': STRING, 'int': INTEGER, 'interval(0)': STRING, 'journaldokument': _generate_schema_object( { 'dokumenttitel': STRING, 'offentlighedundtaget': { '$ref': '#/definitions/offentlighedundtaget'} }, ['dokumenttitel', 'offentlighedundtaget'] ), 'journalnotat': _generate_schema_object( { 'titel': STRING, 'notat': STRING, 'format': STRING, }, ['titel', 'notat', 'format'] ), 'offentlighedundtagettype': { '$ref': '#/definitions/offentlighedundtaget'}, 'soegeord': _generate_schema_array(_generate_schema_array(STRING), 2), 'text[]': _generate_schema_array(STRING), 'timestamptz': STRING, 'vaerdirelationattr': 
_generate_schema_object( { 'forventet': BOOLEAN, 'nominelvaerdi': STRING }, ['forventet', 'nominelvaerdi'] ) } def _get_metadata(obj, metadata_type, key): """ Get the metadata for a given attribute :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :param metadata_type: Must be either 'attributter' or 'relationer' :param key: The attribute to get the metadata from, e.g. 'egenskaber' :return: Dictionary containing the metadata for the attribute fields """ metadata = settings.REAL_DB_STRUCTURE[obj].get( '{}_metadata'.format(metadata_type), []) if not metadata or key not in metadata: return metadata return metadata[key] def _get_mandatory(obj, attribute_name): """ Get a list of mandatory attribute fields for a given attribute. :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :param attribute_name: The attribute to get the fields from, e.g. 'egenskaber' :return: Sorted list of mandatory attribute keys """ attribute = _get_metadata(obj, 'attributter', attribute_name) mandatory = sorted( key for key in attribute if attribute[key].get('mandatory', False) ) return mandatory def _handle_attribute_metadata(obj, fields, attribute_name): """ Update the types of the attribute fields. :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :param fields: A dictionary of attribute fields to update. :param attribute_name: The name of the attribute fields :return: Dictionary of updated attribute fields. """ attribute = _get_metadata(obj, 'attributter', attribute_name) fields.update( { key: TYPE_MAP[attribute[key]['type']] for key in attribute if attribute[key].get('type', False) } ) return fields def _generate_attributter(obj): """ Generate the 'attributter' part of the JSON schema. :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :return: Dictionary representing the 'attributter' part of the JSON schema. 
""" db_attributter = settings.REAL_DB_STRUCTURE[obj]['attributter'] attrs = {} required = [] for attrname, attrval in db_attributter.items(): full_name = '{}{}'.format(obj, attrname) schema = { key: STRING for key in attrval } schema.update({'virkning': {'$ref': '#/definitions/virkning'}}) schema = _handle_attribute_metadata(obj, schema, attrname) mandatory = _get_mandatory(obj, attrname) attrs[full_name] = _generate_schema_array( _generate_schema_object( schema, mandatory + ['virkning'], ), ) if mandatory: required.append(full_name) return _generate_schema_object(attrs, required) def _generate_tilstande(obj): """ Generate the 'tilstande' part of the JSON schema. :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :return: Dictionary representing the 'tilstande' part of the JSON schema. """ tilstande = dict(settings.REAL_DB_STRUCTURE[obj]['tilstande']) properties = {} required = [] for key in sorted(tilstande): tilstand_name = obj + key properties[tilstand_name] = _generate_schema_array( _generate_schema_object( { key: { 'type': 'string', 'enum': tilstande[key] }, 'virkning': {'$ref': '#/definitions/virkning'}, }, [key, 'virkning'] ) ) required.append(tilstand_name) return _generate_schema_object(properties, required) def _handle_relation_metadata_all(obj, relation): """ Update relations an their metadata (e.g. types) for all relations of the given LoRa object. :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :param relation: The base relation to update. :return: Dictionary representing the updated relation. """ metadata_all = _get_metadata(obj, 'relationer', '*') for key in metadata_all: if 'type' in metadata_all[key]: relation['items']['oneOf'][0]['properties'][key] = TYPE_MAP[ metadata_all[key]['type']] relation['items']['oneOf'][1]['properties'][key] = TYPE_MAP[ metadata_all[key]['type']] return relation def _handle_relation_metadata_specific(obj, relation_schema): """ Update relations an their metadata (e.g. 
types) for specific relations of the given LoRa object. :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :param relation_schema: Dictionary representing the 'relationer' part of the JSON schema. :return: Dictionary representing the updated 'relationer' part of the JSON schema. """ metadata_specific = ( settings.REAL_DB_STRUCTURE[obj].get('relationer_metadata', []) ) for relation in [key for key in metadata_specific if not key == '*']: for i in range(2): properties = relation_schema[relation]['items']['oneOf'][i][ 'properties'] metadata = metadata_specific[relation] for key in metadata: if 'type' in metadata[key]: properties[key] = TYPE_MAP[metadata[key]['type']] if 'enum' in metadata[key]: # Enum implies type = text properties[key] = { 'type': 'string', 'enum': metadata[key]['enum'] } if metadata[key].get('mandatory', False): relation_schema[relation]['items']['oneOf'][i][ 'required'].append(key) if obj == 'tilstand': # Handle special case for 'tilstand' where UUID not allowed item = relation_schema['tilstandsvaerdi']['items']['oneOf'][0] del item['properties']['uuid'] item['required'].remove('uuid') relation_schema['tilstandsvaerdi']['items'] = item return relation_schema def _generate_relationer(obj): """ Generate the 'relationer' part of the JSON schema. :param obj: The type of LoRa object, i.e. 'bruger', 'organisation' etc. :return: Dictionary representing the 'relationer' part of the JSON schema. 
""" relationer_nul_til_en = \ settings.REAL_DB_STRUCTURE[obj]['relationer_nul_til_en'] relationer_nul_til_mange = settings.REAL_DB_STRUCTURE[obj][ 'relationer_nul_til_mange'] relation_nul_til_mange = _generate_schema_array( { 'oneOf': [ _generate_schema_object( { 'uuid': {'$ref': '#/definitions/uuid'}, 'virkning': {'$ref': '#/definitions/virkning'}, 'objekttype': STRING }, ['uuid', 'virkning'] ), _generate_schema_object( { 'urn': {'$ref': '#/definitions/urn'}, 'virkning': {'$ref': '#/definitions/virkning'}, 'objekttype': STRING }, ['urn', 'virkning'] ) ] } ) relation_nul_til_mange = _handle_relation_metadata_all( obj, relation_nul_til_mange) relation_schema = { relation: copy.deepcopy(relation_nul_til_mange) for relation in relationer_nul_til_mange } relation_nul_til_en = copy.deepcopy(relation_nul_til_mange) relation_nul_til_en['items']['oneOf'][0]['properties'].pop('indeks', None) relation_nul_til_en['items']['oneOf'][1]['properties'].pop('indeks', None) relation_nul_til_en['maxItems'] = 1 for relation in relationer_nul_til_en: relation_schema[relation] = relation_nul_til_en relation_schema = _handle_relation_metadata_specific(obj, relation_schema) return { 'type': 'object', 'properties': relation_schema, 'additionalProperties': False } def _generate_varianter(): """ Function to generate the special 'varianter' section of the JSON schema used for the the 'Dokument' LoRa object type. """ return _generate_schema_array(_generate_schema_object( { 'egenskaber': _generate_schema_array(_generate_schema_object( { 'varianttekst': STRING, 'arkivering': BOOLEAN, 'delvisscannet': BOOLEAN, 'offentliggoerelse': BOOLEAN, 'produktion': BOOLEAN, 'virkning': {'$ref': '#/definitions/virkning'} }, ['varianttekst', 'virkning'] )) }, ['egenskaber'] )) def generate_json_schema(obj): """ Generate the JSON schema corresponding to LoRa object type. :param obj: The LoRa object type, i.e. 'bruger', 'organisation',... :return: Dictionary representing the JSON schema. 
""" if obj == 'dokument': # Due to an inconsistency between the way LoRa handles # "DokumentVariantEgenskaber" and the specs' we will have to do # this for now, i.e. we allow any JSON-object for "Dokument". return {'type': 'object'} schema = _generate_schema_object( { 'attributter': _generate_attributter(obj), 'tilstande': _generate_tilstande(obj), 'relationer': _generate_relationer(obj), 'note': STRING, }, ['attributter', 'tilstande'] ) schema['$schema'] = 'http://json-schema.org/schema#' schema['id'] = 'http://github.com/magenta-aps/mox' schema['definitions'] = { 'urn': { 'type': 'string', 'pattern': '^urn:.' }, 'uuid': { 'type': 'string', 'pattern': '^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-' '[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$' }, 'virkning': _generate_schema_object( { 'from': STRING, 'to': STRING, 'from_included': BOOLEAN, 'to_included': BOOLEAN, 'aktoerref': {'$ref': '#/definitions/uuid'}, 'aktoertypekode': STRING, 'notetekst': STRING, }, ['from', 'to'] ), 'offentlighedundtaget': _generate_schema_object( { 'alternativtitel': STRING, 'hjemmel': STRING }, ['alternativtitel', 'hjemmel'] ) } return schema SCHEMAS = {} def get_schema(obj_type): try: return SCHEMAS[obj_type] except KeyError: pass schema = SCHEMAS[obj_type] = copy.deepcopy(generate_json_schema(obj_type)) return schema def validate(input_json, obj_type): """ Validate request JSON according to JSON schema. :param input_json: The request JSON :raise jsonschema.exceptions.ValidationError: If the request JSON is not valid according to the JSON schema. """ jsonschema.validate(input_json, get_schema(obj_type))
mpl-2.0
kumar303/olympia
src/olympia/amo/tests/test_csp_headers.py
3
5843
import os

from django.conf import settings
from django.test.utils import override_settings

from olympia.amo.tests import TestCase
from olympia.lib import settings_base as base_settings


def test_default_settings_no_report_only():
    # CSP must actually be enforced (not merely report-only) with the
    # default settings.
    assert settings.CSP_REPORT_ONLY is False


@override_settings(CSP_REPORT_ONLY=False)
class TestCSPHeaders(TestCase):
    """Checks on the Content-Security-Policy response header and settings.

    Most assertions run against ``olympia.lib.settings_base`` directly
    because that module is what production uses — see
    ``test_csp_settings_not_overriden_for_prod`` below.
    """

    def test_for_specific_csp_settings(self):
        """Test that required settings are provided as headers."""
        response = self.client.get('/en-US/developers/')
        assert response.status_code == 200
        # Make sure a default-src is set.
        assert "default-src 'self'" in response['content-security-policy']
        # Make sure a object-src is locked down.
        assert "object-src 'none'" in response['content-security-policy']
        # The report-uri should be set.
        assert "report-uri" in response['content-security-policy']
        # Basic assertions on the things we've defined.
        assert "script-src" in response['content-security-policy']
        assert "style-src" in response['content-security-policy']
        assert "font-src" in response['content-security-policy']
        assert "form-action" in response['content-security-policy']
        assert "frame-src" in response['content-security-policy']
        assert "child-src" in response['content-security-policy']
        assert "base-uri" in response['content-security-policy']

    def test_unsafe_inline_not_in_script_src(self):
        """Make sure a script-src does not have unsafe-inline."""
        assert "'unsafe-inline'" not in base_settings.CSP_SCRIPT_SRC

    def test_unsafe_eval_not_in_script_src(self):
        """Make sure a script-src does not have unsafe-eval."""
        assert "'unsafe-eval'" not in base_settings.CSP_SCRIPT_SRC

    def test_data_uri_not_in_script_src(self):
        """Make sure a script-src does not have data:."""
        assert 'data:' not in base_settings.CSP_SCRIPT_SRC

    def test_http_protocol_not_in_base_uri(self):
        """Make sure a base-uri does not have hosts using http:."""
        for val in base_settings.CSP_BASE_URI:
            assert not val.startswith('http:')

    def test_http_protocol_not_in_script_src(self):
        """Make sure a script-src does not have hosts using http:."""
        for val in base_settings.CSP_SCRIPT_SRC:
            assert not val.startswith('http:')

    def test_http_protocol_not_in_frame_src(self):
        """Make sure a frame-src does not have hosts using http:."""
        for val in base_settings.CSP_FRAME_SRC:
            assert not val.startswith('http:')

    def test_http_protocol_not_in_child_src(self):
        """Make sure a child-src does not have hosts using http:."""
        for val in base_settings.CSP_CHILD_SRC:
            assert not val.startswith('http:')

    def test_http_protocol_not_in_style_src(self):
        """Make sure a style-src does not have hosts using http:."""
        for val in base_settings.CSP_STYLE_SRC:
            assert not val.startswith('http:')

    def test_http_protocol_not_in_img_src(self):
        """Make sure a img-src does not have hosts using http:."""
        for val in base_settings.CSP_IMG_SRC:
            assert not val.startswith('http:')

    def test_http_protocol_not_in_form_action(self):
        """Make sure a form-action does not have hosts using http:."""
        for val in base_settings.CSP_FORM_ACTION:
            assert not val.startswith('http:')

    def test_child_src_matches_frame_src(self):
        """Check frame-src directive has same settings as child-src"""
        # frame-src deprecates to child-src in CSP2; the two must agree.
        assert base_settings.CSP_FRAME_SRC == base_settings.CSP_CHILD_SRC

    def test_prod_cdn_in_common_settings(self):
        """Make sure prod cdn is specified by default for statics."""
        prod_cdn_host = base_settings.PROD_CDN_HOST
        assert prod_cdn_host in base_settings.CSP_FONT_SRC
        assert prod_cdn_host in base_settings.CSP_IMG_SRC
        assert prod_cdn_host in base_settings.CSP_SCRIPT_SRC
        assert prod_cdn_host in base_settings.CSP_STYLE_SRC

    def test_self_in_common_settings(self):
        """Check 'self' is defined for common settings."""
        assert "'self'" in base_settings.CSP_BASE_URI
        assert "'self'" in base_settings.CSP_CONNECT_SRC
        assert "'self'" in base_settings.CSP_CHILD_SRC
        assert "'self'" in base_settings.CSP_FRAME_SRC
        assert "'self'" in base_settings.CSP_FORM_ACTION
        assert "'self'" in base_settings.CSP_IMG_SRC
        assert "'self'" in base_settings.CSP_STYLE_SRC

    def test_not_self_in_script_src(self):
        """script-src should not need 'self' or a.m.o for services.a.m.o"""
        assert "'self'" not in base_settings.CSP_SCRIPT_SRC
        assert "https://addons.mozilla.org" not in base_settings.CSP_SCRIPT_SRC

    def test_analytics_in_common_settings(self):
        """Check for anaytics hosts in img-src and script-src"""
        analytics_host = base_settings.ANALYTICS_HOST
        assert analytics_host in base_settings.CSP_IMG_SRC
        assert 'https://ssl.google-analytics.com/ga.js' in \
            base_settings.CSP_SCRIPT_SRC

    def test_csp_settings_not_overriden_for_prod(self):
        """Checks sites/prod/settings.py doesn't have CSP_* settings.

        Because testing the import of site settings is difficult due to
        env vars, we specify prod settings in lib/base_settings and
        then override them for local-dev/-dev/stage.

        This way the default settings in lib/base_settings should
        represent what is used for prod and thus are more readily
        testable.
        """
        path = os.path.join(
            settings.ROOT, 'src', 'olympia', 'conf', 'prod', 'settings.py')
        with open(path) as f:
            data = f.read()
            assert 'CSP_' not in data
bsd-3-clause
pniedzielski/fb-hackathon-2013-11-21
src/repl.it/jsrepl/extern/python/unclosured/lib/python2.7/encodings/shift_jisx0213.py
816
1059
"""Python Unicode codec for SHIFT_JISX0213.

A thin shim binding the C-level implementation from ``_codecs_jp`` into
the standard ``codecs``/``encodings`` machinery.

Written by Hye-Shik Chang <perky@FreeBSD.org>
"""

import codecs

import _codecs_jp
import _multibytecodec as mbc

# The C-level codec object that supplies the actual encode/decode
# routines; every class below simply delegates to it.
codec = _codecs_jp.getcodec('shift_jisx0213')


class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    """Return the CodecInfo record the ``encodings`` registry expects."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='shift_jisx0213',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
agpl-3.0
withtone/depot_tools
tests/fix_encoding_test.py
50
1817
#!/usr/bin/env python # coding=utf8 # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for fix_encoding.py.""" import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import fix_encoding class FixEncodingTest(unittest.TestCase): # Nice mix of latin, hebrew, arabic and chinese. Doesn't mean anything. text = u'Héllô 偉大 سيد' def test_code_page(self): # Make sure printing garbage won't throw. print self.text.encode() + '\xff' print >> sys.stderr, self.text.encode() + '\xff' def test_utf8(self): # Make sure printing utf-8 works. print self.text.encode('utf-8') print >> sys.stderr, self.text.encode('utf-8') def test_unicode(self): # Make sure printing unicode works. print self.text print >> sys.stderr, self.text def test_default_encoding(self): self.assertEquals('utf-8', sys.getdefaultencoding()) def test_win_console(self): if sys.platform != 'win32': return # This should fail if not redirected, e.g. run directly instead of through # the presubmit check. Can be checked with: # python tests\fix_encoding_test.py self.assertEquals( sys.stdout.__class__, fix_encoding.WinUnicodeOutput) self.assertEquals( sys.stderr.__class__, fix_encoding.WinUnicodeOutput) self.assertEquals(sys.stdout.encoding, sys.getdefaultencoding()) self.assertEquals(sys.stderr.encoding, sys.getdefaultencoding()) def test_multiple_calls(self): # Shouldn't do anything. self.assertEquals(False, fix_encoding.fix_encoding()) if __name__ == '__main__': assert fix_encoding.fix_encoding() unittest.main()
bsd-3-clause
hendradarwin/VTK
Examples/Infovis/Python/simple_selection.py
17
1259
# Two linked graph views sharing a single selection: picking items in
# one view highlights the same items in the other.
# Fix: the wildcard VTK import was duplicated verbatim; one copy removed.
from vtk import *

# Build a small, connected random graph (seeded with a spanning tree)
# carrying per-edge weights for the views to color by.
source = vtkRandomGraphSource()
source.SetNumberOfVertices(25)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)

# First view: simple 2D layout of the graph.
view1 = vtkGraphLayoutView()
view1.AddRepresentationFromInputConnection(source.GetOutputPort())
view1.SetColorVertices(True)
view1.SetEdgeColorArrayName("edge weight")
view1.SetColorEdges(True)
view1.SetLayoutStrategyToSimple2D()

# Second view: the same graph laid out as a tree.
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetColorVertices(True)
view2.SetEdgeColorArrayName("edge weight")
view2.SetColorEdges(True)
view2.SetLayoutStrategyToTree()

# Create an annotation link and set both views to use it
annotationLink = vtkAnnotationLink()
view1.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)

# Re-render every registered view whenever the shared selection
# changes, keeping the two views in sync.
updater = vtkViewUpdater()
updater.AddAnnotationLink(annotationLink)
updater.AddView(view1)
updater.AddView(view2)

theme = vtkViewTheme.CreateNeonTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()

view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()

view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()

view1.GetInteractor().Start()
bsd-3-clause
Bitl/RBXLegacy-src
Cut/RBXLegacyDiscordBot/lib/youtube_dl/extractor/moevideo.py
46
3728
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    sanitized_Request,
    urlencode_postdata,
)


class MoeVideoIE(InfoExtractor):
    """Extractor for the LetitBit family of video hosts.

    Display metadata (title, description, thumbnail) is scraped from the
    public video page; the actual stream URL and technical metadata are
    fetched from the LetitBit JSON API.
    """
    IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net'
    _VALID_URL = r'''(?x)
        https?://(?P<host>(?:www\.)?
        (?:(?:moevideo|playreplay|videochart)\.net))/
        (?:video|framevideo)/(?P<id>[0-9]+\.[0-9A-Za-z]+)'''
    _API_URL = 'http://api.letitbit.net/'
    _API_KEY = 'tVL0gjqo5'
    _TESTS = [
        {
            'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29',
            'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a',
            'info_dict': {
                'id': '00297.0036103fe3d513ef27915216fd29',
                'ext': 'flv',
                'title': 'Sink cut out machine',
                'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8',
                'thumbnail': r're:^https?://.*\.jpg$',
                'width': 540,
                'height': 360,
                'duration': 179,
                'filesize': 17822500,
            },
            'skip': 'Video has been removed',
        },
        {
            'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a',
            'md5': '74f0a014d5b661f0f0e2361300d1620e',
            'info_dict': {
                'id': '77107.7f325710a627383d40540d8e991a',
                'ext': 'flv',
                'title': 'Operacion Condor.',
                'description': 'md5:7e68cb2fcda66833d5081c542491a9a3',
                'thumbnail': r're:^https?://.*\.jpg$',
                'width': 480,
                'height': 296,
                'duration': 6027,
                'filesize': 588257923,
            },
            'skip': 'Video has been removed',
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Title/description/thumbnail come from OpenGraph tags on the
        # public video page.
        webpage = self._download_webpage(
            'http://%s/video/%s' % (mobj.group('host'), video_id),
            video_id, 'Downloading webpage')

        title = self._og_search_title(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage)

        # The API call is posted as a form field "r" holding a JSON
        # array of the shape [api_key, [method, params]].
        r = [
            self._API_KEY,
            [
                'preview/flv_link',
                {
                    'uid': video_id,
                },
            ],
        ]
        r_json = json.dumps(r)
        post = urlencode_postdata({'r': r_json})
        req = sanitized_Request(self._API_URL, post)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        response = self._download_json(req, video_id)
        if response['status'] != 'OK':
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, response['data']),
                expected=True
            )
        # Only the first item of the API payload is used.
        item = response['data'][0]
        video_url = item['link']
        duration = int_or_none(item['length'])
        width = int_or_none(item['width'])
        height = int_or_none(item['height'])
        filesize = int_or_none(item['convert_size'])

        formats = [{
            'format_id': 'sd',
            'http_headers': {'Range': 'bytes=0-'},  # Required to download
            'url': video_url,
            'width': width,
            'height': height,
            'filesize': filesize,
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
            'duration': duration,
            'formats': formats,
        }
gpl-3.0
naslanidis/ansible
lib/ansible/modules/notification/twilio.py
38
5973
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Matt Makai <matthew.makai@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- version_added: "1.6" module: twilio short_description: Sends a text message to a mobile phone through Twilio. description: - Sends a text message to a phone number through the Twilio messaging API. notes: - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. - Like the other notification modules, this one requires an external dependency to work. In this case, you'll need a Twilio account with a purchased or verified phone number to send the text message. 
options: account_sid: description: user's Twilio account token found on the account page required: true auth_token: description: user's Twilio authentication token required: true msg: description: the body of the text message required: true to_number: description: one or more phone numbers to send the text message to, format +15551112222 required: true from_number: description: the Twilio number to send the text message from, format +15551112222 required: true media_url: description: a URL with a picture, video or sound clip to send with an MMS (multimedia message) instead of a plain SMS required: false author: "Matt Makai (@makaimc)" ''' EXAMPLES = ''' # send an SMS about the build status to (555) 303 5681 # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: msg: All servers with webserver role are now configured. account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX from_number: +15552014545 to_number: +15553035681 delegate_to: localhost # send an SMS to multiple phone numbers about the deployment # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: msg: This server configuration is now complete. account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX from_number: +15553258899 to_number: - +15551113232 - +12025551235 - +19735559010 delegate_to: localhost # send an MMS to a single recipient with an update on the deployment # and an image of the results # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: msg: Deployment complete! 
account_sid: ACXXXXXXXXXXXXXXXXX auth_token: ACXXXXXXXXXXXXXXXXX from_number: +15552014545 to_number: +15553035681 media_url: https://demo.twilio.com/logo.png delegate_to: localhost ''' # ======================================= # twilio module support methods # import urllib def post_twilio_api(module, account_sid, auth_token, msg, from_number, to_number, media_url=None): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} if media_url: data['MediaUrl'] = media_url encoded_data = urllib.urlencode(data) headers = {'User-Agent': AGENT, 'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'application/json', } # Hack module params to have the Basic auth params that fetch_url expects module.params['url_username'] = account_sid.replace('\n', '') module.params['url_password'] = auth_token.replace('\n', '') return fetch_url(module, URI, data=encoded_data, headers=headers) # ======================================= # Main # def main(): module = AnsibleModule( argument_spec=dict( account_sid=dict(required=True), auth_token=dict(required=True, no_log=True), msg=dict(required=True), from_number=dict(required=True), to_number=dict(required=True), media_url=dict(default=None, required=False), ), supports_check_mode=True ) account_sid = module.params['account_sid'] auth_token = module.params['auth_token'] msg = module.params['msg'] from_number = module.params['from_number'] to_number = module.params['to_number'] media_url = module.params['media_url'] if not isinstance(to_number, list): to_number = [to_number] for number in to_number: r, info = post_twilio_api(module, account_sid, auth_token, msg, from_number, number, media_url) if info['status'] not in [200, 201]: body_message = "unknown error" if 'body' in info: body = json.loads(info['body']) body_message = body['message'] module.fail_json(msg="unable to send message to %s: %s" % (number, body_message)) 
module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * if __name__ == '__main__': main()
gpl-3.0
weebygames/boto
boto/mturk/price.py
170
1967
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.


class Price(object):
    """A monetary amount as used by the Mechanical Turk API.

    Instances are either built directly via the constructor or filled in
    incrementally through the SAX-style ``startElement``/``endElement``
    callbacks while a response document is parsed.
    """

    def __init__(self, amount=0.0, currency_code='USD'):
        self.amount = amount
        self.currency_code = currency_code
        # Human-readable form supplied by the service (e.g. "$0.50");
        # empty until a 'FormattedPrice' element is seen.
        self.formatted_price = ''

    def __repr__(self):
        # Prefer the server-provided formatted string when available.
        return self.formatted_price if self.formatted_price else str(self.amount)

    def startElement(self, name, attrs, connection):
        # No nested elements to dispatch to; keep parsing into this object.
        return None

    def endElement(self, name, value, connection):
        if name == 'FormattedPrice':
            self.formatted_price = value
        elif name == 'CurrencyCode':
            self.currency_code = value
        elif name == 'Amount':
            self.amount = float(value)

    def get_as_params(self, label, ord=1):
        """Render this price as request parameters under *label*.*ord*."""
        prefix = '%s.%d.' % (label, ord)
        return {
            prefix + 'Amount': str(self.amount),
            prefix + 'CurrencyCode': self.currency_code,
        }
mit
heidtn/MeteorTracker
meteortracker/meteor_tracker.py
1
2196
""" @author(s): Nathan Heidt, Jean Nassar This is the primary program for detecting and logging Meteors. Running `python meteor_tracker.py` is sufficient. Make sure the parameters specified in the config.ini file are correct. """ import configparser from . import camera from . import find_events from . import save_event class Tracker(object): """ A class for running and managing the primary meteor detection tasks. Parameters ---------- source : str, optional If given a the path of a video file, this will use that. Otherwise it will use the primary camera. Attributes ---------- cam : Camera This is used to either access the camera or a video file stream config : ConfigParser This is to parse the config file for different user settings event_logger : EventLogger When an event is detected, the images are passed to this class for logging event_finder : EventFinder This class is in charge of viewing images to actually find the events themselves. """ def __init__(self, source=None): self.cam = camera.Camera(source) self.config = configparser.ConfigParser() self.config.read('config.ini') self.event_logger = save_event.EventLogger() self.event_finder = find_events.EventFinder() def run(self): """ Run the meteor tracker program. If a potential meteor is detected, log the result. """ while True: current_image = self.cam.get_frame() previous_image = self.cam.get_previous_frame() # detect number of anomalies (keypoints) and highlight them in im keypoints, im = self.event_finder.find_motion_anomaly( previous_image, current_image ) # we have found an anomaly if keypoints: print("Anomaly found!") self.event_logger.add_event(curImg, prevImg) if __name__ == "__main__": Tracker().run()
mit
OpenSPA/dvbapp
lib/python/Screens/MessageBox.py
1
4589
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from enigma import eTimer

class MessageBox(Screen):
	"""Modal dialog screen showing a message with an optional yes/no
	choice list, a per-type icon and an optional countdown timeout
	shown in the window title.
	"""

	# Dialog flavours; they select the window title and icon pixmap.
	TYPE_YESNO = 0
	TYPE_INFO = 1
	TYPE_WARNING = 2
	TYPE_ERROR = 3
	TYPE_MESSAGE = 4

	# NOTE(review): `list=[]` is a shared mutable default; left as-is here
	# because callers may rely on the identity semantics — verify before
	# changing.
	def __init__(self, session, text, type=TYPE_YESNO, timeout=-1, close_on_any_key=False, default=True, enable_input=True, msgBoxID=None, picon=None, simple=False, list=[], timeout_default=None):
		self.type = type
		Screen.__init__(self, session)
		self.skinName = ["MessageBox"]
		if self.type == self.TYPE_YESNO:
			self.setTitle(_("Question"))
		elif self.type == self.TYPE_INFO:
			self.setTitle(_("Information"))
		elif self.type == self.TYPE_WARNING:
			self.setTitle(_("Warning"))
		elif self.type == self.TYPE_ERROR:
			self.setTitle(_("Error"))
		else:
			self.setTitle(_("Message"))
		if simple:
			self.skinName="MessageBoxSimple"
		self.msgBoxID = msgBoxID
		self["text"] = Label(text)
		self["Text"] = StaticText(text)
		self["selectedChoice"] = StaticText()
		self.text = text
		self.close_on_any_key = close_on_any_key
		self.timeout_default = timeout_default
		self["ErrorPixmap"] = Pixmap()
		self["QuestionPixmap"] = Pixmap()
		self["InfoPixmap"] = Pixmap()
		self["WarningPixmap"] = Pixmap()
		self.timerRunning = False
		self.initTimeout(timeout)
		# Show only the pixmap matching the requested icon; picon
		# defaults to the message type.
		picon = picon or type
		if picon != self.TYPE_ERROR:
			self["ErrorPixmap"].hide()
		if picon != self.TYPE_YESNO:
			self["QuestionPixmap"].hide()
		if picon != self.TYPE_INFO:
			self["InfoPixmap"].hide()
		if picon != self.TYPE_WARNING:
			self["WarningPixmap"].hide()
		self.title = self.type < self.TYPE_MESSAGE and [_("Question"), _("Information"), _("Warning"), _("Error")][self.type] or _("Message")
		if type == self.TYPE_YESNO:
			# `default` flips which answer is preselected.
			if list:
				self.list = list
			elif default == True:
				self.list = [ (_("yes"), True), (_("no"), False) ]
			else:
				self.list = [ (_("no"), False), (_("yes"), True) ]
		else:
			self.list = []
		self["list"] = MenuList(self.list)
		if self.list:
			self["selectedChoice"].setText(self.list[0][0])
		else:
			self["list"].hide()
		if enable_input:
			self["actions"] = ActionMap(["MsgBoxActions", "DirectionActions"],
				{
					"cancel": self.cancel,
					"ok": self.ok,
					"alwaysOK": self.alwaysOK,
					"up": self.up,
					"down": self.down,
					"left": self.left,
					"right": self.right,
					"upRepeated": self.up,
					"downRepeated": self.down,
					"leftRepeated": self.left,
					"rightRepeated": self.right
				}, -1)
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		self.setTitle(self.title)

	def initTimeout(self, timeout):
		# A positive timeout arms a 1-second tick timer; any other value
		# disables the countdown entirely.
		self.timeout = timeout
		if timeout > 0:
			self.timer = eTimer()
			self.timer.callback.append(self.timerTick)
			self.onExecBegin.append(self.startTimer)
			self.origTitle = None
			if self.execing:
				self.timerTick()
			else:
				self.onShown.append(self.__onShown)
			self.timerRunning = True
		else:
			self.timerRunning = False

	def __onShown(self):
		self.onShown.remove(self.__onShown)
		self.timerTick()

	def startTimer(self):
		self.timer.start(1000)

	def stopTimer(self):
		if self.timerRunning:
			del self.timer
			self.onExecBegin.remove(self.startTimer)
			self.setTitle(self.origTitle)
			self.timerRunning = False

	def timerTick(self):
		if self.execing:
			self.timeout -= 1
			if self.origTitle is None:
				self.origTitle = self.instance.getTitle()
			# Show the remaining seconds appended to the window title.
			self.setTitle(self.origTitle + " (" + str(self.timeout) + ")")
			if self.timeout == 0:
				self.timer.stop()
				self.timerRunning = False
				self.timeoutCallback()

	def timeoutCallback(self):
		print "Timeout!"
		# Without an explicit timeout result, fall back to the regular
		# OK action (current list selection, or True).
		if self.timeout_default is not None:
			self.close(self.timeout_default)
		else:
			self.ok()

	def cancel(self):
		self.close(False)

	def ok(self):
		if self.list:
			self.close(self["list"].getCurrent()[1])
		else:
			self.close(True)

	def alwaysOK(self):
		self.close(True)

	def up(self):
		self.move(self["list"].instance.moveUp)

	def down(self):
		self.move(self["list"].instance.moveDown)

	def left(self):
		self.move(self["list"].instance.pageUp)

	def right(self):
		self.move(self["list"].instance.pageDown)

	def move(self, direction):
		# Any navigation key closes the box when close_on_any_key is set;
		# otherwise it moves the selection and cancels the countdown.
		if self.close_on_any_key:
			self.close(True)
		self["list"].instance.moveSelection(direction)
		if self.list:
			self["selectedChoice"].setText(self["list"].getCurrent()[0])
		self.stopTimer()

	def __repr__(self):
		return str(type(self)) + "(" + self.text + ")"
gpl-2.0
gabelula/b-counted
.google_appengine/demos/guestbook/guestbook.py
11
2240
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import cgi import datetime import wsgiref.handlers from google.appengine.ext import db from google.appengine.api import users from google.appengine.ext import webapp class Greeting(db.Model): author = db.UserProperty() content = db.StringProperty(multiline=True) date = db.DateTimeProperty(auto_now_add=True) class MainPage(webapp.RequestHandler): def get(self): self.response.out.write('<html><body>') greetings = db.GqlQuery("SELECT * " "FROM Greeting " "ORDER BY date DESC LIMIT 10") for greeting in greetings: if greeting.author: self.response.out.write('<b>%s</b> wrote:' % greeting.author.nickname()) else: self.response.out.write('An anonymous person wrote:') self.response.out.write('<blockquote>%s</blockquote>' % cgi.escape(greeting.content)) self.response.out.write(""" <form action="/sign" method="post"> <div><textarea name="content" rows="3" cols="60"></textarea></div> <div><input type="submit" value="Sign Guestbook"></div> </form> </body> </html>""") class Guestbook(webapp.RequestHandler): def post(self): greeting = Greeting() if users.get_current_user(): greeting.author = users.get_current_user() greeting.content = self.request.get('content') greeting.put() self.redirect('/') application = webapp.WSGIApplication([ ('/', MainPage), ('/sign', Guestbook) ], debug=True) def main(): wsgiref.handlers.CGIHandler().run(application) if __name__ == '__main__': main()
apache-2.0
GheRivero/ansible
lib/ansible/modules/monitoring/logicmonitor.py
23
74401
#!/usr/bin/python # LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups # Copyright (C) 2015 LogicMonitor # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type RETURN = ''' --- success: description: flag indicating that execution was successful returned: success type: boolean sample: True ... ''' ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: logicmonitor short_description: Manage your LogicMonitor account through Ansible Playbooks description: - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform. - This module manages hosts, host groups, and collectors within your LogicMonitor account. version_added: "2.2" author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)] notes: - You must have an existing LogicMonitor account for this module to function. requirements: ["An existing LogicMonitor account", "Linux"] options: target: description: - The type of LogicMonitor object you wish to manage. - "Collector: Perform actions on a LogicMonitor collector." - NOTE You should use Ansible service modules such as M(service) or M(supervisorctl) for managing the Collector 'logicmonitor-agent' and 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services before a Collector remove. - "Host: Perform actions on a host device." - "Hostgroup: Perform actions on a LogicMonitor host group." - > NOTE Host and Hostgroup tasks should always be performed via delegate_to: localhost. There are no benefits to running these tasks on the remote host and doing so will typically cause problems. required: true choices: ['collector', 'host', 'datsource', 'hostgroup'] action: description: - The action you wish to perform on target. 
- "Add: Add an object to your LogicMonitor account." - "Remove: Remove an object from your LogicMonitor account." - "Update: Update properties, description, or groups (target=host) for an object in your LogicMonitor account." - "SDT: Schedule downtime for an object in your LogicMonitor account." required: true choices: ['add', 'remove', 'update', 'sdt'] company: description: - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes." required: true user: description: - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user. required: true password: description: - The password of the specified LogicMonitor user required: true collector: description: - The fully qualified domain name of a collector in your LogicMonitor account. - This is required for the creation of a LogicMonitor host (target=host action=add). - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't specified (target=host action=update action=remove action=sdt). hostname: description: - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to manage. - Optional for managing hosts (target=host). default: 'hostname -f' displayname: description: - The display name of a host in your LogicMonitor account or the desired display name of a device to manage. - Optional for managing hosts (target=host). default: 'hostname -f' description: description: - The long text description of the object in your LogicMonitor account. - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update). default: "" properties: description: - A dictionary of properties to set on the LogicMonitor host or host group. - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update). 
- This parameter will add or update existing properties in your LogicMonitor account. default: {} groups: description: - A list of groups that the host should be a member of. - Optional for managing hosts (target=host; action=add or action=update). default: [] id: description: - ID of the datasource to target. - Required for management of LogicMonitor datasources (target=datasource). fullpath: description: - The fullpath of the host group object you would like to manage. - Recommend running on a single Ansible host. - Required for management of LogicMonitor host groups (target=hostgroup). alertenable: description: - A boolean flag to turn alerting on or off for an object. - Optional for managing all hosts (action=add or action=update). type: bool default: 'yes' starttime: description: - The time that the Scheduled Down Time (SDT) should begin. - Optional for managing SDT (action=sdt). - Y-m-d H:M default: Now duration: description: - The duration (minutes) of the Scheduled Down Time (SDT). - Optional for putting an object into SDT (action=sdt). default: 30 ... 
''' EXAMPLES = ''' # example of adding a new LogicMonitor collector to these devices --- - hosts: collectors remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Deploy/verify LogicMonitor collectors become: yes logicmonitor: target: collector action: add company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' #example of adding a list of hosts into monitoring --- - hosts: hosts remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Deploy LogicMonitor Host # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: host action: add collector: mycompany-Collector company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' groups: /servers/production,/datacenter1 properties: snmp.community: secret dc: 1 type: prod delegate_to: localhost #example of putting a datasource in SDT --- - hosts: localhost remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: SDT a datasource # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: datasource action: sdt id: 123 duration: 3000 starttime: '2017-03-04 05:06' company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' #example of creating a hostgroup --- - hosts: localhost remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Create a host group # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: hostgroup action: add fullpath: /servers/development company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' properties: snmp.community: commstring type: dev #example of putting a list of hosts into SDT --- - hosts: hosts remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: SDT hosts # All tasks 
except for target=collector should use delegate_to: localhost logicmonitor: target: host action: sdt duration: 3000 starttime: '2016-11-10 09:08' company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' collector: mycompany-Collector delegate_to: localhost #example of putting a host group in SDT --- - hosts: localhost remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: SDT a host group # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: hostgroup action: sdt fullpath: /servers/development duration: 3000 starttime: '2017-03-04 05:06' company=: '{{ company }}' user: '{{ user }}' password: '{{ password }}' #example of updating a list of hosts --- - hosts: hosts remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Update a list of hosts # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: host action: update company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' collector: mycompany-Collector groups: /servers/production,/datacenter5 properties: snmp.community: commstring dc: 5 delegate_to: localhost #example of updating a hostgroup --- - hosts: hosts remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Update a host group # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: hostgroup action: update fullpath: /servers/development company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' properties: snmp.community: hg type: dev status: test delegate_to: localhost #example of removing a list of hosts from monitoring --- - hosts: hosts remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Remove LogicMonitor hosts # All tasks except for target=collector should use delegate_to: localhost 
logicmonitor: target: host action: remove company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' collector: mycompany-Collector delegate_to: localhost #example of removing a host group --- - hosts: hosts remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Remove LogicMonitor development servers hostgroup # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: hostgroup action: remove company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' fullpath: /servers/development delegate_to: localhost - name: Remove LogicMonitor servers hostgroup # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: hostgroup action: remove company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' fullpath: /servers delegate_to: localhost - name: Remove LogicMonitor datacenter1 hostgroup # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: hostgroup action: remove company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' fullpath: /datacenter1 delegate_to: localhost - name: Remove LogicMonitor datacenter5 hostgroup # All tasks except for target=collector should use delegate_to: localhost logicmonitor: target: hostgroup action: remove company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' fullpath: /datacenter5 delegate_to: localhost ### example of removing a new LogicMonitor collector to these devices --- - hosts: collectors remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Remove LogicMonitor collectors become: yes logicmonitor: target: collector action: remove company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' #complete example --- - hosts: localhost remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Create 
a host group logicmonitor: target: hostgroup action: add fullpath: /servers/production/database company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' properties: snmp.community: commstring - name: SDT a host group logicmonitor: target: hostgroup action: sdt fullpath: /servers/production/web duration: 3000 starttime: '2012-03-04 05:06' company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' - hosts: collectors remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: Deploy/verify LogicMonitor collectors logicmonitor: target: collector action: add company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' - name: Place LogicMonitor collectors into 30 minute Scheduled downtime logicmonitor: target: collector action: sdt company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' - name: Deploy LogicMonitor Host logicmonitor: target: host action: add collector: agent1.ethandev.com company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' properties: snmp.community: commstring dc: 1 groups: /servers/production/collectors, /datacenter1 delegate_to: localhost - hosts: database-servers remote_user: '{{ username }}' vars: company: mycompany user: myusername password: mypassword tasks: - name: deploy logicmonitor hosts logicmonitor: target: host action: add collector: monitoring.dev.com company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' properties: snmp.community: commstring type: db dc: 1 groups: /servers/production/database, /datacenter1 delegate_to: localhost - name: schedule 5 hour downtime for 2012-11-10 09:08 logicmonitor: target: host action: sdt duration: 3000 starttime: '2012-11-10 09:08' company: '{{ company }}' user: '{{ user }}' password: '{{ password }}' delegate_to: localhost ''' import datetime import os import platform import socket import sys import types from ansible.module_utils.basic import AnsibleModule from 
ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import open_url HAS_LIB_JSON = True try: import json # Detect the python-json library which is incompatible # Look for simplejson if that's the case try: if ( not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType) ): raise ImportError except AttributeError: raise ImportError except ImportError: try: import simplejson as json except ImportError: HAS_LIB_JSON = False except SyntaxError: HAS_LIB_JSON = False class LogicMonitor(object): def __init__(self, module, **params): self.__version__ = "1.0-python" self.module = module self.module.debug("Instantiating LogicMonitor object") self.check_mode = False self.company = params["company"] self.user = params["user"] self.password = params["password"] self.fqdn = socket.getfqdn() self.lm_url = "logicmonitor.com/santaba" self.__version__ = self.__version__ + "-ansible-module" def rpc(self, action, params): """Make a call to the LogicMonitor RPC library and return the response""" self.module.debug("Running LogicMonitor.rpc") param_str = urlencode(params) creds = urlencode( {"c": self.company, "u": self.user, "p": self.password}) if param_str: param_str = param_str + "&" param_str = param_str + creds try: url = ("https://" + self.company + "." + self.lm_url + "/rpc/" + action + "?" + param_str) # Set custom LogicMonitor header with version headers = {"X-LM-User-Agent": self.__version__} # Set headers f = open_url(url, headers=headers) raw = f.read() resp = json.loads(raw) if resp["status"] == 403: self.module.debug("Authentication failed.") self.fail(msg="Error: " + resp["errmsg"]) else: return raw except IOError as ioe: self.fail(msg="Error: Exception making RPC call to " + "https://" + self.company + "." 
+ self.lm_url + "/rpc/" + action + "\nException" + str(ioe)) def do(self, action, params): """Make a call to the LogicMonitor server \"do\" function""" self.module.debug("Running LogicMonitor.do...") param_str = urlencode(params) creds = (urlencode( {"c": self.company, "u": self.user, "p": self.password})) if param_str: param_str = param_str + "&" param_str = param_str + creds try: self.module.debug("Attempting to open URL: " + "https://" + self.company + "." + self.lm_url + "/do/" + action + "?" + param_str) f = open_url( "https://" + self.company + "." + self.lm_url + "/do/" + action + "?" + param_str) return f.read() except IOError as ioe: self.fail(msg="Error: Exception making RPC call to " + "https://" + self.company + "." + self.lm_url + "/do/" + action + "\nException" + str(ioe)) def get_collectors(self): """Returns a JSON object containing a list of LogicMonitor collectors""" self.module.debug("Running LogicMonitor.get_collectors...") self.module.debug("Making RPC call to 'getAgents'") resp = self.rpc("getAgents", {}) resp_json = json.loads(resp) if resp_json["status"] is 200: self.module.debug("RPC call succeeded") return resp_json["data"] else: self.fail(msg=resp) def get_host_by_hostname(self, hostname, collector): """Returns a host object for the host matching the specified hostname""" self.module.debug("Running LogicMonitor.get_host_by_hostname...") self.module.debug("Looking for hostname " + hostname) self.module.debug("Making RPC call to 'getHosts'") hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1})) if collector: if hostlist_json["status"] == 200: self.module.debug("RPC call succeeded") hosts = hostlist_json["data"]["hosts"] self.module.debug( "Looking for host matching: hostname " + hostname + " and collector " + str(collector["id"])) for host in hosts: if (host["hostName"] == hostname and host["agentId"] == collector["id"]): self.module.debug("Host match found") return host self.module.debug("No host match found") return None 
else: self.module.debug("RPC call failed") self.module.debug(hostlist_json) else: self.module.debug("No collector specified") return None def get_host_by_displayname(self, displayname): """Returns a host object for the host matching the specified display name""" self.module.debug("Running LogicMonitor.get_host_by_displayname...") self.module.debug("Looking for displayname " + displayname) self.module.debug("Making RPC call to 'getHost'") host_json = (json.loads(self.rpc("getHost", {"displayName": displayname}))) if host_json["status"] == 200: self.module.debug("RPC call succeeded") return host_json["data"] else: self.module.debug("RPC call failed") self.module.debug(host_json) return None def get_collector_by_description(self, description): """Returns a JSON collector object for the collector matching the specified FQDN (description)""" self.module.debug( "Running LogicMonitor.get_collector_by_description..." ) collector_list = self.get_collectors() if collector_list is not None: self.module.debug("Looking for collector with description {0}" + description) for collector in collector_list: if collector["description"] == description: self.module.debug("Collector match found") return collector self.module.debug("No collector match found") return None def get_group(self, fullpath): """Returns a JSON group object for the group matching the specified path""" self.module.debug("Running LogicMonitor.get_group...") self.module.debug("Making RPC call to getHostGroups") resp = json.loads(self.rpc("getHostGroups", {})) if resp["status"] == 200: self.module.debug("RPC called succeeded") groups = resp["data"] self.module.debug("Looking for group matching " + fullpath) for group in groups: if group["fullPath"] == fullpath.lstrip('/'): self.module.debug("Group match found") return group self.module.debug("No group match found") return None else: self.module.debug("RPC call failed") self.module.debug(resp) return None def create_group(self, fullpath): """Recursively create a path 
of host groups. Returns the id of the newly created hostgroup""" self.module.debug("Running LogicMonitor.create_group...") res = self.get_group(fullpath) if res: self.module.debug("Group {0} exists." + fullpath) return res["id"] if fullpath == "/": self.module.debug("Specified group is root. Doing nothing.") return 1 else: self.module.debug("Creating group named " + fullpath) self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) parentpath, name = fullpath.rsplit('/', 1) parentgroup = self.get_group(parentpath) parentid = 1 if parentpath == "": parentid = 1 elif parentgroup: parentid = parentgroup["id"] else: parentid = self.create_group(parentpath) h = None # Determine if we're creating a group from host or hostgroup class if hasattr(self, '_build_host_group_hash'): h = self._build_host_group_hash( fullpath, self.description, self.properties, self.alertenable) h["name"] = name h["parentId"] = parentid else: h = {"name": name, "parentId": parentid, "alertEnable": True, "description": ""} self.module.debug("Making RPC call to 'addHostGroup'") resp = json.loads( self.rpc("addHostGroup", h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"]["id"] elif resp["errmsg"] == "The record already exists": self.module.debug("The hostgroup already exists") group = self.get_group(fullpath) return group["id"] else: self.module.debug("RPC call failed") self.fail( msg="Error: unable to create new hostgroup \"" + name + "\".\n" + resp["errmsg"]) def fail(self, msg): self.module.fail_json(msg=msg, changed=self.change, failed=True) def exit(self, changed): self.module.debug("Changed: " + changed) self.module.exit_json(changed=changed, success=True) def output_info(self, info): self.module.debug("Registering properties as Ansible facts") self.module.exit_json(changed=False, ansible_facts=info) class Collector(LogicMonitor): def __init__(self, params, module=None): """Initializor for the LogicMonitor 
Collector object""" self.change = False self.params = params LogicMonitor.__init__(self, module, **params) self.module.debug("Instantiating Collector object") if self.params['description']: self.description = self.params['description'] else: self.description = self.fqdn self.info = self._get() self.installdir = "/usr/local/logicmonitor" self.platform = platform.system() self.is_64bits = sys.maxsize > 2**32 self.duration = self.params['duration'] self.starttime = self.params['starttime'] if self.info is None: self.id = None else: self.id = self.info["id"] def create(self): """Idempotent function to make sure that there is a running collector installed and registered""" self.module.debug("Running Collector.create...") self._create() self.get_installer_binary() self.install() def remove(self): """Idempotent function to make sure that there is not a running collector installed and registered""" self.module.debug("Running Collector.destroy...") self._unreigster() self.uninstall() def get_installer_binary(self): """Download the LogicMonitor collector installer binary""" self.module.debug("Running Collector.get_installer_binary...") arch = 32 if self.is_64bits: self.module.debug("64 bit system") arch = 64 else: self.module.debug("32 bit system") if self.platform == "Linux" and self.id is not None: self.module.debug("Platform is Linux") self.module.debug("Agent ID is " + str(self.id)) installfilepath = (self.installdir + "/logicmonitorsetup" + str(self.id) + "_" + str(arch) + ".bin") self.module.debug("Looking for existing installer at " + installfilepath) if not os.path.isfile(installfilepath): self.module.debug("No previous installer found") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.module.debug("Downloading installer file") # attempt to create the install dir before download self.module.run_command("mkdir " + self.installdir) try: installer = (self.do("logicmonitorsetup", {"id": self.id, "arch": arch})) with 
open(installfilepath, "w") as write_file: write_file.write(installer) except: self.fail(msg="Unable to open installer file for writing") else: self.module.debug("Collector installer already exists") return installfilepath elif self.id is None: self.fail( msg="Error: There is currently no collector " + "associated with this device. To download " + " the installer, first create a collector " + "for this device.") elif self.platform != "Linux": self.fail( msg="Error: LogicMonitor Collector must be " + "installed on a Linux device.") else: self.fail( msg="Error: Unable to retrieve the installer from the server") def install(self): """Execute the LogicMonitor installer if not already installed""" self.module.debug("Running Collector.install...") if self.platform == "Linux": self.module.debug("Platform is Linux") installer = self.get_installer_binary() if self.info is None: self.module.debug("Retrieving collector information") self.info = self._get() if not os.path.exists(self.installdir + "/agent"): self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.module.debug("Setting installer file permissions") os.chmod(installer, 484) # decimal for 0o744 self.module.debug("Executing installer") ret_code, out, err = self.module.run_command(installer + " -y") if ret_code != 0: self.fail(msg="Error: Unable to install collector: " + err) else: self.module.debug("Collector installed successfully") else: self.module.debug("Collector already installed") else: self.fail( msg="Error: LogicMonitor Collector must be " + "installed on a Linux device") def uninstall(self): """Uninstall LogicMontitor collector from the system""" self.module.debug("Running Collector.uninstall...") uninstallfile = self.installdir + "/agent/bin/uninstall.pl" if os.path.isfile(uninstallfile): self.module.debug("Collector uninstall file exists") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) 
self.module.debug("Running collector uninstaller") ret_code, out, err = self.module.run_command(uninstallfile) if ret_code != 0: self.fail( msg="Error: Unable to uninstall collector: " + err) else: self.module.debug("Collector successfully uninstalled") else: if os.path.exists(self.installdir + "/agent"): (self.fail( msg="Unable to uninstall LogicMonitor " + "Collector. Can not find LogicMonitor " + "uninstaller.")) def sdt(self): """Create a scheduled down time (maintenance window) for this host""" self.module.debug("Running Collector.sdt...") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) duration = self.duration starttime = self.starttime offsetstart = starttime if starttime: self.module.debug("Start time specified") start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') offsetstart = start else: self.module.debug("No start time specified. Using default.") start = datetime.datetime.utcnow() # Use user UTC offset self.module.debug("Making RPC call to 'getTimeZoneSetting'") accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) if accountresp["status"] == 200: self.module.debug("RPC call succeeded") offset = accountresp["data"]["offset"] offsetstart = start + datetime.timedelta(0, offset) else: self.fail(msg="Error: Unable to retrieve timezone offset") offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60) h = {"agentId": self.id, "type": 1, "notifyCC": True, "year": offsetstart.year, "month": offsetstart.month - 1, "day": offsetstart.day, "hour": offsetstart.hour, "minute": offsetstart.minute, "endYear": offsetend.year, "endMonth": offsetend.month - 1, "endDay": offsetend.day, "endHour": offsetend.hour, "endMinute": offsetend.minute} self.module.debug("Making RPC call to 'setAgentSDT'") resp = json.loads(self.rpc("setAgentSDT", h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"] else: self.module.debug("RPC call failed") 
self.fail(msg=resp["errmsg"]) def site_facts(self): """Output current properties information for the Collector""" self.module.debug("Running Collector.site_facts...") if self.info: self.module.debug("Collector exists") props = self.get_properties(True) self.output_info(props) else: self.fail(msg="Error: Collector doesn't exit.") def _get(self): """Returns a JSON object representing this collector""" self.module.debug("Running Collector._get...") collector_list = self.get_collectors() if collector_list is not None: self.module.debug("Collectors returned") for collector in collector_list: if collector["description"] == self.description: return collector else: self.module.debug("No collectors returned") return None def _create(self): """Create a new collector in the associated LogicMonitor account""" self.module.debug("Running Collector._create...") if self.platform == "Linux": self.module.debug("Platform is Linux") ret = self.info or self._get() if ret is None: self.change = True self.module.debug("System changed") if self.check_mode: self.exit(changed=True) h = {"autogen": True, "description": self.description} self.module.debug("Making RPC call to 'addAgent'") create = (json.loads(self.rpc("addAgent", h))) if create["status"] is 200: self.module.debug("RPC call succeeded") self.info = create["data"] self.id = create["data"]["id"] return create["data"] else: self.fail(msg=create["errmsg"]) else: self.info = ret self.id = ret["id"] return ret else: self.fail( msg="Error: LogicMonitor Collector must be " + "installed on a Linux device.") def _unreigster(self): """Delete this collector from the associated LogicMonitor account""" self.module.debug("Running Collector._unreigster...") if self.info is None: self.module.debug("Retrieving collector information") self.info = self._get() if self.info is not None: self.module.debug("Collector found") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.module.debug("Making RPC 
call to 'deleteAgent'") delete = json.loads(self.rpc("deleteAgent", {"id": self.id})) if delete["status"] is 200: self.module.debug("RPC call succeeded") return delete else: # The collector couldn't unregister. Start the service again self.module.debug("Error unregistering collecting. " + delete["errmsg"]) self.fail(msg=delete["errmsg"]) else: self.module.debug("Collector not found") return None class Host(LogicMonitor): def __init__(self, params, module=None): """Initializor for the LogicMonitor host object""" self.change = False self.params = params self.collector = None LogicMonitor.__init__(self, module, **self.params) self.module.debug("Instantiating Host object") if self.params["hostname"]: self.module.debug("Hostname is " + self.params["hostname"]) self.hostname = self.params['hostname'] else: self.module.debug("No hostname specified. Using " + self.fqdn) self.hostname = self.fqdn if self.params["displayname"]: self.module.debug("Display name is " + self.params["displayname"]) self.displayname = self.params['displayname'] else: self.module.debug("No display name specified. Using " + self.fqdn) self.displayname = self.fqdn # Attempt to host information via display name of host name self.module.debug("Attempting to find host by displayname " + self.displayname) info = self.get_host_by_displayname(self.displayname) if info is not None: self.module.debug("Host found by displayname") # Used the host information to grab the collector description # if not provided if (not hasattr(self.params, "collector") and "agentDescription" in info): self.module.debug("Setting collector from host response. 
" + "Collector " + info["agentDescription"]) self.params["collector"] = info["agentDescription"] else: self.module.debug("Host not found by displayname") # At this point, a valid collector description is required for success # Check that the description exists or fail if self.params["collector"]: self.module.debug( "Collector specified is " + self.params["collector"] ) self.collector = (self.get_collector_by_description( self.params["collector"])) else: self.fail(msg="No collector specified.") # If the host wasn't found via displayname, attempt by hostname if info is None: self.module.debug("Attempting to find host by hostname " + self.hostname) info = self.get_host_by_hostname(self.hostname, self.collector) self.info = info self.properties = self.params["properties"] self.description = self.params["description"] self.starttime = self.params["starttime"] self.duration = self.params["duration"] self.alertenable = self.params["alertenable"] if self.params["groups"] is not None: self.groups = self._strip_groups(self.params["groups"]) else: self.groups = None def create(self): """Idemopotent function to create if missing, update if changed, or skip""" self.module.debug("Running Host.create...") self.update() def get_properties(self): """Returns a hash of the properties associated with this LogicMonitor host""" self.module.debug("Running Host.get_properties...") if self.info: self.module.debug("Making RPC call to 'getHostProperties'") properties_json = (json.loads(self.rpc("getHostProperties", {'hostId': self.info["id"], "filterSystemProperties": True}))) if properties_json["status"] == 200: self.module.debug("RPC call succeeded") return properties_json["data"] else: self.module.debug("Error: there was an issue retrieving the " + "host properties") self.module.debug(properties_json["errmsg"]) self.fail(msg=properties_json["status"]) else: self.module.debug( "Unable to find LogicMonitor host which matches " + self.displayname + " (" + self.hostname + ")" ) return None 
def set_properties(self, propertyhash): """update the host to have the properties contained in the property hash""" self.module.debug("Running Host.set_properties...") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.module.debug("Assigning property hash to host object") self.properties = propertyhash def add(self): """Add this device to monitoring in your LogicMonitor account""" self.module.debug("Running Host.add...") if self.collector and not self.info: self.module.debug("Host not registered. Registering.") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) h = self._build_host_hash( self.hostname, self.displayname, self.collector, self.description, self.groups, self.properties, self.alertenable) self.module.debug("Making RPC call to 'addHost'") resp = json.loads(self.rpc("addHost", h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"] else: self.module.debug("RPC call failed") self.module.debug(resp) return resp["errmsg"] elif self.collector is None: self.fail(msg="Specified collector doesn't exist") else: self.module.debug("Host already registered") def update(self): """This method takes changes made to this host and applies them to the corresponding host in your LogicMonitor account.""" self.module.debug("Running Host.update...") if self.info: self.module.debug("Host already registed") if self.is_changed(): self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) h = (self._build_host_hash( self.hostname, self.displayname, self.collector, self.description, self.groups, self.properties, self.alertenable)) h["id"] = self.info["id"] h["opType"] = "replace" self.module.debug("Making RPC call to 'updateHost'") resp = json.loads(self.rpc("updateHost", h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") else: self.module.debug("RPC call failed") self.fail(msg="Error: 
unable to update the host.") else: self.module.debug( "Host properties match supplied properties. " + "No changes to make." ) return self.info else: self.module.debug("Host not registed. Registering") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) return self.add() def remove(self): """Remove this host from your LogicMonitor account""" self.module.debug("Running Host.remove...") if self.info: self.module.debug("Host registered") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.module.debug("Making RPC call to 'deleteHost'") resp = json.loads(self.rpc("deleteHost", {"hostId": self.info["id"], "deleteFromSystem": True, "hostGroupId": 1})) if resp["status"] == 200: self.module.debug(resp) self.module.debug("RPC call succeeded") return resp else: self.module.debug("RPC call failed") self.module.debug(resp) self.fail(msg=resp["errmsg"]) else: self.module.debug("Host not registered") def is_changed(self): """Return true if the host doesn't match the LogicMonitor account""" self.module.debug("Running Host.is_changed") ignore = ['system.categories', 'snmp.version'] hostresp = self.get_host_by_displayname(self.displayname) if hostresp is None: hostresp = self.get_host_by_hostname(self.hostname, self.collector) if hostresp: self.module.debug("Comparing simple host properties") if hostresp["alertEnable"] != self.alertenable: return True if hostresp["description"] != self.description: return True if hostresp["displayedAs"] != self.displayname: return True if (self.collector and hasattr(self.collector, "id") and hostresp["agentId"] != self.collector["id"]): return True self.module.debug("Comparing groups.") if self._compare_groups(hostresp) is True: return True propresp = self.get_properties() if propresp: self.module.debug("Comparing properties.") if self._compare_props(propresp, ignore) is True: return True else: self.fail( msg="Error: Unknown error retrieving host 
properties") return False else: self.fail(msg="Error: Unknown error retrieving host information") def sdt(self): """Create a scheduled down time (maintenance window) for this host""" self.module.debug("Running Host.sdt...") if self.info: self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) duration = self.duration starttime = self.starttime offset = starttime if starttime: self.module.debug("Start time specified") start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') offsetstart = start else: self.module.debug("No start time specified. Using default.") start = datetime.datetime.utcnow() # Use user UTC offset self.module.debug("Making RPC call to 'getTimeZoneSetting'") accountresp = (json.loads(self.rpc("getTimeZoneSetting", {}))) if accountresp["status"] == 200: self.module.debug("RPC call succeeded") offset = accountresp["data"]["offset"] offsetstart = start + datetime.timedelta(0, offset) else: self.fail( msg="Error: Unable to retrieve timezone offset") offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60) h = {"hostId": self.info["id"], "type": 1, "year": offsetstart.year, "month": offsetstart.month - 1, "day": offsetstart.day, "hour": offsetstart.hour, "minute": offsetstart.minute, "endYear": offsetend.year, "endMonth": offsetend.month - 1, "endDay": offsetend.day, "endHour": offsetend.hour, "endMinute": offsetend.minute} self.module.debug("Making RPC call to 'setHostSDT'") resp = (json.loads(self.rpc("setHostSDT", h))) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"] else: self.module.debug("RPC call failed") self.fail(msg=resp["errmsg"]) else: self.fail(msg="Error: Host doesn't exit.") def site_facts(self): """Output current properties information for the Host""" self.module.debug("Running Host.site_facts...") if self.info: self.module.debug("Host exists") props = self.get_properties() self.output_info(props) else: self.fail(msg="Error: Host doesn't 
exit.") def _build_host_hash(self, hostname, displayname, collector, description, groups, properties, alertenable): """Return a property formatted hash for the creation of a host using the rpc function""" self.module.debug("Running Host._build_host_hash...") h = {} h["hostName"] = hostname h["displayedAs"] = displayname h["alertEnable"] = alertenable if collector: self.module.debug("Collector property exists") h["agentId"] = collector["id"] else: self.fail( msg="Error: No collector found. Unable to build host hash.") if description: h["description"] = description if groups is not None and groups is not []: self.module.debug("Group property exists") groupids = "" for group in groups: groupids = groupids + str(self.create_group(group)) + "," h["hostGroupIds"] = groupids.rstrip(',') if properties is not None and properties is not {}: self.module.debug("Properties hash exists") propnum = 0 for key, value in properties.items(): h["propName" + str(propnum)] = key h["propValue" + str(propnum)] = value propnum = propnum + 1 return h def _verify_property(self, propname): """Check with LogicMonitor server to verify property is unchanged""" self.module.debug("Running Host._verify_property...") if self.info: self.module.debug("Host is registered") if propname not in self.properties: self.module.debug("Property " + propname + " does not exist") return False else: self.module.debug("Property " + propname + " exists") h = {"hostId": self.info["id"], "propName0": propname, "propValue0": self.properties[propname]} self.module.debug("Making RCP call to 'verifyProperties'") resp = json.loads(self.rpc('verifyProperties', h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"]["match"] else: self.fail( msg="Error: unable to get verification " + "from server.\n%s" % resp["errmsg"]) else: self.fail( msg="Error: Host doesn't exist. 
Unable to verify properties") def _compare_groups(self, hostresp): """Function to compare the host's current groups against provided groups""" self.module.debug("Running Host._compare_groups") g = [] fullpathinids = hostresp["fullPathInIds"] self.module.debug("Building list of groups") for path in fullpathinids: if path != []: h = {'hostGroupId': path[-1]} hgresp = json.loads(self.rpc("getHostGroup", h)) if (hgresp["status"] == 200 and hgresp["data"]["appliesTo"] == ""): g.append(path[-1]) if self.groups is not None: self.module.debug("Comparing group lists") for group in self.groups: groupjson = self.get_group(group) if groupjson is None: self.module.debug("Group mismatch. No result.") return True elif groupjson['id'] not in g: self.module.debug("Group mismatch. ID doesn't exist.") return True else: g.remove(groupjson['id']) if g != []: self.module.debug("Group mismatch. New ID exists.") return True self.module.debug("Groups match") def _compare_props(self, propresp, ignore): """Function to compare the host's current properties against provided properties""" self.module.debug("Running Host._compare_props...") p = {} self.module.debug("Creating list of properties") for prop in propresp: if prop["name"] not in ignore: if ("*******" in prop["value"] and self._verify_property(prop["name"])): p[prop["name"]] = self.properties[prop["name"]] else: p[prop["name"]] = prop["value"] self.module.debug("Comparing properties") # Iterate provided properties and compare to received properties for prop in self.properties: if (prop not in p or p[prop] != self.properties[prop]): self.module.debug("Properties mismatch") return True self.module.debug("Properties match") def _strip_groups(self, groups): """Function to strip whitespace from group list. 
This function provides the user some flexibility when formatting group arguments """ self.module.debug("Running Host._strip_groups...") return map(lambda x: x.strip(), groups) class Datasource(LogicMonitor): def __init__(self, params, module=None): """Initializor for the LogicMonitor Datasource object""" self.change = False self.params = params LogicMonitor.__init__(self, module, **params) self.module.debug("Instantiating Datasource object") self.id = self.params["id"] self.starttime = self.params["starttime"] self.duration = self.params["duration"] def sdt(self): """Create a scheduled down time (maintenance window) for this host""" self.module.debug("Running Datasource.sdt...") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) duration = self.duration starttime = self.starttime offsetstart = starttime if starttime: self.module.debug("Start time specified") start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') offsetstart = start else: self.module.debug("No start time specified. 
Using default.") start = datetime.datetime.utcnow() # Use user UTC offset self.module.debug("Making RPC call to 'getTimeZoneSetting'") accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) if accountresp["status"] == 200: self.module.debug("RPC call succeeded") offset = accountresp["data"]["offset"] offsetstart = start + datetime.timedelta(0, offset) else: self.fail(msg="Error: Unable to retrieve timezone offset") offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60) h = {"hostDataSourceId": self.id, "type": 1, "notifyCC": True, "year": offsetstart.year, "month": offsetstart.month - 1, "day": offsetstart.day, "hour": offsetstart.hour, "minute": offsetstart.minute, "endYear": offsetend.year, "endMonth": offsetend.month - 1, "endDay": offsetend.day, "endHour": offsetend.hour, "endMinute": offsetend.minute} self.module.debug("Making RPC call to 'setHostDataSourceSDT'") resp = json.loads(self.rpc("setHostDataSourceSDT", h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"] else: self.module.debug("RPC call failed") self.fail(msg=resp["errmsg"]) class Hostgroup(LogicMonitor): def __init__(self, params, module=None): """Initializor for the LogicMonitor host object""" self.change = False self.params = params LogicMonitor.__init__(self, module, **self.params) self.module.debug("Instantiating Hostgroup object") self.fullpath = self.params["fullpath"] self.info = self.get_group(self.fullpath) self.properties = self.params["properties"] self.description = self.params["description"] self.starttime = self.params["starttime"] self.duration = self.params["duration"] self.alertenable = self.params["alertenable"] def create(self): """Wrapper for self.update()""" self.module.debug("Running Hostgroup.create...") self.update() def get_properties(self, final=False): """Returns a hash of the properties associated with this LogicMonitor host""" self.module.debug("Running Hostgroup.get_properties...") if self.info: 
self.module.debug("Group found") self.module.debug("Making RPC call to 'getHostGroupProperties'") properties_json = json.loads(self.rpc( "getHostGroupProperties", {'hostGroupId': self.info["id"], "finalResult": final})) if properties_json["status"] == 200: self.module.debug("RPC call succeeded") return properties_json["data"] else: self.module.debug("RPC call failed") self.fail(msg=properties_json["status"]) else: self.module.debug("Group not found") return None def set_properties(self, propertyhash): """Update the host to have the properties contained in the property hash""" self.module.debug("Running Hostgroup.set_properties") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.module.debug("Assigning property has to host object") self.properties = propertyhash def add(self): """Idempotent function to ensure that the host group exists in your LogicMonitor account""" self.module.debug("Running Hostgroup.add") if self.info is None: self.module.debug("Group doesn't exist. 
Creating.") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.create_group(self.fullpath) self.info = self.get_group(self.fullpath) self.module.debug("Group created") return self.info else: self.module.debug("Group already exists") def update(self): """Idempotent function to ensure the host group settings (alertenable, properties, etc) in the LogicMonitor account match the current object.""" self.module.debug("Running Hostgroup.update") if self.info: if self.is_changed(): self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) h = self._build_host_group_hash( self.fullpath, self.description, self.properties, self.alertenable) h["opType"] = "replace" if self.fullpath != "/": h["id"] = self.info["id"] self.module.debug("Making RPC call to 'updateHostGroup'") resp = json.loads(self.rpc("updateHostGroup", h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"] else: self.module.debug("RPC call failed") self.fail(msg="Error: Unable to update the " + "host.\n" + resp["errmsg"]) else: self.module.debug( "Group properties match supplied properties. " + "No changes to make" ) return self.info else: self.module.debug("Group doesn't exist. 
Creating.") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) return self.add() def remove(self): """Idempotent function to ensure the host group does not exist in your LogicMonitor account""" self.module.debug("Running Hostgroup.remove...") if self.info: self.module.debug("Group exists") self.module.debug("System changed") self.change = True if self.check_mode: self.exit(changed=True) self.module.debug("Making RPC call to 'deleteHostGroup'") resp = json.loads(self.rpc("deleteHostGroup", {"hgId": self.info["id"]})) if resp["status"] == 200: self.module.debug(resp) self.module.debug("RPC call succeeded") return resp elif resp["errmsg"] == "No such group": self.module.debug("Group doesn't exist") else: self.module.debug("RPC call failed") self.module.debug(resp) self.fail(msg=resp["errmsg"]) else: self.module.debug("Group doesn't exist") def is_changed(self): """Return true if the host doesn't match the LogicMonitor account""" self.module.debug("Running Hostgroup.is_changed...") ignore = [] group = self.get_group(self.fullpath) properties = self.get_properties() if properties is not None and group is not None: self.module.debug("Comparing simple group properties") if (group["alertEnable"] != self.alertenable or group["description"] != self.description): return True p = {} self.module.debug("Creating list of properties") for prop in properties: if prop["name"] not in ignore: if ("*******" in prop["value"] and self._verify_property(prop["name"])): p[prop["name"]] = ( self.properties[prop["name"]]) else: p[prop["name"]] = prop["value"] self.module.debug("Comparing properties") if set(p) != set(self.properties): return True else: self.module.debug("No property information received") return False def sdt(self, duration=30, starttime=None): """Create a scheduled down time (maintenance window) for this host""" self.module.debug("Running Hostgroup.sdt") self.module.debug("System changed") self.change = True if self.check_mode: 
self.exit(changed=True) duration = self.duration starttime = self.starttime offset = starttime if starttime: self.module.debug("Start time specified") start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') offsetstart = start else: self.module.debug("No start time specified. Using default.") start = datetime.datetime.utcnow() # Use user UTC offset self.module.debug("Making RPC call to 'getTimeZoneSetting'") accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) if accountresp["status"] == 200: self.module.debug("RPC call succeeded") offset = accountresp["data"]["offset"] offsetstart = start + datetime.timedelta(0, offset) else: self.fail( msg="Error: Unable to retrieve timezone offset") offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60) h = {"hostGroupId": self.info["id"], "type": 1, "year": offsetstart.year, "month": offsetstart.month - 1, "day": offsetstart.day, "hour": offsetstart.hour, "minute": offsetstart.minute, "endYear": offsetend.year, "endMonth": offsetend.month - 1, "endDay": offsetend.day, "endHour": offsetend.hour, "endMinute": offsetend.minute} self.module.debug("Making RPC call to setHostGroupSDT") resp = json.loads(self.rpc("setHostGroupSDT", h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"] else: self.module.debug("RPC call failed") self.fail(msg=resp["errmsg"]) def site_facts(self): """Output current properties information for the Hostgroup""" self.module.debug("Running Hostgroup.site_facts...") if self.info: self.module.debug("Group exists") props = self.get_properties(True) self.output_info(props) else: self.fail(msg="Error: Group doesn't exit.") def _build_host_group_hash(self, fullpath, description, properties, alertenable): """Return a property formatted hash for the creation of a hostgroup using the rpc function""" self.module.debug("Running Hostgroup._build_host_hash") h = {} h["alertEnable"] = alertenable if fullpath == "/": self.module.debug("Group is root") h["id"] 
= 1 else: self.module.debug("Determining group path") parentpath, name = fullpath.rsplit('/', 1) parent = self.get_group(parentpath) h["name"] = name if parent: self.module.debug("Parent group " + str(parent["id"]) + " found.") h["parentID"] = parent["id"] else: self.module.debug("No parent group found. Using root.") h["parentID"] = 1 if description: self.module.debug("Description property exists") h["description"] = description if properties != {}: self.module.debug("Properties hash exists") propnum = 0 for key, value in properties.items(): h["propName" + str(propnum)] = key h["propValue" + str(propnum)] = value propnum = propnum + 1 return h def _verify_property(self, propname): """Check with LogicMonitor server to verify property is unchanged""" self.module.debug("Running Hostgroup._verify_property") if self.info: self.module.debug("Group exists") if propname not in self.properties: self.module.debug("Property " + propname + " does not exist") return False else: self.module.debug("Property " + propname + " exists") h = {"hostGroupId": self.info["id"], "propName0": propname, "propValue0": self.properties[propname]} self.module.debug("Making RCP call to 'verifyProperties'") resp = json.loads(self.rpc('verifyProperties', h)) if resp["status"] == 200: self.module.debug("RPC call succeeded") return resp["data"]["match"] else: self.fail( msg="Error: unable to get verification " + "from server.\n%s" % resp["errmsg"]) else: self.fail( msg="Error: Group doesn't exist. 
Unable to verify properties")


def selector(module):
    """Figure out which object and which actions
    to take given the right parameters"""

    # Instantiate the handler matching the requested target; each branch
    # validates the parameters that only that target requires.
    if module.params["target"] == "collector":
        target = Collector(module.params, module)
    elif module.params["target"] == "host":
        # Make sure required parameter collector is specified
        if ((module.params["action"] == "add" or
                module.params["displayname"] is None) and
                module.params["collector"] is None):
            module.fail_json(
                msg="Parameter 'collector' required.")
        target = Host(module.params, module)
    elif module.params["target"] == "datasource":
        # Validate target specific required parameters
        if module.params["id"] is not None:
            # make sure a supported action was specified
            if module.params["action"] == "sdt":
                target = Datasource(module.params, module)
            else:
                errmsg = ("Error: Unexpected action \"" +
                          module.params["action"] + "\" was specified.")
                module.fail_json(msg=errmsg)
        # NOTE(review): if target is "datasource" and no 'id' is supplied,
        # `target` is never assigned and the `action()` call below raises
        # UnboundLocalError -- presumably this branch should fail_json;
        # confirm before changing.
    elif module.params["target"] == "hostgroup":
        # Validate target specific required parameters
        if module.params["fullpath"] is not None:
            target = Hostgroup(module.params, module)
        else:
            module.fail_json(
                msg="Parameter 'fullpath' required for target 'hostgroup'")
    else:
        module.fail_json(
            msg="Error: Unexpected target \"" +
            module.params["target"] + "\" was specified.")

    # Map the requested action onto the handler's method, run it, and
    # report the changed status back to Ansible.
    if module.params["action"].lower() == "add":
        action = target.create
    elif module.params["action"].lower() == "remove":
        action = target.remove
    elif module.params["action"].lower() == "sdt":
        action = target.sdt
    elif module.params["action"].lower() == "update":
        action = target.update
    else:
        errmsg = ("Error: Unexpected action \"" + module.params["action"] +
                  "\" was specified.")
        module.fail_json(msg=errmsg)

    action()
    module.exit_json(changed=target.change)


def main():
    # Valid values for the 'target' and 'action' module arguments.
    TARGETS = [
        "collector",
        "host",
        "datasource",
        "hostgroup"]

    ACTIONS = [
        "add",
        "remove",
        "sdt",
        "update"]

    module = AnsibleModule(
        argument_spec=dict(
            target=dict(required=True, default=None, choices=TARGETS),
            action=dict(required=True, default=None, choices=ACTIONS),
            company=dict(required=True, default=None),
            user=dict(required=True, default=None),
            password=dict(required=True, default=None, no_log=True),
            collector=dict(required=False, default=None),
            hostname=dict(required=False, default=None),
            displayname=dict(required=False, default=None),
            id=dict(required=False, default=None),
            description=dict(required=False, default=""),
            fullpath=dict(required=False, default=None),
            starttime=dict(required=False, default=None),
            duration=dict(required=False, default=30),
            properties=dict(required=False, default={}, type="dict"),
            groups=dict(required=False, default=[], type="list"),
            alertenable=dict(required=False, default="true", type="bool")
        ),
        supports_check_mode=True
    )

    # The json/simplejson import is attempted at file load time; bail out
    # early if neither library could be imported.
    if HAS_LIB_JSON is not True:
        module.fail_json(msg="Unable to load JSON library")

    selector(module)


if __name__ == "__main__":
    main()
gpl-3.0
HaoboGu/Structure-Similarity
Drugbank.py
1
6376
import random
import numpy as np
import time
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score


def read_drugbank_data():
    """Load DrugBank pairwise data from disk.

    Returns a tuple (interact_dict, similarity_dict), both keyed by the
    (drugbank_id1, drugbank_id2) pair: interact_dict maps to an int
    interaction label, similarity_dict to a float chemical similarity.
    """
    interact_dict = {}
    # `with` guarantees the handle is closed even if parsing raises.
    with open('data/interacts.csv') as interaction_file:
        for line in interaction_file:
            # rstrip('\n') instead of line[0:-1]: does not eat the last
            # character when the final line has no trailing newline.
            db_id1, db_id2, interact_level = line.rstrip('\n').split('\t')
            interact_dict[db_id1, db_id2] = int(interact_level)  # tuple key

    similarity_dict = {}
    with open('data/chemicalsimilarity.csv') as similarity_file:
        for line in similarity_file:
            db_id1, db_id2, similarity = line.rstrip('\n').split('\t')
            similarity_dict[db_id1, db_id2] = float(similarity)

    return interact_dict, similarity_dict


class Validation:
    """Hold-out validation of a link-similarity logistic-regression model."""

    def __init__(self, interact_dict, similarity_dict):
        self.interaction = interact_dict    # (id1, id2) -> 0/1 label
        self.similarity = similarity_dict   # (id1, id2) -> float similarity
        self.train_set = {}
        self.validation_set = {}
        self.sim_link = {}
        self.positive_train = {}            # training pairs with label 1
        self.max_sim_with_positive_link = {}
        self.max_sim_with_positive_link_for_val = {}

    def divide_data(self):
        """Randomly hold out ~1/10 of the interactions for validation.

        Generalized from the original hard-coded sample(range(9892), 989):
        the split is derived from the actual number of interactions, which
        is identical behaviour for the shipped 9892-pair dataset.
        """
        self.train_set = {}
        self.validation_set = {}
        n_pairs = len(self.interaction)
        # A set gives O(1) membership tests; the original list membership
        # made the split loop accidentally quadratic.
        holdout = set(random.sample(range(n_pairs), n_pairs // 10))
        for flag, key in enumerate(self.interaction):
            if flag in holdout:
                self.validation_set[key] = self.interaction[key]
            else:
                self.train_set[key] = self.interaction[key]
        # Remember the known (positive) DDIs of the training split.
        for key in self.train_set:
            if self.train_set[key] == 1:
                self.positive_train[key] = 1

    def compute_link_sim(self, key1, key2):
        """Similarity between two links: the better of the two pairings."""
        link_sim1 = (self.similarity[key1[0], key2[0]] +
                     self.similarity[key1[1], key2[1]]) / 2.0
        link_sim2 = (self.similarity[key1[0], key2[1]] +
                     self.similarity[key1[1], key2[0]]) / 2.0
        return max(link_sim1, link_sim2)

    def create_simlink(self):
        """For every training link, record the similarity to (and identity
        of) its most similar known-positive link."""
        self.sim_link = {}
        for inter_key in self.train_set:
            max_link_sim = 0
            for inter_key2 in self.positive_train:
                # Skip positive links that share both drugs with the query
                # link (i.e. the link itself) to avoid leakage.
                if inter_key[0] in inter_key2 and inter_key[1] in inter_key2:
                    continue
                link_sim = self.compute_link_sim(inter_key, inter_key2)
                if link_sim > max_link_sim:
                    max_link_sim = link_sim
                    self.sim_link[inter_key] = inter_key2
            self.max_sim_with_positive_link[inter_key] = max_link_sim

    def create_simlink_for_val(self):
        """Compute the same max-similarity feature for validation links.

        Returns (sim_list, inter_list): the feature value and the true
        label for every validation pair.
        """
        self.sim_link = {}  # kept for parity with create_simlink (unused)
        for inter_key in self.validation_set:
            max_link_sim = 0
            for inter_key2 in self.positive_train:
                if inter_key[0] in inter_key2 and inter_key[1] in inter_key2:
                    continue
                link_sim = self.compute_link_sim(inter_key, inter_key2)
                if link_sim > max_link_sim:
                    max_link_sim = link_sim
            self.max_sim_with_positive_link_for_val[inter_key] = max_link_sim

        sim_list = []
        inter_list = []
        for inter_key in self.validation_set:
            sim_list.append(self.max_sim_with_positive_link_for_val[inter_key])
            inter_list.append(self.validation_set[inter_key])
        return sim_list, inter_list

    def create_train_array(self):
        """Build training arrays with 3:1 negative:positive sampling."""
        sim_list = []
        inter_list = []
        num = 0
        for inter_key in self.train_set:
            if self.train_set[inter_key] == 1:
                sim_list.append(self.max_sim_with_positive_link[inter_key])
                inter_list.append(self.train_set[inter_key])
                num += 1
        print('num of positive samples in train set: ', num)
        # Take three negatives per positive.
        num = num * 3
        for inter_key in self.train_set:
            if self.train_set[inter_key] == 0:
                sim_list.append(self.max_sim_with_positive_link[inter_key])
                inter_list.append(self.train_set[inter_key])
                num = num - 1
                if num == 0:
                    break
        return sim_list, inter_list

    def lr(self, sim_list, inter_list):
        """Fit logistic regression on the training feature, score the
        validation split, and print the ROC AUC.

        Returns (result, prob_re, val_inter): hard predictions, transposed
        class probabilities, and true validation labels (column vector).
        """
        model = LogisticRegression(solver='sag')
        train_x = np.asarray(sim_list).reshape(-1, 1)
        # Fit with 1-D labels as scikit-learn expects (the original passed
        # a column vector, triggering a DataConversionWarning).
        model.fit(train_x, np.asarray(inter_list).ravel())

        val_sim, val_inter = self.create_simlink_for_val()
        val_x = np.asarray(val_sim).reshape(-1, 1)
        val_inter = np.array(val_inter).reshape(len(val_inter), 1)
        result = model.predict(val_x)
        prob_re = model.predict_proba(val_x).transpose()
        auroc = roc_auc_score(val_inter, prob_re[1])
        print('roc score:', auroc)
        return result, prob_re, val_inter


start = time.time()
interact_dict, sim_dict = read_drugbank_data()
v = Validation(interact_dict, sim_dict)
v.divide_data()
v.create_simlink()
sim_list, inter_list = v.create_train_array()
result, prob_re, val_inter = v.lr(sim_list, inter_list)

TP = 0  # predict 1, actual 1
FP = 0  # predict 1, actual 0
TN = 0  # predict 0, actual 0
FN = 0  # predict 0, actual 1
for i in range(len(result)):
    # BUGFIX: the original first branch tested `result[i] == 0 and
    # result[i] == 0`, so every 0-prediction was counted as TN and FN was
    # always 0.  Compare the prediction against the true label instead.
    if result[i] == 0 and val_inter[i] == 0:
        TN += 1
    elif result[i] == 0 and val_inter[i] == 1:
        FN += 1
    elif result[i] == 1 and val_inter[i] == 0:
        FP += 1
    elif result[i] == 1 and val_inter[i] == 1:
        TP += 1
print('tp:', TP, ' tn:', TN, ' fp:', FP, ' fn:', FN)

# Guard against division by zero when the classifier predicts one class.
precision = TP / (TP + FP) if TP + FP else 0.0
recall = TP / (TP + FN) if TP + FN else 0.0
print('precision:', precision)
print('recall:', recall)
denom = precision + recall
print('f-score: ', 2 * precision * recall / denom if denom else 0.0)
end = time.time()
print(end - start)
mit
Smarsh/django
django/utils/regex_helper.py
361
12079
""" Functions for reversing a regular expression (used in reverse URL resolving). Used internally by Django and not intended for external use. This is not, and is not intended to be, a complete reg-exp decompiler. It should be good enough for a large class of URLS, however. """ # Mapping of an escape character to a representative of that class. So, e.g., # "\w" is replaced by "x" in a reverse URL. A value of None means to ignore # this sequence. Any missing key is mapped to itself. ESCAPE_MAPPINGS = { "A": None, "b": None, "B": None, "d": u"0", "D": u"x", "s": u" ", "S": u"x", "w": u"x", "W": u"!", "Z": None, } class Choice(list): """ Used to represent multiple possibilities at this point in a pattern string. We use a distinguished type, rather than a list, so that the usage in the code is clear. """ class Group(list): """ Used to represent a capturing group in the pattern string. """ class NonCapture(list): """ Used to represent a non-capturing group in the pattern string. """ def normalize(pattern): """ Given a reg-exp pattern, normalizes it to a list of forms that suffice for reverse matching. This does the following: (1) For any repeating sections, keeps the minimum number of occurrences permitted (this means zero for optional groups). (2) If an optional group includes parameters, include one occurrence of that group (along with the zero occurrence case from step (1)). (3) Select the first (essentially an arbitrary) element from any character class. Select an arbitrary character for any unordered class (e.g. '.' or '\w') in the pattern. (5) Ignore comments and any of the reg-exp flags that won't change what we construct ("iLmsu"). "(?x)" is an error, however. (6) Raise an error on all other non-capturing (?...) forms (e.g. look-ahead and look-behind matches) and any disjunctive ('|') constructs. Django's URLs for forward resolving are either all positional arguments or all keyword arguments. That is assumed here, as well. 
Although reverse resolving can be done using positional args when keyword args are specified, the two cannot be mixed in the same reverse() call. """ # Do a linear scan to work out the special features of this pattern. The # idea is that we scan once here and collect all the information we need to # make future decisions. result = [] non_capturing_groups = [] consume_next = True pattern_iter = next_char(iter(pattern)) num_args = 0 # A "while" loop is used here because later on we need to be able to peek # at the next character and possibly go around without consuming another # one at the top of the loop. try: ch, escaped = pattern_iter.next() except StopIteration: return zip([u''], [[]]) try: while True: if escaped: result.append(ch) elif ch == '.': # Replace "any character" with an arbitrary representative. result.append(u".") elif ch == '|': # FIXME: One day we'll should do this, but not in 1.0. raise NotImplementedError elif ch == "^": pass elif ch == '$': break elif ch == ')': # This can only be the end of a non-capturing group, since all # other unescaped parentheses are handled by the grouping # section later (and the full group is handled there). # # We regroup everything inside the capturing group so that it # can be quantified, if necessary. start = non_capturing_groups.pop() inner = NonCapture(result[start:]) result = result[:start] + [inner] elif ch == '[': # Replace ranges with the first character in the range. ch, escaped = pattern_iter.next() result.append(ch) ch, escaped = pattern_iter.next() while escaped or ch != ']': ch, escaped = pattern_iter.next() elif ch == '(': # Some kind of group. ch, escaped = pattern_iter.next() if ch != '?' or escaped: # A positional group name = "_%d" % num_args num_args += 1 result.append(Group(((u"%%(%s)s" % name), name))) walk_to_end(ch, pattern_iter) else: ch, escaped = pattern_iter.next() if ch in "iLmsu#": # All of these are ignorable. Walk to the end of the # group. 
walk_to_end(ch, pattern_iter) elif ch == ':': # Non-capturing group non_capturing_groups.append(len(result)) elif ch != 'P': # Anything else, other than a named group, is something # we cannot reverse. raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch) else: ch, escaped = pattern_iter.next() if ch != '<': raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch) # We are in a named capturing group. Extra the name and # then skip to the end. name = [] ch, escaped = pattern_iter.next() while ch != '>': name.append(ch) ch, escaped = pattern_iter.next() param = ''.join(name) result.append(Group(((u"%%(%s)s" % param), param))) walk_to_end(ch, pattern_iter) elif ch in "*?+{": # Quanitifers affect the previous item in the result list. count, ch = get_quantifier(ch, pattern_iter) if ch: # We had to look ahead, but it wasn't need to compute the # quanitifer, so use this character next time around the # main loop. consume_next = False if count == 0: if contains(result[-1], Group): # If we are quantifying a capturing group (or # something containing such a group) and the minimum is # zero, we must also handle the case of one occurrence # being present. All the quantifiers (except {0,0}, # which we conveniently ignore) that have a 0 minimum # also allow a single occurrence. result[-1] = Choice([None, result[-1]]) else: result.pop() elif count > 1: result.extend([result[-1]] * (count - 1)) else: # Anything else is a literal. result.append(ch) if consume_next: ch, escaped = pattern_iter.next() else: consume_next = True except StopIteration: pass except NotImplementedError: # A case of using the disjunctive form. No results for you! return zip([u''], [[]]) return zip(*flatten_result(result)) def next_char(input_iter): """ An iterator that yields the next character from "pattern_iter", respecting escape sequences. An escaped character is replaced by a representative of its class (e.g. \w -> "x"). 
If the escaped character is one that is skipped, it is not returned (the next character is returned instead). Yields the next character, along with a boolean indicating whether it is a raw (unescaped) character or not. """ for ch in input_iter: if ch != '\\': yield ch, False continue ch = input_iter.next() representative = ESCAPE_MAPPINGS.get(ch, ch) if representative is None: continue yield representative, True def walk_to_end(ch, input_iter): """ The iterator is currently inside a capturing group. We want to walk to the close of this group, skipping over any nested groups and handling escaped parentheses correctly. """ if ch == '(': nesting = 1 else: nesting = 0 for ch, escaped in input_iter: if escaped: continue elif ch == '(': nesting += 1 elif ch == ')': if not nesting: return nesting -= 1 def get_quantifier(ch, input_iter): """ Parse a quantifier from the input, where "ch" is the first character in the quantifier. Returns the minimum number of occurences permitted by the quantifier and either None or the next character from the input_iter if the next character is not part of the quantifier. """ if ch in '*?+': try: ch2, escaped = input_iter.next() except StopIteration: ch2 = None if ch2 == '?': ch2 = None if ch == '+': return 1, ch2 return 0, ch2 quant = [] while ch != '}': ch, escaped = input_iter.next() quant.append(ch) quant = quant[:-1] values = ''.join(quant).split(',') # Consume the trailing '?', if necessary. try: ch, escaped = input_iter.next() except StopIteration: ch = None if ch == '?': ch = None return int(values[0]), ch def contains(source, inst): """ Returns True if the "source" contains an instance of "inst". False, otherwise. """ if isinstance(source, inst): return True if isinstance(source, NonCapture): for elt in source: if contains(elt, inst): return True return False def flatten_result(source): """ Turns the given source sequence into a list of reg-exp possibilities and their arguments. 
Returns a list of strings and a list of argument lists. Each of the two lists will be of the same length. """ if source is None: return [u''], [[]] if isinstance(source, Group): if source[1] is None: params = [] else: params = [source[1]] return [source[0]], [params] result = [u''] result_args = [[]] pos = last = 0 for pos, elt in enumerate(source): if isinstance(elt, basestring): continue piece = u''.join(source[last:pos]) if isinstance(elt, Group): piece += elt[0] param = elt[1] else: param = None last = pos + 1 for i in range(len(result)): result[i] += piece if param: result_args[i].append(param) if isinstance(elt, (Choice, NonCapture)): if isinstance(elt, NonCapture): elt = [elt] inner_result, inner_args = [], [] for item in elt: res, args = flatten_result(item) inner_result.extend(res) inner_args.extend(args) new_result = [] new_args = [] for item, args in zip(result, result_args): for i_item, i_args in zip(inner_result, inner_args): new_result.append(item + i_item) new_args.append(args[:] + i_args) result = new_result result_args = new_args if pos >= last: piece = u''.join(source[last:]) for i in range(len(result)): result[i] += piece return result, result_args
bsd-3-clause
jcftang/ansible
test/units/module_utils/test_eos.py
7
4233
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleModuleExit from ansible.module_utils import eos fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except: pass fixture_data[path] = data return data class TestEosModuleUtil(unittest.TestCase): def setUp(self): eos._DEVICE_CONFIGS = {} def test_eos_get_config(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (0, ' sample config\n', '') self.assertFalse('show running-config' in eos._DEVICE_CONFIGS) out = eos.get_config(mock_module) self.assertEqual(out, 'sample config') self.assertTrue('show running-config' in eos._DEVICE_CONFIGS) self.assertEqual(eos._DEVICE_CONFIGS['show running-config'], 'sample config') def test_eos_get_config_cached(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (0, ' sample config\n', '') 
eos._DEVICE_CONFIGS['show running-config'] = 'different config' out = eos.get_config(mock_module) self.assertEqual(out, 'different config') self.assertFalse(mock_module.exec_command.called) def test_eos_get_config_error(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (1, '', 'error') out = eos.get_config(mock_module, 'show running_config') self.assertTrue(mock_module.fail_json.called) def test_eos_supports_sessions_fail(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (1, '', '') self.assertFalse(eos.supports_sessions(mock_module)) mock_module.exec_command.called_with_args(['show configuration sessions']) def test_eos_supports_sessions_pass(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (0, '', '') self.assertTrue(eos.supports_sessions(mock_module)) mock_module.exec_command.called_with_args(['show configuration sessions']) def test_eos_run_commands(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (0, 'stdout', '') mock_module.from_json.side_effect = ValueError out = eos.run_commands(mock_module, 'command') self.assertEqual(out, ['stdout']) def test_eos_run_commands_returns_json(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (0, '{"key": "value"}', '') mock_module.from_json.return_value = json.loads('{"key": "value"}') out = eos.run_commands(mock_module, 'command') self.assertEqual(out, [{'key': 'value'}]) def test_eos_run_commands_check_rc_fails(self): mock_module = MagicMock(name='AnsibleModule') mock_module.exec_command.return_value = (1, '', 'stderr') out = eos.run_commands(mock_module, 'command') mock_module.fail_json.called_with_args({'msg': 'stderr'})
gpl-3.0
nurmd2/nurmd
addons/gamification/models/goal.py
11
25827
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF from openerp.tools.safe_eval import safe_eval from openerp.tools.translate import _ from openerp.exceptions import UserError import logging import time from datetime import date, datetime, timedelta _logger = logging.getLogger(__name__) class gamification_goal_definition(osv.Model): """Goal definition A goal definition contains the way to evaluate an objective Each module wanting to be able to set goals to the users needs to create a new gamification_goal_definition """ _name = 'gamification.goal.definition' _description = 'Gamification goal definition' def _get_suffix(self, cr, uid, ids, field_name, arg, context=None): res = dict.fromkeys(ids, '') for goal in self.browse(cr, uid, ids, context=context): if goal.suffix and not goal.monetary: res[goal.id] = goal.suffix elif goal.monetary: # use the current user's company currency user = self.pool.get('res.users').browse(cr, uid, uid, context) if goal.suffix: res[goal.id] = "%s %s" % (user.company_id.currency_id.symbol, goal.suffix) else: res[goal.id] = user.company_id.currency_id.symbol else: res[goal.id] = "" return res _columns = { 'name': fields.char('Goal Definition', required=True, translate=True), 'description': fields.text('Goal Description'), 'monetary': fields.boolean('Monetary Value', help="The target and current value are defined in the company currency."), 'suffix': fields.char('Suffix', help="The unit of the target and current values", translate=True), 'full_suffix': fields.function(_get_suffix, type="char", string="Full Suffix", help="The currency and suffix field"), 'computation_mode': fields.selection([ ('manually', 'Recorded manually'), ('count', 'Automatic: number of records'), ('sum', 'Automatic: sum on a field'), ('python', 'Automatic: execute a specific Python code'), ], 
string="Computation Mode", help="Defined how will be computed the goals. The result of the operation will be stored in the field 'Current'.", required=True), 'display_mode': fields.selection([ ('progress', 'Progressive (using numerical values)'), ('boolean', 'Exclusive (done or not-done)'), ], string="Displayed as", required=True), 'model_id': fields.many2one('ir.model', string='Model', help='The model object for the field to evaluate'), 'model_inherited_model_ids': fields.related('model_id', 'inherited_model_ids', type="many2many", obj="ir.model", string="Inherited models", readonly="True"), 'field_id': fields.many2one('ir.model.fields', string='Field to Sum', help='The field containing the value to evaluate'), 'field_date_id': fields.many2one('ir.model.fields', string='Date Field', help='The date to use for the time period evaluated'), 'domain': fields.char("Filter Domain", help="Domain for filtering records. General rule, not user depending, e.g. [('state', '=', 'done')]. The expression can contain reference to 'user' which is a browse record of the current user if not in batch mode.", required=True), 'batch_mode': fields.boolean('Batch Mode', help="Evaluate the expression in batch instead of once for each user"), 'batch_distinctive_field': fields.many2one('ir.model.fields', string="Distinctive field for batch user", help="In batch mode, this indicates which field distinct one user form the other, e.g. user_id, partner_id..."), 'batch_user_expression': fields.char("Evaluted expression for batch mode", help="The value to compare with the distinctive field. The expression can contain reference to 'user' which is a browse record of the current user, e.g. user.id, user.partner_id.id..."), 'compute_code': fields.text('Python Code', help="Python code to be executed for each user. 'result' should contains the new current value. 
Evaluated user can be access through object.user_id."), 'condition': fields.selection([ ('higher', 'The higher the better'), ('lower', 'The lower the better') ], string='Goal Performance', help='A goal is considered as completed when the current value is compared to the value to reach', required=True), 'action_id': fields.many2one('ir.actions.act_window', string="Action", help="The action that will be called to update the goal value."), 'res_id_field': fields.char("ID Field of user", help="The field name on the user profile (res.users) containing the value for res_id for action."), } _defaults = { 'condition': 'higher', 'computation_mode': 'manually', 'domain': "[]", 'monetary': False, 'display_mode': 'progress', } def number_following(self, cr, uid, model_name="mail.thread", context=None): """Return the number of 'model_name' objects the user is following The model specified in 'model_name' must inherit from mail.thread """ user = self.pool.get('res.users').browse(cr, uid, uid, context=context) return self.pool.get('mail.followers').search(cr, uid, [('res_model', '=', model_name), ('partner_id', '=', user.partner_id.id)], count=True, context=context) def _check_domain_validity(self, cr, uid, ids, context=None): # take admin as should always be present superuser = self.pool['res.users'].browse(cr, uid, SUPERUSER_ID, context=context) for definition in self.browse(cr, uid, ids, context=context): if definition.computation_mode not in ('count', 'sum'): continue obj = self.pool[definition.model_id.model] try: domain = safe_eval(definition.domain, {'user': superuser}) # demmy search to make sure the domain is valid obj.search(cr, uid, domain, context=context, count=True) except (ValueError, SyntaxError), e: msg = e.message or (e.msg + '\n' + e.text) raise UserError(_("The domain for the definition %s seems incorrect, please check it.\n\n%s" % (definition.name, msg))) return True def create(self, cr, uid, vals, context=None): res_id = super(gamification_goal_definition, 
self).create(cr, uid, vals, context=context) if vals.get('computation_mode') in ('count', 'sum'): self._check_domain_validity(cr, uid, [res_id], context=context) return res_id def write(self, cr, uid, ids, vals, context=None): res = super(gamification_goal_definition, self).write(cr, uid, ids, vals, context=context) if vals.get('computation_mode', 'count') in ('count', 'sum') and (vals.get('domain') or vals.get('model_id')): self._check_domain_validity(cr, uid, ids, context=context) return res def on_change_model_id(self, cr, uid, ids, model_id, context=None): """Prefill field model_inherited_model_ids""" if not model_id: return {'value': {'model_inherited_model_ids': []}} model = self.pool['ir.model'].browse(cr, uid, model_id, context=context) # format (6, 0, []) to construct the domain ('model_id', 'in', m and m[0] and m[0][2]) return {'value': {'model_inherited_model_ids': [(6, 0, [m.id for m in model.inherited_model_ids])]}} class gamification_goal(osv.Model): """Goal instance for a user An individual goal for a user on a specified time period""" _name = 'gamification.goal' _description = 'Gamification goal instance' def _get_completion(self, cr, uid, ids, field_name, arg, context=None): """Return the percentage of completeness of the goal, between 0 and 100""" res = dict.fromkeys(ids, 0.0) for goal in self.browse(cr, uid, ids, context=context): if goal.definition_condition == 'higher': if goal.current >= goal.target_goal: res[goal.id] = 100.0 else: res[goal.id] = round(100.0 * goal.current / goal.target_goal, 2) elif goal.current < goal.target_goal: # a goal 'lower than' has only two values possible: 0 or 100% res[goal.id] = 100.0 else: res[goal.id] = 0.0 return res def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None): goal_definition = self.pool.get('gamification.goal.definition') if not definition_id: return {'value': {'definition_id': False}} goal_definition = goal_definition.browse(cr, uid, definition_id, context=context) 
return {'value': {'computation_mode': goal_definition.computation_mode, 'definition_condition': goal_definition.condition}} _columns = { 'definition_id': fields.many2one('gamification.goal.definition', string='Goal Definition', required=True, ondelete="cascade"), 'user_id': fields.many2one('res.users', string='User', required=True, auto_join=True, ondelete="cascade"), 'line_id': fields.many2one('gamification.challenge.line', string='Challenge Line', ondelete="cascade"), 'challenge_id': fields.related('line_id', 'challenge_id', string="Challenge", type='many2one', relation='gamification.challenge', store=True, readonly=True, help="Challenge that generated the goal, assign challenge to users to generate goals with a value in this field."), 'start_date': fields.date('Start Date'), 'end_date': fields.date('End Date'), # no start and end = always active 'target_goal': fields.float('To Reach', required=True, track_visibility='always'), # no goal = global index 'current': fields.float('Current Value', required=True, track_visibility='always'), 'completeness': fields.function(_get_completion, type='float', string='Completeness'), 'state': fields.selection([ ('draft', 'Draft'), ('inprogress', 'In progress'), ('reached', 'Reached'), ('failed', 'Failed'), ('canceled', 'Canceled'), ], string='State', required=True, track_visibility='always'), 'to_update': fields.boolean('To update'), 'closed': fields.boolean('Closed goal', help="These goals will not be recomputed."), 'computation_mode': fields.related('definition_id', 'computation_mode', type='char', string="Computation mode"), 'remind_update_delay': fields.integer('Remind delay', help="The number of days after which the user assigned to a manual goal will be reminded. Never reminded if no value is specified."), 'last_update': fields.date('Last Update', help="In case of manual goal, reminders are sent if the goal as not been updated for a while (defined in challenge). 
Ignored in case of non-manual goal or goal not linked to a challenge."), 'definition_description': fields.related('definition_id', 'description', type='char', string='Definition Description', readonly=True), 'definition_condition': fields.related('definition_id', 'condition', type='char', string='Definition Condition', readonly=True), 'definition_suffix': fields.related('definition_id', 'full_suffix', type="char", string="Suffix", readonly=True), 'definition_display': fields.related('definition_id', 'display_mode', type="char", string="Display Mode", readonly=True), } _defaults = { 'current': 0, 'state': 'draft', 'start_date': fields.date.today, } _order = 'start_date desc, end_date desc, definition_id, id' def _check_remind_delay(self, cr, uid, goal, context=None): """Verify if a goal has not been updated for some time and send a reminder message of needed. :return: data to write on the goal object """ temp_obj = self.pool['mail.template'] if goal.remind_update_delay and goal.last_update: delta_max = timedelta(days=goal.remind_update_delay) last_update = datetime.strptime(goal.last_update, DF).date() if date.today() - last_update > delta_max: # generate a remind report temp_obj = self.pool.get('mail.template') template_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'gamification', 'email_template_goal_reminder')[0] template = temp_obj.get_email_template(cr, uid, template_id, goal.id, context=context) body_html = temp_obj.render_template(cr, uid, template.body_html, 'gamification.goal', goal.id, context=template._context) self.pool['mail.thread'].message_post(cr, uid, 0, body=body_html, partner_ids=[goal.user_id.partner_id.id], context=context, subtype='mail.mt_comment') return {'to_update': True} return {} def _get_write_values(self, cr, uid, goal, new_value, context=None): """Generate values to write after recomputation of a goal score""" if new_value == goal.current: # avoid useless write if the new value is the same as the old one return {} 
result = {goal.id: {'current': new_value}} if (goal.definition_id.condition == 'higher' and new_value >= goal.target_goal) \ or (goal.definition_id.condition == 'lower' and new_value <= goal.target_goal): # success, do no set closed as can still change result[goal.id]['state'] = 'reached' elif goal.end_date and fields.date.today() > goal.end_date: # check goal failure result[goal.id]['state'] = 'failed' result[goal.id]['closed'] = True return result def update(self, cr, uid, ids, context=None): """Update the goals to recomputes values and change of states If a manual goal is not updated for enough time, the user will be reminded to do so (done only once, in 'inprogress' state). If a goal reaches the target value, the status is set to reached If the end date is passed (at least +1 day, time not considered) without the target value being reached, the goal is set as failed.""" if context is None: context = {} commit = context.get('commit_gamification', False) goals_by_definition = {} for goal in self.browse(cr, uid, ids, context=context): goals_by_definition.setdefault(goal.definition_id, []).append(goal) for definition, goals in goals_by_definition.items(): goals_to_write = dict((goal.id, {}) for goal in goals) if definition.computation_mode == 'manually': for goal in goals: goals_to_write[goal.id].update(self._check_remind_delay(cr, uid, goal, context)) elif definition.computation_mode == 'python': # TODO batch execution for goal in goals: # execute the chosen method cxt = { 'self': self.pool.get('gamification.goal'), 'object': goal, 'pool': self.pool, 'cr': cr, 'context': dict(context), # copy context to prevent side-effects of eval 'uid': uid, 'date': date, 'datetime': datetime, 'timedelta': timedelta, 'time': time } code = definition.compute_code.strip() safe_eval(code, cxt, mode="exec", nocopy=True) # the result of the evaluated codeis put in the 'result' local variable, propagated to the context result = cxt.get('result') if result is not None and type(result) 
in (float, int, long): goals_to_write.update( self._get_write_values(cr, uid, goal, result, context=context) ) else: _logger.exception(_('Invalid return content from the evaluation of code for definition %s') % definition.name) else: # count or sum obj = self.pool.get(definition.model_id.model) field_date_name = definition.field_date_id and definition.field_date_id.name or False if definition.computation_mode == 'count' and definition.batch_mode: # batch mode, trying to do as much as possible in one request general_domain = safe_eval(definition.domain) field_name = definition.batch_distinctive_field.name subqueries = {} for goal in goals: start_date = field_date_name and goal.start_date or False end_date = field_date_name and goal.end_date or False subqueries.setdefault((start_date, end_date), {}).update({goal.id:safe_eval(definition.batch_user_expression, {'user': goal.user_id})}) # the global query should be split by time periods (especially for recurrent goals) for (start_date, end_date), query_goals in subqueries.items(): subquery_domain = list(general_domain) subquery_domain.append((field_name, 'in', list(set(query_goals.values())))) if start_date: subquery_domain.append((field_date_name, '>=', start_date)) if end_date: subquery_domain.append((field_date_name, '<=', end_date)) if field_name == 'id': # grouping on id does not work and is similar to search anyway user_ids = obj.search(cr, uid, subquery_domain, context=context) user_values = [{'id': user_id, 'id_count': 1} for user_id in user_ids] else: user_values = obj.read_group(cr, uid, subquery_domain, fields=[field_name], groupby=[field_name], context=context) # user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3},...] 
for goal in [g for g in goals if g.id in query_goals.keys()]: for user_value in user_values: queried_value = field_name in user_value and user_value[field_name] or False if isinstance(queried_value, tuple) and len(queried_value) == 2 and isinstance(queried_value[0], (int, long)): queried_value = queried_value[0] if queried_value == query_goals[goal.id]: new_value = user_value.get(field_name+'_count', goal.current) goals_to_write.update( self._get_write_values(cr, uid, goal, new_value, context=context) ) else: for goal in goals: # eval the domain with user replaced by goal user object domain = safe_eval(definition.domain, {'user': goal.user_id}) # add temporal clause(s) to the domain if fields are filled on the goal if goal.start_date and field_date_name: domain.append((field_date_name, '>=', goal.start_date)) if goal.end_date and field_date_name: domain.append((field_date_name, '<=', goal.end_date)) if definition.computation_mode == 'sum': field_name = definition.field_id.name # TODO for master: group on user field in batch mode res = obj.read_group(cr, uid, domain, [field_name], [], context=context) new_value = res and res[0][field_name] or 0.0 else: # computation mode = count new_value = obj.search(cr, uid, domain, context=context, count=True) goals_to_write.update( self._get_write_values(cr, uid, goal, new_value, context=context) ) for goal_id, value in goals_to_write.items(): if not value: continue self.write(cr, uid, [goal_id], value, context=context) if commit: cr.commit() return True def action_start(self, cr, uid, ids, context=None): """Mark a goal as started. This should only be used when creating goals manually (in draft state)""" self.write(cr, uid, ids, {'state': 'inprogress'}, context=context) return self.update(cr, uid, ids, context=context) def action_reach(self, cr, uid, ids, context=None): """Mark a goal as reached. 
If the target goal condition is not met, the state will be reset to In Progress at the next goal update until the end date.""" return self.write(cr, uid, ids, {'state': 'reached'}, context=context) def action_fail(self, cr, uid, ids, context=None): """Set the state of the goal to failed. A failed goal will be ignored in future checks.""" return self.write(cr, uid, ids, {'state': 'failed'}, context=context) def action_cancel(self, cr, uid, ids, context=None): """Reset the completion after setting a goal as reached or failed. This is only the current state, if the date and/or target criterias match the conditions for a change of state, this will be applied at the next goal update.""" return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context) def create(self, cr, uid, vals, context=None): """Overwrite the create method to add a 'no_remind_goal' field to True""" context = dict(context or {}) context['no_remind_goal'] = True return super(gamification_goal, self).create(cr, uid, vals, context=context) def write(self, cr, uid, ids, vals, context=None): """Overwrite the write method to update the last_update field to today If the current value is changed and the report frequency is set to On change, a report is generated """ if context is None: context = {} vals['last_update'] = fields.date.today() result = super(gamification_goal, self).write(cr, uid, ids, vals, context=context) for goal in self.browse(cr, uid, ids, context=context): if goal.state != "draft" and ('definition_id' in vals or 'user_id' in vals): # avoid drag&drop in kanban view raise UserError(_('Can not modify the configuration of a started goal')) if vals.get('current'): if 'no_remind_goal' in context: # new goals should not be reported continue if goal.challenge_id and goal.challenge_id.report_message_frequency == 'onchange': self.pool.get('gamification.challenge').report_progress(cr, SUPERUSER_ID, goal.challenge_id, users=[goal.user_id], context=context) return result def get_action(self, 
cr, uid, goal_id, context=None): """Get the ir.action related to update the goal In case of a manual goal, should return a wizard to update the value :return: action description in a dictionnary """ goal = self.browse(cr, uid, goal_id, context=context) if goal.definition_id.action_id: # open a the action linked to the goal action = goal.definition_id.action_id.read()[0] if goal.definition_id.res_id_field: current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context) action['res_id'] = safe_eval(goal.definition_id.res_id_field, {'user': current_user}) # if one element to display, should see it in form mode if possible action['views'] = [(view_id, mode) for (view_id, mode) in action['views'] if mode == 'form'] or action['views'] return action if goal.computation_mode == 'manually': # open a wizard window to update the value manually action = { 'name': _("Update %s") % goal.definition_id.name, 'id': goal_id, 'type': 'ir.actions.act_window', 'views': [[False, 'form']], 'target': 'new', 'context': {'default_goal_id': goal_id, 'default_current': goal.current}, 'res_model': 'gamification.goal.wizard' } return action return False
gpl-3.0
dotskapes/dotSkapes
tests/register.py
7
2013
from selenium import selenium import unittest, time, re import seleniumtest class NewTest(unittest.TestCase): def setUp(self): self.verificationErrors = [] self.selenium = selenium("localhost", 4444, "*chrome", "http://127.0.0.1:8000/") self.selenium.start() def test_new(self): sel = self.selenium # Register sel.open("/sahana") sel.click("link=Register") sel.wait_for_page_to_load("30000") sel.click("t2_person_name") #sel.store_random("8", "user") #sel.store_random("8", "domain") email = "${user}@${domain}.com" sel.type("t2_person_name", "${user}") sel.type("t2_person_email", email) #sel.store_random("8", "password") sel.type("t2_person_password", "${password}") sel.click("//input[@value='Submit']") sel.wait_for_page_to_load("30000") try: self.failUnless(sel.is_text_present("You have been successfully registered")) except AssertionError, e: self.verificationErrors.append(str(e)) # Logout sel.open("/sahana/default/login") sel.click("link=Logout") sel.wait_for_page_to_load("30000") try: self.failUnless(sel.is_text_present("Logged Out")) except AssertionError, e: self.verificationErrors.append(str(e)) # Login sel.open("/sahana/default/login") sel.click("link=Login") sel.wait_for_page_to_load("30000") sel.type("t2_person_email", "${email}") sel.type("t2_person_password", "${password}") sel.click("//input[@value='Submit']") sel.wait_for_page_to_load("30000") try: self.failUnless(sel.is_text_present("Logged In")) except AssertionError, e: self.verificationErrors.append(str(e)) def tearDown(self): self.selenium.stop() self.assertEqual([], self.verificationErrors) if __name__ == "__main__": #unittest.main() seleniumtest.runInSeleniumRC(unittest.main)()
mit
ingadhoc/odoo
addons/l10n_si/account_wizard.py
255
1120
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) conexus.at # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv class AccountWizard_cd(osv.osv_memory): _inherit='wizard.multi.charts.accounts' _defaults = { 'code_digits' : 6, }
agpl-3.0
xiandiancloud/edx-platform-Y
cms/djangoapps/contentstore/views/public.py
41
2264
""" Public views """ from django_future.csrf import ensure_csrf_cookie from django.core.context_processors import csrf from django.core.urlresolvers import reverse from django.shortcuts import redirect from django.conf import settings from edxmako.shortcuts import render_to_response from external_auth.views import (ssl_login_shortcut, ssl_get_cert_from_request, redirect_with_get) from microsite_configuration import microsite __all__ = ['signup', 'login_page', 'howitworks'] @ensure_csrf_cookie def signup(request): """ Display the signup form. """ csrf_token = csrf(request)['csrf_token'] if request.user.is_authenticated(): return redirect('/course/') if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'): # Redirect to course to login to process their certificate if SSL is enabled # and registration is disabled. return redirect_with_get('login', request.GET, False) return render_to_response('register.html', {'csrf': csrf_token}) @ssl_login_shortcut @ensure_csrf_cookie def login_page(request): """ Display the login form. """ csrf_token = csrf(request)['csrf_token'] if (settings.FEATURES['AUTH_USE_CERTIFICATES'] and ssl_get_cert_from_request(request)): # SSL login doesn't require a login view, so redirect # to course now that the user is authenticated via # the decorator. next_url = request.GET.get('next') if next_url: return redirect(next_url) else: return redirect('/course/') if settings.FEATURES.get('AUTH_USE_CAS'): # If CAS is enabled, redirect auth handling to there return redirect(reverse('cas-login')) return render_to_response( 'login.html', { 'csrf': csrf_token, 'forgot_password_link': "//{base}/login#forgot-password-modal".format(base=settings.LMS_BASE), 'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME), } ) def howitworks(request): "Proxy view" if request.user.is_authenticated(): return redirect('/course/') else: return render_to_response('howitworks.html', {})
agpl-3.0
dunkenj/smpy
scripts/data/fitting.py
1
27412
import numpy as np import array import os, sys import re import time import multiprocessing import h5py import logging from astropy.table import Table, Column from astropy import units as u import argparse parser = argparse.ArgumentParser() parser.add_argument("-p","--params", type=str, help = "Parameter file") parser.add_argument("-q", "--quiet", help = "Suppress extra outputs", action = "store_true") args = parser.parse_args() quiet = args.quiet params_root = re.split(".py", args.params)[0] if os.path.isfile(params_root+".pyc"): os.remove(params_root+".pyc") import importlib try: params = importlib.import_module(params_root) print('Successfully loaded "{0}" as params'.format(args.params)) importlib.reload(params) except: print('Failed to load "{0}" as params'.format(args.params)) raise if quiet: quietprint = lambda *a: None else: def quietprint(*args): for arg in args: print(arg, end=' ') print() # Fitting function definition for later use by Processess def galaxyFit(inputQueue, printQueue, printlock): for gal in iter(inputQueue.get, 'STOP'): j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift flux_obs = obs[gal,:] flux_err = obs_err[gal,:] #flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero I = np.where(flux_err > 0.)[0] # Find bands with no observation if len(I) == 0: if include_rest: M_scaled = np.ones(len(fo)) * -99. restframe_output = ' '.join(M_scaled.astype('str')) output_string = '{0} {1} {2} {3} {4} {5} {6} {7}' \ ' {8} {9} {10} {11} {12} {13} {14} {15} {16}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99, -99, -99,-99,len(I),-99,z[j],restframe_output,'\n') else: output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99,-99, -99,-99,len(I),-99,'\n') printQueue.put(output_string) continue flux_obs = flux_obs[I] # and exclude from fit flux_err = flux_err[I] flux_models = f[j,I,:] tot_err = np.sqrt(flux_err**2 + (0.1*flux_obs)**2) top = 0. bottom = 0. 
for i in range(len(flux_obs)): top += (flux_models[i,:]*flux_obs[i])/(tot_err[i]**2) bottom += (flux_models[i,:]**2)/(tot_err[i]**2) scale = top/bottom scale = np.reshape(scale, (n_metal, n_tg, n_tau, n_tauv, n_fesc)) chisq = 0. for i in range(len(flux_obs)): chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(flux_err[i])**2) chimin, minind = np.nanmin(chisq), np.nanargmin(chisq) if np.isinf(chimin) or np.isnan(minind): if include_rest: M_scaled = np.ones(len(flux_obs)) * -99. restframe_output = ' '.join(M_scaled.astype('str')) output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15} {16}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99, -99, -99,-99,len(I),-99,z[j],restframe_output,'\n') else: output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99,-99, -99,-99,len(I),-99,'\n') printQueue.put(output_string) continue #Find the coordinate of the model with the bestfit mass mi, tgi, ti, tvi, fi = np.unravel_index(minind, (n_metal, n_tg, n_tau, n_tauv, n_fesc)) Bestfit_Mass = np.log10(scale[mi, tgi, ti, tvi, fi]*flux_corr) Bestfit_SFR = (scale[mi, tgi, ti, tvi, fi] * SFR[mi, tgi, ti, tvi, fi]*flux_corr) #Bestfit_Beta = beta[tgi,tvi,ti,mi] Bestfit_Beta = -99. #Scale the observed tot_mag band of the template to be the same as the observed tot_mag band of the galaxy #Convert the templates so they are no longer units of per stellar mass F_rest = f[0,:]*scale[mi, tgi, ti, tvi, fi]*flux_corr restframeMags = 23.9 - 2.5*np.log10(F_rest) #UV_rest = UV_flux[0]*scale[tgi,tvi,ti,mi]*flux_corr #restframeMUV = 23.9 - 2.5*np.log10(UV_rest) M_scaled = restframeMags[:, mi, tgi, ti, tvi, fi] #MUV_scaled = restframeMUV[tgi,tvi,ti,mi] MUV_scaled = -99. 
if np.isnan(Bestfit_Mass) or np.isinf(chimin): Bestfit_Mass = -99 #M_scaled[:] = -99 tgs = -99 tvs = -99 taus = -99 mis = -99 escape_fraction = -99 else: tgs = tg[tgi]/1e9 tvs = tv[tvi] taus = tau[ti] mis = metallicities[mi] escape_fraction = fesc[fi] printlock.acquire() print('{0:6d} {1:8d} {2:>5.2f} {3:>7.2f} {4:>8.1f} {5:>8.3f} {6:>5.1f} {7:>8.2f} {8:>4.2f} {9:>5.2f}'.format(gal+1,ID[gal], zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis,np.log10(Bestfit_SFR))) if include_rest: restframe_output = ' '.join(M_scaled.astype('str')) output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15} {16}'.format(gal+1,ID[gal],zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis, MUV_scaled, minind,Bestfit_SFR,len(I),Bestfit_Beta,z[j],restframe_output,'\n') else: output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14}'.format(gal+1,ID[gal],zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis, MUV_scaled, minind,Bestfit_SFR,len(I),Bestfit_Beta,'\n') printlock.release() printQueue.put(output_string) def galaxyFit2(inputQueue, printQueue, printlock): for gal in iter(inputQueue.get, 'STOP'): output_string = '{0[0]} {0[1]} {0[2]} {0[3]} {0[4]} {0[5]} ' + \ '{0[6]} {0[7]} {0[8]} {0[9]} {0[10]} {0[11]} ' + \ '{0[12]} {0[13]} {0[14]}' j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift log_mass_min, log_mass_max = 7, 13 log_sfr_min, log_sfr_max = -3, 4 flux_obs = obs[gal,:] flux_err = obs_err[gal,:] #flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero I = np.where(flux_err > 0.)[0] # Find bands with no observation if len(I) == 0: output_array = [gal+1, ID[gal], zobs[gal], z[j], -99, -99, -99, -99, -99, -99, -99, -99,-99,len(I),-99,'\n'] output = output_string.format(output_array) if include_rest: M_scaled = np.ones(len(flux_obs)) * -99. 
restframe_output = ' '.join(M_scaled.astype('str')) output = output + restframe_output + ' \n' else: output = output + ' \n' printQueue.put(output_string) continue flux_obs = flux_obs[I] # and exclude from fit flux_err = flux_err[I] flux_models = f[j,I,:] tot_err = np.sqrt(flux_err**2 + (params.flux_err*flux_obs)**2) top = 0. bottom = 0. for i in range(len(flux_obs)): top += (flux_models[i,:]*flux_obs[i])/(tot_err[i]**2) bottom += (flux_models[i,:]**2)/(tot_err[i]**2) scale = top/bottom scale = np.reshape(scale, (n_metal, n_tg, n_tau, n_tauv, n_fesc)) chisq = 0. for i in range(len(flux_obs)): chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(tot_err[i])**2) chimin, minind = np.nanmin(chisq), np.nanargmin(chisq) likelihood = np.reshape(np.exp(-0.5*chisq), (n_metal, n_tg, n_tau, n_tauv, n_fesc)) likelihood[np.isnan(likelihood)] = 0. likelihood = np.abs(likelihood/likelihood.sum()) if np.isinf(chimin) or np.isnan(minind): output_array = [gal+1, ID[gal], zobs[gal], z[j], -99, -99, -99, -99, -99, -99, -99, -99,-99,len(I),-99,'\n'] output = output_string.format(output_array) else: #Find the coordinate of the model with the bestfit mass mi, tgi, ti, tvi, fi = np.unravel_index(minind, (n_metal, n_tg, n_tau, n_tauv, n_fesc)) Masses = np.abs(np.log10(scale*flux_corr)) SFRs = np.abs(np.log10(scale * SFR * flux_corr)) mass_hist = np.histogram(Masses.flatten(), range = (log_mass_min, log_mass_max), bins = 120, weights = likelihood.flatten(), density = True) sfr_hist = np.histogram(SFRs.flatten(), range = (log_sfr_min, log_sfr_max), bins = 140, weights = likelihood.flatten(), density = True) Bestfit_Mass = np.abs(np.log10(scale[mi, tgi, ti, tvi, fi]*flux_corr)) Bestfit_SFR = np.abs(np.log10(scale[mi, tgi, ti, tvi, fi] * SFR[mi, tgi, ti, tvi, fi]*flux_corr)) if np.isnan(Bestfit_Mass) or np.isinf(chimin): Bestfit_Mass = -99 #M_scaled[:] = -99 tgs = -99 tvs = -99 taus = -99 mis = -99 escape_fraction = -99 else: tgs = tg[tgi]/1e9 tvs = tv[tvi] taus = tau[ti] mis = 
metallicities[mi] escape_fraction = fesc[fi] m16, m50, m84 = weighted_quantile(Masses.flatten(), [0.16, 0.5, 0.84], sample_weight=likelihood.flatten(), values_sorted=False) s16, s50, s84 = weighted_quantile(SFRs.flatten(), [0.16, 0.5, 0.84], sample_weight=likelihood.flatten(), values_sorted=False) printlock.acquire() MUV_scaled = -99. Bestfit_Beta = -99. print_string = "{0[0]:6d} {0[1]:8d} {0[2]:>5.2f} " + \ "{0[3]:>7.2f} {0[4]:>8.1f} {0[5]:>8.3f} " + \ "{0[6]:>5.1f} {0[7]:>8.2f} {0[8]:>4.2f} " + \ "{0[9]:>5.2f}" print_array = [gal+1, ID[gal], zobs[gal], Bestfit_Mass, chimin, tgs, tvs, taus, mis, Bestfit_SFR] print(print_string.format(print_array)) output_string = '{n} {id} {zobs} {ztemp} {mass_best} {sfr_best} '+ \ '{chi_best} {tg} {tvs} {taus} {mis} {fesc} '+ \ '{mass_med} {mass_l68} {mass_u68} ' + \ '{sfr_med} {sfr_l68} {sfr_u68} ' + \ '{nfilts} ' output_values = {'n': gal+1, 'id': ID[gal], 'zobs': zobs[gal], 'ztemp':z[j], 'mass_best': Bestfit_Mass, 'sfr_best': Bestfit_SFR, 'chi_best': chimin, 'tg': tgs, 'tvs': tvs, 'taus': taus, 'mis': mis, 'fesc': escape_fraction, 'mass_med': m50, 'mass_l68': m16, 'mass_u68': m84, 'sfr_med': s50, 'sfr_l68': s16, 'sfr_u68': s84, 'nfilts': len(I)} output_array = [gal+1, ID[gal], zobs[gal], Bestfit_Mass, chimin, tgs, tvs, taus, mis, MUV_scaled, minind, Bestfit_SFR, len(I), -99., '\n'] output = output_string.format(**output_values) if include_rest: if np.isinf(chimin) or np.isnan(minind): M_scaled = np.ones(len(flux_obs)) * -99. 
restframe_output = ' '.join(M_scaled.astype('str')) output = output + restframe_output + ' \n' else: F_rest = np.array(f[0, :, mi, tgi, ti, tvi, fi] * scale[mi, tgi, ti, tvi, fi] * flux_corr) restframeMags = 23.9 - 2.5*np.log10(F_rest) restframe_output = ' '.join(restframeMags.astype('str')) output = output + restframe_output + ' \n' else: output = output + ' \n' printlock.release() printQueue.put([output, mass_hist, sfr_hist]) def galaxyFitPlus(inputQueue, printQueue, printlock): for gal in iter(inputQueue.get, 'STOP'): mass_range = 7, 13 log_sfr_min, log_sfr_max = -3, 4 j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift fo = obs[gal,:] ferr = obs_err[gal,:] flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero #print fo I = (ferr > 0.)*(ferr < 1e6) # Find bands with no observation fo = flux_obs[I] # and exclude from fit ferr = flux_err[I] fm = f[I,j,:] #print flux_models[:,0,0,0,0] top = 0. bottom = 0. for i in range(len(fo)): top += (flux_models[i,:]*flux_obs[i])/(flux_err[i]**2) bottom += (flux_models[i,:]**2)/(flux_err[i]**2) scale = top/bottom scale = np.reshape(scale, (n_metal, n_tg, n_tau, n_tauv, n_fesc)) chisq = 0. 
for i in range(len(fo)): chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(flux_err[i])**2) chimin, minind = np.nanmin(chisq), np.nanargmin(chisq) chisq -= (chisq.min() - 1) likelihood = np.exp(-0.5*chisq) likelihood /= likelihood.sum() if np.isinf(chimin) or np.isnan(minind) or len(fo) == 0: output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} \ {10} {11} {12} {13} {14} {15} {16} {17} {18}'.format(gal+1,ID[gal],zobs[gal], -99,-99,-99,-99,-99,-99, -99, -99, -99, -99,-99,-99,-99, len(I),-99,'\n') massLikelihood = np.zeros(mass_bins+1) massLikelihood[0] = gal muvLikelihood = np.zeros(muv_bins+1) muvLikelihood[0] = gal betaLikelihood = np.zeros(beta_bins+1) betaLikelihood[0] = gal #tauLikelihood = np.zeros(n_tau) #tauLikelihood = np.insert(tauLikelihood,0,gal) printQueue.put([output_string,massLikelihood,muvLikelihood,betaLikelihood]) continue #Find the coordinate of the model with the bestfit mass si,tgi,tvi,ti,mi = np.unravel_index(minind,(mass_bins,n_tg,n_tauv,n_tau,n_ssp)) Bestfit_Mass = np.log10(mass_range[si]*flux_corr) Bestfit_SFR = (mass_range[si]*SFR[tgi,ti,mi]*flux_corr) Bestfit_Beta = beta[tgi,tvi,ti,mi] F_rest = f[:,0]*mass_range[likelihood.argmax(0)]*flux_corr restframeMags = 23.9 - 2.5*np.log10(F_rest) UV_rest = UV_flux[0]*mass_range[likelihood.argmax(0)]*flux_corr restframeMUV = 23.9 - 2.5*np.log10(UV_rest) Bestfit_restframeMags = restframeMags[:,tgi,tvi,ti,mi] Bestfit_restframeMUV = restframeMUV[tgi,tvi,ti,mi] if np.isnan(Bestfit_Mass) or np.isinf(chimin): Bestfit_Mass = -99 #M_scaled[:] = -99 tgs = -99 tvs = -99 taus = -99 mis = -99 else: tgs = tg[tgi]/1.e9 tvs = tv[tvi] taus = tau[ti]/1.e9 mis = mi """ Likelihood array section: """ mass_hist = np.histogram(np.log10(mass_)) printlock.acquire() if calc_mode: print('{0:4d} {1:6d} {2:>6.2f} {3:>8.1f} {4:>6.2f}'.format(gal+1,ID[gal],Bestfit_Mass,chimin, np.log10(Mode_Mass), '/n')) else: print('{0:6d} {1:8f} {2:>5.2f} {3:>7.2f} {4:>8.1f} {5:>8.3f} {6:>5.1f} {7:>8.2f} {8:>3d} 
{9:>5.2f}'.format(gal+1,int(ID[gal]),zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis,np.log10(Bestfit_SFR))) output_string = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}'.format(gal+1,int(ID[gal]),zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis,Bestfit_restframeMags[tot],Bestfit_restframeMUV,minind,Bestfit_SFR,len(I),Bestfit_Beta,'\n') printlock.release() printQueue.put([output_string, massLikelihoods, muvLikelihoods, betaLikelihoods]) def getObservations(inputpath): input_data = Table.read(inputpath,format=input_format) column_names = list(input_data.columns.keys()) ID = input_data[ID_col] zobs = input_data[z_col] filter_names = [] k,l = 0,0 for ii in range(len(column_names)): if column_names[ii].lower().endswith(flux_col_end.lower()): if k == 0: fluxes = input_data[column_names[ii]] else: fluxes = np.column_stack((fluxes,input_data[column_names[ii]])) k+=1 filter_names.append(column_names[ii]) if column_names[ii].lower().endswith(fluxerr_col_end.lower()): if l == 0: fluxerrs = input_data[column_names[ii]] else: fluxerrs = np.column_stack((fluxerrs,input_data[column_names[ii]])) l+=1 """ if filts_used != None: try: fluxes = fluxes[:,filts_used] fluxerrs = fluxerrs[:,filts_used] except:r print('Filter mismatch 1') # Array slicing fail """ return ID, zobs, fluxes, fluxerrs, k, filter_names class _function_wrapper(object): """ This is a hack to make the likelihood function pickleable when ``args`` or ``kwargs`` are also included. 
Stolen from emcee """ def __init__(self, f, args, kwargs): self.f = f self.args = args self.kwargs = kwargs def __call__(self, x): try: return self.f(x, *self.args, **self.kwargs) except: import traceback print("emcee: Exception while calling your likelihood function:") print(" params:", x) print(" args:", self.args) print(" kwargs:", self.kwargs) print(" exception:") traceback.print_exc() raise def weighted_quantile(values, quantiles, sample_weight=None, values_sorted=False, old_style=False): """ Very close to np.percentile, but supports weights. NOTE: quantiles should be in [0, 1]! :param values: np.array with data :param quantiles: array-like with many quantiles needed :param sample_weight: array-like of the same length as `array` :param values_sorted: bool, if True, then will avoid sorting of initial array :param old_style: if True, will correct output to be consistent with np.percentile. :return: np.array with computed quantiles. """ values = np.array(values) quantiles = np.array(quantiles) if sample_weight is None: sample_weight = np.ones(len(values)) sample_weight = np.array(sample_weight) assert np.all(quantiles >= 0) and np.all(quantiles <= 1), 'quantiles should be in [0, 1]' if not values_sorted: sorter = np.argsort(values) values = values[sorter] sample_weight = sample_weight[sorter] weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight if old_style: # To be convenient with np.percentile weighted_quantiles -= weighted_quantiles[0] weighted_quantiles /= weighted_quantiles[-1] else: weighted_quantiles /= np.sum(sample_weight) return np.interp(quantiles, weighted_quantiles, values) if __name__ == '__main__': logfile = open("error.log", "w") original_stderr = sys.stderr sys.stderr = logfile start = time.time() """ SECTION 1 """ model_path = params.model_path input_catalog = params.input_catalog input_format = params.input_format z_col = params.z_col ID_col = params.ID_col flux_col_end = params.flux_col_end fluxerr_col_end = 
params.fluxerr_col_end ncpus = params.ncpus filts_used = params.filts_used include_rest = params.include_rest output_path = params.output_catalog_path output_format = params.output_format output_hdf_path = params.output_hdf_path calc_mode = params.fitting_mode flux_corr = params.flux_corr ID, zobs, obs, obs_err, filters_found, filter_names = getObservations(input_catalog) """ Section 2 """ print("Loading synthetic mags and mass array:") models = h5py.File(model_path, 'r') tg = models['ages'].value tv = models['dust'].value tau = models['sfh'].value metallicities = models['metallicities'].value fesc = models['fesc'].value Mshape = models['fluxes'].shape z = models['z'] nfilts = Mshape[1] n_metal = Mshape[2] n_tg = Mshape[3] n_tau = Mshape[4] n_tauv = Mshape[5] n_fesc = Mshape[6] #UV_flux = synmags['UV_flux'] SFR = models['SFR'] Ms = models['Ms'] if (nfilts == filters_found) and (filts_used == None): f = models['fluxes'] elif filts_used != None: try: f = models['fluxes'][:,filts_used] obs = obs[:,filts_used] obs_err = obs_err[:,filts_used] filter_names = np.array(filter_names)[filts_used] except: print('Mis-match between model and observed filter numbers') raise # Slice fail print ("Done.") """ SECTION 3 """ if os.path.isfile(output_path+".temp_output.txt"): os.remove(output_path+".temp_output.txt") temp_file = open(output_path+".temp_output.txt","w") """ SECTION 4 Chi-sq calculation """ out_string = '{0:6s} {1:8s} {2:>5s} {3:>7s} {4:>8s} {5:>8s}' + \ '{6:>5s} {7:>8s} {8:>4s} {9:>5s}' print(out_string.format('N','ID','zobs','Best', 'chimin', 'tg', 'tauv','tau','met', 'sfr')) loop_start = time.time() ncpus = np.clip(ncpus, 1, multiprocessing.cpu_count()) inputQueue = multiprocessing.Queue() printQueue = multiprocessing.Queue() printlock = multiprocessing.Lock() if calc_mode == 'hist': output_hdf = h5py.File(output_hdf_path, 'w') output_hdf.create_dataset("mass_pdf", (len(ID), 120), dtype="f") output_hdf.create_dataset("sfr_pdf", (len(ID), 140), dtype="f") fitFunction 
= galaxyFit2 else: fitFunction = galaxyFit for i in range( ncpus ): multiprocessing.Process(target = fitFunction, args = (inputQueue, printQueue, printlock)).start() # Put elements in the send queue for processing for gal in range( len(ID) ): inputQueue.put( gal ) if calc_mode == 'hist': for i, gal in enumerate(ID): printout, mass_hist, sfr_hist = printQueue.get() if i == 0: mass_centers = 0.5*(mass_hist[1][1:] + mass_hist[1][:-1]) sfr_centers = 0.5*(sfr_hist[1][1:] + sfr_hist[1][:-1]) output_hdf.create_dataset("mass_bins", data = mass_centers) output_hdf.create_dataset("sfr_bins", data = sfr_centers) output_hdf["mass_pdf"][i] = mass_hist[0] output_hdf["sfr_pdf"][i] = sfr_hist[0] temp_file.write( printout ) #tau_array.tofile(tau_file) else: for i, gal in enumerate(ID): printout = printQueue.get() temp_file.write( printout ) #print len(mass_array), len(muv_array), len(beta_array) # Stop all the running processes for i in range( ncpus ): inputQueue.put( 'STOP' ) # Close both send and receive queues inputQueue.close() printQueue.close() temp_file.close() models.close() output_hdf.close() print("Fitting time taken: {0:.2f} {1}".format(time.time()-loop_start, '\n')) """ Section 3 Reload, format and save output table """ while temp_file.closed == False: pause(0.1) data = np.loadtxt(output_path+".temp_output.txt") try: rows, cols = data.shape except: cols = len(data) output = Table() names = ['N', 'ID', 'z', 'zmodel', 'Mass_best', 'SFR_best', 'chi_best', 'Age_best','Dust_best', 'SFH_best', 'Metallicity_best', 'fesc_best', 'Mass_median', 'Mass_l68', 'Mass_u68', 'SFR_median', 'SFR_l68', 'SFR_u68', 'Nfilts'] units = [None, None, None, None, u.Msun, u.Msun/u.yr, None, u.Gyr, None, None, None, None, u.Msun, u.Msun, u.Msun, u.Msun/u.yr, u.Msun/u.yr, u.Msun/u.yr, None] types = ['i4', 'i4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'i4'] if include_rest: for name in filter_names: names.append(name[:-len(flux_col_end)]+'_rest') 
units.append(u.mag) types.append('f4') for col in range(cols): column = Column( data[:,col], name = names[col], unit=units[col], dtype=types[col]) output.add_column(column) table_format = 'ascii.commented_header' output.sort('ID') if os.path.isfile(output_path): os.remove(output_path) output.write(output_path,format=table_format) print('Catalog saved') os.remove(temp_file.name) print() print("Total time taken: "+str(time.time()-start)) sys.stderr = original_stderr logfile.close()
mit
Graghav/surabi
ADMIN/venv/lib/python2.7/site-packages/wheel/tool/__init__.py
93
13217
""" Wheel command-line utility. """ import os import hashlib import sys import json from glob import iglob from .. import signatures from ..util import (urlsafe_b64decode, urlsafe_b64encode, native, binary, matches_requirement) from ..install import WheelFile, VerifyingZipFile from ..paths import get_install_command def require_pkgresources(name): try: import pkg_resources except ImportError: raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name)) import argparse class WheelError(Exception): pass # For testability def get_keyring(): try: from ..signatures import keys import keyring assert keyring.get_keyring().priority except (ImportError, AssertionError): raise WheelError("Install wheel[signatures] (requires keyring, keyrings.alt, pyxdg) for signatures.") return keys.WheelKeys, keyring def keygen(get_keyring=get_keyring): """Generate a public/private key pair.""" WheelKeys, keyring = get_keyring() ed25519ll = signatures.get_ed25519ll() wk = WheelKeys().load() keypair = ed25519ll.crypto_sign_keypair() vk = native(urlsafe_b64encode(keypair.vk)) sk = native(urlsafe_b64encode(keypair.sk)) kr = keyring.get_keyring() kr.set_password("wheel", vk, sk) sys.stdout.write("Created Ed25519 keypair with vk={0}\n".format(vk)) sys.stdout.write("in {0!r}\n".format(kr)) sk2 = kr.get_password('wheel', vk) if sk2 != sk: raise WheelError("Keyring is broken. 
Could not retrieve secret key.") sys.stdout.write("Trusting {0} to sign and verify all packages.\n".format(vk)) wk.add_signer('+', vk) wk.trust('+', vk) wk.save() def sign(wheelfile, replace=False, get_keyring=get_keyring): """Sign a wheel""" WheelKeys, keyring = get_keyring() ed25519ll = signatures.get_ed25519ll() wf = WheelFile(wheelfile, append=True) wk = WheelKeys().load() name = wf.parsed_filename.group('name') sign_with = wk.signers(name)[0] sys.stdout.write("Signing {0} with {1}\n".format(name, sign_with[1])) vk = sign_with[1] kr = keyring.get_keyring() sk = kr.get_password('wheel', vk) keypair = ed25519ll.Keypair(urlsafe_b64decode(binary(vk)), urlsafe_b64decode(binary(sk))) record_name = wf.distinfo_name + '/RECORD' sig_name = wf.distinfo_name + '/RECORD.jws' if sig_name in wf.zipfile.namelist(): raise WheelError("Wheel is already signed.") record_data = wf.zipfile.read(record_name) payload = {"hash":"sha256=" + native(urlsafe_b64encode(hashlib.sha256(record_data).digest()))} sig = signatures.sign(payload, keypair) wf.zipfile.writestr(sig_name, json.dumps(sig, sort_keys=True)) wf.zipfile.close() def unsign(wheelfile): """ Remove RECORD.jws from a wheel by truncating the zip file. RECORD.jws must be at the end of the archive. The zip file must be an ordinary archive, with the compressed files and the directory in the same order, and without any non-zip content after the truncation point. """ vzf = VerifyingZipFile(wheelfile, "a") info = vzf.infolist() if not (len(info) and info[-1].filename.endswith('/RECORD.jws')): raise WheelError("RECORD.jws not found at end of archive.") vzf.pop() vzf.close() def verify(wheelfile): """Verify a wheel. The signature will be verified for internal consistency ONLY and printed. Wheel's own unpack/install commands verify the manifest against the signature and file contents. 
""" wf = WheelFile(wheelfile) sig_name = wf.distinfo_name + '/RECORD.jws' sig = json.loads(native(wf.zipfile.open(sig_name).read())) verified = signatures.verify(sig) sys.stderr.write("Signatures are internally consistent.\n") sys.stdout.write(json.dumps(verified, indent=2)) sys.stdout.write('\n') def unpack(wheelfile, dest='.'): """Unpack a wheel. Wheel content will be unpacked to {dest}/{name}-{ver}, where {name} is the package name and {ver} its version. :param wheelfile: The path to the wheel. :param dest: Destination directory (default to current directory). """ wf = WheelFile(wheelfile) namever = wf.parsed_filename.group('namever') destination = os.path.join(dest, namever) sys.stderr.write("Unpacking to: %s\n" % (destination)) wf.zipfile.extractall(destination) wf.zipfile.close() def install(requirements, requirements_file=None, wheel_dirs=None, force=False, list_files=False, dry_run=False): """Install wheels. :param requirements: A list of requirements or wheel files to install. :param requirements_file: A file containing requirements to install. :param wheel_dirs: A list of directories to search for wheels. :param force: Install a wheel file even if it is not compatible. :param list_files: Only list the files to install, don't install them. :param dry_run: Do everything but the actual install. """ # If no wheel directories specified, use the WHEELPATH environment # variable, or the current directory if that is not set. 
if not wheel_dirs: wheelpath = os.getenv("WHEELPATH") if wheelpath: wheel_dirs = wheelpath.split(os.pathsep) else: wheel_dirs = [ os.path.curdir ] # Get a list of all valid wheels in wheel_dirs all_wheels = [] for d in wheel_dirs: for w in os.listdir(d): if w.endswith('.whl'): wf = WheelFile(os.path.join(d, w)) if wf.compatible: all_wheels.append(wf) # If there is a requirements file, add it to the list of requirements if requirements_file: # If the file doesn't exist, search for it in wheel_dirs # This allows standard requirements files to be stored with the # wheels. if not os.path.exists(requirements_file): for d in wheel_dirs: name = os.path.join(d, requirements_file) if os.path.exists(name): requirements_file = name break with open(requirements_file) as fd: requirements.extend(fd) to_install = [] for req in requirements: if req.endswith('.whl'): # Explicitly specified wheel filename if os.path.exists(req): wf = WheelFile(req) if wf.compatible or force: to_install.append(wf) else: msg = ("{0} is not compatible with this Python. " "--force to install anyway.".format(req)) raise WheelError(msg) else: # We could search on wheel_dirs, but it's probably OK to # assume the user has made an error. raise WheelError("No such wheel file: {}".format(req)) continue # We have a requirement spec # If we don't have pkg_resources, this will raise an exception matches = matches_requirement(req, all_wheels) if not matches: raise WheelError("No match for requirement {}".format(req)) to_install.append(max(matches)) # We now have a list of wheels to install if list_files: sys.stdout.write("Installing:\n") if dry_run: return for wf in to_install: if list_files: sys.stdout.write(" {0}\n".format(wf.filename)) continue wf.install(force=force) wf.zipfile.close() def install_scripts(distributions): """ Regenerate the entry_points console_scripts for the named distribution. 
""" try: from setuptools.command import easy_install import pkg_resources except ImportError: raise RuntimeError("'wheel install_scripts' needs setuptools.") for dist in distributions: pkg_resources_dist = pkg_resources.get_distribution(dist) install = get_install_command(dist) command = easy_install.easy_install(install.distribution) command.args = ['wheel'] # dummy argument command.finalize_options() command.install_egg_scripts(pkg_resources_dist) def convert(installers, dest_dir, verbose): require_pkgresources('wheel convert') # Only support wheel convert if pkg_resources is present from ..wininst2wheel import bdist_wininst2wheel from ..egg2wheel import egg2wheel for pat in installers: for installer in iglob(pat): if os.path.splitext(installer)[1] == '.egg': conv = egg2wheel else: conv = bdist_wininst2wheel if verbose: sys.stdout.write("{0}... ".format(installer)) sys.stdout.flush() conv(installer, dest_dir) if verbose: sys.stdout.write("OK\n") def parser(): p = argparse.ArgumentParser() s = p.add_subparsers(help="commands") def keygen_f(args): keygen() keygen_parser = s.add_parser('keygen', help='Generate signing key') keygen_parser.set_defaults(func=keygen_f) def sign_f(args): sign(args.wheelfile) sign_parser = s.add_parser('sign', help='Sign wheel') sign_parser.add_argument('wheelfile', help='Wheel file') sign_parser.set_defaults(func=sign_f) def unsign_f(args): unsign(args.wheelfile) unsign_parser = s.add_parser('unsign', help=unsign.__doc__) unsign_parser.add_argument('wheelfile', help='Wheel file') unsign_parser.set_defaults(func=unsign_f) def verify_f(args): verify(args.wheelfile) verify_parser = s.add_parser('verify', help=verify.__doc__) verify_parser.add_argument('wheelfile', help='Wheel file') verify_parser.set_defaults(func=verify_f) def unpack_f(args): unpack(args.wheelfile, args.dest) unpack_parser = s.add_parser('unpack', help='Unpack wheel') unpack_parser.add_argument('--dest', '-d', help='Destination directory', default='.') 
unpack_parser.add_argument('wheelfile', help='Wheel file') unpack_parser.set_defaults(func=unpack_f) def install_f(args): install(args.requirements, args.requirements_file, args.wheel_dirs, args.force, args.list_files) install_parser = s.add_parser('install', help='Install wheels') install_parser.add_argument('requirements', nargs='*', help='Requirements to install.') install_parser.add_argument('--force', default=False, action='store_true', help='Install incompatible wheel files.') install_parser.add_argument('--wheel-dir', '-d', action='append', dest='wheel_dirs', help='Directories containing wheels.') install_parser.add_argument('--requirements-file', '-r', help="A file containing requirements to " "install.") install_parser.add_argument('--list', '-l', default=False, dest='list_files', action='store_true', help="List wheels which would be installed, " "but don't actually install anything.") install_parser.set_defaults(func=install_f) def install_scripts_f(args): install_scripts(args.distributions) install_scripts_parser = s.add_parser('install-scripts', help='Install console_scripts') install_scripts_parser.add_argument('distributions', nargs='*', help='Regenerate console_scripts for these distributions') install_scripts_parser.set_defaults(func=install_scripts_f) def convert_f(args): convert(args.installers, args.dest_dir, args.verbose) convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel') convert_parser.add_argument('installers', nargs='*', help='Installers to convert') convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir, help="Directory to store wheels (default %(default)s)") convert_parser.add_argument('--verbose', '-v', action='store_true') convert_parser.set_defaults(func=convert_f) def version_f(args): from .. 
import __version__ sys.stdout.write("wheel %s\n" % __version__) version_parser = s.add_parser('version', help='Print version and exit') version_parser.set_defaults(func=version_f) def help_f(args): p.print_help() help_parser = s.add_parser('help', help='Show this help') help_parser.set_defaults(func=help_f) return p def main(): p = parser() args = p.parse_args() if not hasattr(args, 'func'): p.print_help() else: # XXX on Python 3.3 we get 'args has no func' rather than short help. try: args.func(args) return 0 except WheelError as e: sys.stderr.write(e.message + "\n") return 1
apache-2.0
ledatelescope/bifrost
test/test_resizing.py
1
4303
# Copyright (c) 2016, The Bifrost Authors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Bifrost Authors nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""@module test_resizing This file checks different aspects of resizing a ring for segmentation faults.""" import unittest import json import numpy as np from bifrost.block import TestingBlock, SinkBlock, Pipeline class ModResizeAsciiBlock(SinkBlock): """Copies input ring's data into ascii format in a text file, after resizing late (after opening sequence).""" def __init__(self, filename, gulp_size=None): """@param[in] filename Name of file to write ascii to""" self.filename = filename self.gulp_size = gulp_size open(self.filename, "w").close() def load_settings(self, input_header): """Load the header, and set the gulp appropriately""" header_dict = json.loads(input_header.tostring()) self.shape = header_dict['shape'] size_of_float32 = 4 if self.gulp_size is None: self.gulp_size = np.product(self.shape) * size_of_float32 def iterate_ring_read(self, input_ring): """Iterate through one input ring @param[in] input_ring Ring to read through""" for sequence in input_ring.read(guarantee=True): self.load_settings(sequence.header) input_ring.resize(self.gulp_size) for span in sequence.read(self.gulp_size): yield span def main(self, input_ring): """Initiate the writing to file @param[in] input_rings First ring in this list will be used""" span_generator = self.iterate_ring_read(input_ring) span = span_generator.next() text_file = open(self.filename, 'a') np.savetxt(text_file, span.data_view(np.float32).reshape((1,-1))) text_file.close() class TestLateResize(unittest.TestCase): """Test late resizing of a ring in a pipeline""" def test_modified_write_ascii(self): """Using a modified WriteAciiBlock, test the late resize. 
This should fail if ModWriteAscii block does not read the size of the input ring ahead of time, and resize accordingly.""" blocks = [] blocks.append((TestingBlock([1, 2, 3]), [], [0])) blocks.append((ModResizeAsciiBlock('.log.txt'), [0], [])) Pipeline(blocks).main() np.testing.assert_almost_equal( np.loadtxt('.log.txt'), [1, 2, 3]) class TestLargeGulpSize(unittest.TestCase): """Create a gulp size larger than ring size""" def test_simple_large_gulp(self): """Test if a large gulp size produces a seg fault""" blocks = [] blocks.append((TestingBlock([1, 2, 3]), [], [0])) blocks.append((ModResizeAsciiBlock('.log.txt', gulp_size=1024), [0], [])) Pipeline(blocks).main() np.testing.assert_almost_equal( np.loadtxt('.log.txt'), [1, 2, 3])
bsd-3-clause
kszys/num2words
num2words/lang_ID.py
1
6232
# Copyright (c) 2003, Taro Ogawa. All Rights Reserved.
# Copyright (c) 2013, Savoir-faire Linux inc. All Rights Reserved.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA

from __future__ import unicode_literals, print_function


class Num2Word_ID():
    """Convert numbers to Indonesian (Bahasa Indonesia) words.

    Numbers are grouped into blocks of three digits; each block is spelled
    with the hundreds ("ratus") and tens/teens/units ("puluh"/"belas")
    helpers and then joined with the scale word from TENS_TO.
    """

    # Digit spellings. 0 maps to [] because a zero digit is silent inside
    # a larger number ("nol" is only used for the number 0 itself).
    BASE = {0: [],
            1: ["satu"],
            2: ["dua"],
            3: ["tiga"],
            4: ["empat"],
            5: ["lima"],
            6: ["enam"],
            7: ["tujuh"],
            8: ["delapan"],
            9: ["sembilan"]}

    # Scale words keyed by the power of ten of the block being spelled.
    TENS_TO = {3: "ribu",
               6: "juta",
               9: "miliar",
               12: "triliun",
               15: "kuadriliun",
               18: "kuantiliun",
               21: "sekstiliun",
               24: "septiliun",
               27: "oktiliun",
               30: "noniliun",
               33: "desiliun"}

    # %-format templates for error messages.
    # Bug fix: these previously contained no placeholders, so formatting
    # them with `template % value` raised TypeError instead of producing
    # the intended message.
    errmsg_floatord = "Cannot treat float %s as ordinal."
    errmsg_negord = "Cannot treat negative num %s as ordinal."
    errmsg_toobig = "abs(%s) must be less than %s."

    # Largest supported magnitude (exclusive bound).
    max_num = 10**36

    def split_by_koma(self, number):
        """Split the decimal representation on the decimal point
        ("koma"); returns [integer_part] or [integer_part, fraction]."""
        return str(number).split('.')

    def split_by_3(self, number):
        """
        starting here, it groups the number by three from the tail
        '1234567' -> (('1',),('234',),('567',))
        :param number:str
        :rtype:tuple
        """
        blocks = ()
        length = len(number)
        if length < 3:
            blocks += ((number,),)
        else:
            # A leading block of 1 or 2 digits, then full 3-digit blocks.
            len_of_first_block = length % 3
            if len_of_first_block > 0:
                first_block = number[0:len_of_first_block],
                blocks += first_block,
            for i in range(len_of_first_block, length, 3):
                next_block = (number[i:i + 3],),
                blocks += next_block
        return blocks

    def spell(self, blocks):
        """
        it adds the list of spelling to the blocks
        (('1',),('034',)) -> (('1',['satu']),('234',['tiga', 'puluh', 'empat']))
        :param blocks: tuple
        :rtype: tuple
        """
        word_blocks = ()
        first_block = blocks[0]
        if len(first_block[0]) == 1:
            if first_block[0] == '0':
                spelling = ['nol']
            else:
                spelling = self.BASE[int(first_block[0])]
        elif len(first_block[0]) == 2:
            spelling = self.puluh(first_block[0])
        else:
            spelling = (self.ratus(first_block[0][0]) +
                        self.puluh(first_block[0][1:3]))
        word_blocks += (first_block[0], spelling),

        # All remaining blocks are exactly three digits wide.
        for block in blocks[1:]:
            spelling = self.ratus(block[0][0]) + self.puluh(block[0][1:3])
            block += spelling,
            word_blocks += block,
        return word_blocks

    def ratus(self, number):
        # Spell the hundreds digit ("seratus" for 100, silent for 0).
        if number == '1':
            return ['seratus']
        elif number == '0':
            return []
        else:
            return self.BASE[int(number)] + ['ratus']

    def puluh(self, number):
        # Spell a two-digit group: teens ("belas"), tens ("puluh"), units.
        if number[0] == '1':
            if number[1] == '0':
                return ['sepuluh']
            elif number[1] == '1':
                return ['sebelas']
            else:
                return self.BASE[int(number[1])] + ['belas']
        elif number[0] == '0':
            return self.BASE[int(number[1])]
        else:
            return (self.BASE[int(number[0])] + ['puluh'] +
                    self.BASE[int(number[1])])

    def spell_float(self, float_part):
        # Spell the fractional digits one by one after "koma".
        word_list = []
        for n in float_part:
            if n == '0':
                word_list += ['nol']
                continue
            word_list += self.BASE[int(n)]
        return ' '.join(['', 'koma'] + word_list)

    def join(self, word_blocks, float_part):
        """
        join the words by first join lists in the tuple
        :param word_blocks: tuple
        :rtype: str
        """
        word_list = []
        length = len(word_blocks) - 1
        first_block = word_blocks[0],
        start = 0

        # Special case: a leading lone '1' in the thousands block is
        # "seribu", not "satu ribu".
        if length == 1 and first_block[0][0] == '1':
            word_list += ['seribu']
            start = 1

        for i in range(start, length + 1, 1):
            word_list += word_blocks[i][1]
            if not word_blocks[i][1]:
                continue  # all-zero block: skip its scale word
            if i == length:
                break
            word_list += [self.TENS_TO[(length - i) * 3]]

        return ' '.join(word_list) + float_part

    def to_cardinal(self, number):
        """Spell *number* (int or float) as Indonesian words.

        Raises OverflowError when abs(number) >= max_num.
        """
        if number >= self.max_num:
            # Bug fix: was `self.maxnum` (AttributeError; the attribute is
            # max_num) combined with a placeholder-less template.
            raise OverflowError(self.errmsg_toobig % (number, self.max_num))
        minus = ''
        if number < 0:
            minus = 'min '
        float_word = ''
        n = self.split_by_koma(abs(number))
        if len(n) == 2:
            float_word = self.spell_float(n[1])
        return minus + self.join(self.spell(self.split_by_3(n[0])),
                                 float_word)

    def to_ordinal(self, number):
        """Spell an ordinal; 1 is the irregular "pertama"."""
        self.verify_ordinal(number)
        out_word = self.to_cardinal(number)
        if out_word == "satu":
            return "pertama"
        return "ke" + out_word

    def to_ordinal_num(self, number):
        """Return the numeric-ordinal form, e.g. 5 -> "ke-5"."""
        self.verify_ordinal(number)
        return "ke-" + str(number)

    def to_currency(self, value):
        """Spell a rupiah amount."""
        return self.to_cardinal(value) + " rupiah"

    def to_year(self, value):
        """Years are spelled the same as cardinals."""
        return self.to_cardinal(value)

    def verify_ordinal(self, value):
        """Reject floats and negatives for ordinal conversion."""
        if not value == int(value):
            raise TypeError(self.errmsg_floatord % value)
        if not abs(value) == value:
            raise TypeError(self.errmsg_negord % value)
lgpl-2.1
nhenezi/kuma
kuma/attachments/feeds.py
6
1116
from django.utils.translation import ugettext as _

from kuma.wiki.feeds import DocumentsFeed

from .models import AttachmentRevision


class AttachmentsFeed(DocumentsFeed):
    """Feed of the most recent changes to MDN file attachments."""

    title = _("MDN recent file changes")
    subtitle = _("Recent revisions to MDN file attachments")

    def items(self):
        """Return the 50 newest attachment revisions, newest first."""
        return AttachmentRevision.objects.order_by('-created')[:50]

    def item_title(self, item):
        """The revision's own title is the entry title."""
        return item.title

    def item_description(self, item):
        """Describe the revision: a creation notice for the very first
        revision, otherwise the editor and their comment."""
        if item.get_previous() is None:
            return '<p>Created by: %s</p>' % item.creator.username
        return "<p>Edited by %s: %s" % (item.creator.username, item.comment)

    def item_link(self, item):
        """Absolute URL of the attachment the revision belongs to."""
        attachment_url = item.attachment.get_absolute_url()
        return self.request.build_absolute_uri(attachment_url)

    def item_pubdate(self, item):
        """Publication date is the revision's creation time."""
        return item.created

    def item_author_name(self, item):
        """Author is the revision's creator."""
        return '%s' % item.creator

    def item_author_link(self, item):
        """Absolute URL of the creator's profile page."""
        creator_url = item.creator.get_absolute_url()
        return self.request.build_absolute_uri(creator_url)

    def item_categories(self, item):
        # Attachment revisions carry no categories.
        return []
mpl-2.0
Itxaka/st2
st2common/tests/unit/test_mongoescape.py
10
3679
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from st2common.util import mongoescape


class TestMongoEscape(unittest.TestCase):
    """Round-trip tests for escaping '.' and '$' in MongoDB field names
    (escaped to the fullwidth characters U+FF0E and U+FF04)."""

    def test_unnested(self):
        """A flat dict gets '.' and '$' replaced in its keys."""
        original = {'k1.k1.k1': 'v1', 'k2$': 'v2', '$k3.': 'v3'}
        expected = {u'k1\uff0ek1\uff0ek1': 'v1',
                    u'k2\uff04': 'v2',
                    u'\uff04k3\uff0e': 'v3'}

        encoded = mongoescape.escape_chars(original)
        self.assertEqual(encoded, expected, 'Escaping failed.')

        decoded = mongoescape.unescape_chars(encoded)
        self.assertEqual(decoded, original, 'Unescaping failed.')

    def test_nested(self):
        """Keys of nested dicts are escaped as well."""
        inner = {'nk1.nk1.nk1': 'v1', 'nk2$': 'v2', '$nk3.': 'v3'}
        original = {'k1.k1.k1': inner, 'k2$': 'v2', '$k3.': 'v3'}
        expected = {u'k1\uff0ek1\uff0ek1': {u'\uff04nk3\uff0e': 'v3',
                                            u'nk1\uff0enk1\uff0enk1': 'v1',
                                            u'nk2\uff04': 'v2'},
                    u'k2\uff04': 'v2',
                    u'\uff04k3\uff0e': 'v3'}

        encoded = mongoescape.escape_chars(original)
        self.assertEqual(encoded, expected, 'un-escaping failed.')

        decoded = mongoescape.unescape_chars(encoded)
        self.assertEqual(decoded, original, 'Unescaping failed.')

    def test_unescaping_of_rule_criteria(self):
        # Verify that dot escaped in rule criteria is correctly escaped.
        # Note: In the past we used different character (U+2024, one dot
        # leader) to escape dot in the rule criteria.
        legacy_escaped = {
            u'k1\u2024k1\u2024k1': 'v1',
            u'k2$': 'v2',
            u'$k3\u2024': 'v3'
        }
        expected_plain = {
            'k1.k1.k1': 'v1',
            'k2$': 'v2',
            '$k3.': 'v3'
        }
        self.assertEqual(mongoescape.unescape_chars(legacy_escaped),
                         expected_plain)

    def test_original_value(self):
        """Escaping must not mutate the input dict in place."""
        source = {'k1.k2.k3': 'v1'}

        encoded = mongoescape.escape_chars(source)
        self.assertIn('k1.k2.k3', source.keys())
        self.assertIn(u'k1\uff0ek2\uff0ek3', encoded.keys())

        decoded = mongoescape.unescape_chars(encoded)
        self.assertIn('k1.k2.k3', decoded.keys())
        self.assertIn(u'k1\uff0ek2\uff0ek3', encoded.keys())

    def test_complex(self):
        """Dicts inside lists are escaped; plain lists pass through."""
        original = {
            'k1.k2': [{'l1.l2': '123'}, {'l3.l4': '456'}],
            'k3': [{'l5.l6': '789'}],
            'k4.k5': [1, 2, 3],
            'k6': ['a', 'b']
        }
        expected = {
            u'k1\uff0ek2': [{u'l1\uff0el2': '123'}, {u'l3\uff0el4': '456'}],
            'k3': [{u'l5\uff0el6': '789'}],
            u'k4\uff0ek5': [1, 2, 3],
            'k6': ['a', 'b']
        }

        encoded = mongoescape.escape_chars(original)
        self.assertDictEqual(encoded, expected)

        decoded = mongoescape.unescape_chars(encoded)
        self.assertDictEqual(decoded, original)
apache-2.0
laszlocsomor/tensorflow
tensorflow/contrib/model_pruning/python/layers/layers.py
17
15379
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow layers with added variables for parameter masking.

Branched from tensorflow/contrib/layers/python/layers/layers.py
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import six

from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables


def _model_variable_getter(getter,
                           name,
                           shape=None,
                           dtype=None,
                           initializer=None,
                           regularizer=None,
                           trainable=True,
                           collections=None,
                           caching_device=None,
                           partitioner=None,
                           rename=None,
                           use_resource=None,
                           **_):
  """Getter that uses model_variable for compatibility with core layers.

  `rename` maps short (last path component) variable names to replacements,
  e.g. {'kernel': 'weights'}; only the final component of `name` is
  rewritten before delegating to `variables.model_variable`.
  """
  short_name = name.split('/')[-1]
  if rename and short_name in rename:
    name_components = name.split('/')
    name_components[-1] = rename[short_name]
    name = '/'.join(name_components)
  return variables.model_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      collections=collections,
      trainable=trainable,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=getter,
      use_resource=use_resource)


def _build_variable_getter(rename=None):
  """Build a model variable getter that respects scope getter and renames."""

  # VariableScope will nest the getters
  def layer_variable_getter(getter, *args, **kwargs):
    kwargs['rename'] = rename
    return _model_variable_getter(getter, *args, **kwargs)

  return layer_variable_getter


def _add_variable_to_collections(variable, collections_set, collections_name):
  """Adds variable (or all its parts) to all collections with that name."""
  collections = utils.get_variable_collections(collections_set,
                                               collections_name) or []
  variables_list = [variable]
  # A PartitionedVariable is added shard by shard.
  if isinstance(variable, tf_variables.PartitionedVariable):
    variables_list = [v for v in variable]
  for collection in collections:
    for var in variables_list:
      # Guard against duplicate entries in the same collection.
      if var not in ops.get_collection(collection):
        ops.add_to_collection(collection, var)


@add_arg_scope
def masked_convolution(inputs,
                       num_outputs,
                       kernel_size,
                       stride=1,
                       padding='SAME',
                       data_format=None,
                       rate=1,
                       activation_fn=nn.relu,
                       normalizer_fn=None,
                       normalizer_params=None,
                       weights_initializer=initializers.xavier_initializer(),
                       weights_regularizer=None,
                       biases_initializer=init_ops.zeros_initializer(),
                       biases_regularizer=None,
                       reuse=None,
                       variables_collections=None,
                       outputs_collections=None,
                       trainable=True,
                       scope=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  The layer creates a mask variable on top of the weight variable. The input
  to the convolution operation is the elementwise multiplication of the mask
  variable and the weight variable.

  It is required that 1 <= N <= 3.

  `convolution` creates a variable called `weights`, representing the
  convolutional kernel, that is convolved (actually cross-correlated) with
  the `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
  provided (such as `batch_norm`), it is then applied. Otherwise, if
  `normalizer_fn` is None and a `biases_initializer` is provided then a
  `biases` variable would be created and added the activations. Finally, if
  `activation_fn` is not `None`, it is applied to the activations as well.

  Performs atrous convolution with input stride/dilation rate equal to `rate`
  if a value > 1 for any dimension of `rate` is specified. In this case
  `stride` values != 1 are not supported.

  Args:
    inputs: A Tensor of rank N+2 of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format
      does not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format
      starts with "NC".
    num_outputs: Integer, the number of output filters.
    kernel_size: A sequence of N positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    stride: A sequence of N positive integers specifying the stride at which
      to compute output. Can be a single integer to specify the same value
      for all spatial dimensions. Specifying any `stride` value != 1 is
      incompatible with specifying any `rate` value != 1.
    padding: One of `"VALID"` or `"SAME"`.
    data_format: A string or None. Specifies whether the channel dimension
      of the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC"). For N=1, the valid values are "NWC"
      (default) and "NCW". For N=2, the valid values are "NHWC" (default)
      and "NCHW". For N=3, the valid values are "NDHWC" (default) and
      "NCDHW".
    rate: A sequence of N positive integers specifying the dilation rate to
      use for atrous convolution. Can be a single integer to specify the
      same value for all spatial dimensions. Specifying any `rate` value
      != 1 is incompatible with specifying any `stride` value != 1.
    activation_fn: Activation function. The default value is a ReLU
      function. Explicitly set it to None to skip it and maintain a linear
      activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor
      added. default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To
      be able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the
      variables or a dictionary containing a different list of collection
      per variable.
    outputs_collections: Collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_scope`.

  Returns:
    A tensor representing the output of the operation.

  Raises:
    ValueError: If `data_format` is invalid.
    ValueError: Both 'rate' and `stride` are not uniformly 1.
  """
  if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC',
                         'NCDHW']:
    raise ValueError('Invalid data_format: %r' % (data_format,))

  # Rename core-layer variable names to the contrib convention
  # (kernel -> weights, bias -> biases).
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })

  with variable_scope.variable_scope(
      scope, 'Conv', [inputs], reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    input_rank = inputs.get_shape().ndims

    # Only rank-4 (2D convolution) inputs are supported today; rank 3 and
    # rank 5 are rejected explicitly with the same error.
    if input_rank == 3:
      raise ValueError('Sparse Convolution not supported for input with rank',
                       input_rank)
    elif input_rank == 4:
      layer_class = core.MaskedConv2D
    elif input_rank == 5:
      raise ValueError('Sparse Convolution not supported for input with rank',
                       input_rank)
    else:
      raise ValueError('Sparse Convolution not supported for input with rank',
                       input_rank)

    if data_format is None or data_format == 'NHWC':
      df = 'channels_last'
    elif data_format == 'NCHW':
      df = 'channels_first'
    else:
      # NOTE(review): 'fromat' typo is preserved in this runtime message.
      raise ValueError('Unsupported data fromat', data_format)

    layer = layer_class(
        filters=num_outputs,
        kernel_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=df,
        dilation_rate=rate,
        activation=None,
        # Biases are only created when there is no normalizer and an
        # initializer was supplied.
        use_bias=not normalizer_fn and biases_initializer,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections,
                                 'weights')
    if layer.use_bias:
      _add_variable_to_collections(layer.bias, variables_collections,
                                   'biases')

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)


# Public alias: masked 2D convolution.
masked_conv2d = masked_convolution


@add_arg_scope
def masked_fully_connected(
    inputs,
    num_outputs,
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a sparse fully connected layer. The weight matrix is masked.

  `fully_connected` creates a variable called `weights`, representing a
  fully connected weight matrix, which is multiplied by the `inputs` to
  produce a `Tensor` of hidden units. If a `normalizer_fn` is provided (such
  as `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would
  be created and added the hidden units. Finally, if `activation_fn` is not
  `None`, it is applied to the hidden units as well.

  Note: that if `inputs` have a rank greater than 2, then `inputs` is
  flattened prior to the initial matrix multiply by `weights`.

  Args:
    inputs: A tensor of at least rank 2 and static value for the last
      dimension; i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
    num_outputs: Integer or long, the number of output units in the layer.
    activation_fn: Activation function. The default value is a ReLU
      function. Explicitly set it to None to skip it and maintain a linear
      activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor
      added. default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To
      be able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the
      variables or a dictionary containing a different list of collections
      per variable.
    outputs_collections: Collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    The tensor variable representing the result of the series of operations.

  Raises:
    ValueError: If x has rank less than 2 or if its last dimension is not
      set.
  """
  if not isinstance(num_outputs, six.integer_types):
    raise ValueError('num_outputs should be int or long, got %s.' %
                     (num_outputs,))

  # Rename core-layer variable names to the contrib convention.
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })

  with variable_scope.variable_scope(
      scope, 'fully_connected', [inputs], reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    layer = core.MaskedFullyConnected(
        units=num_outputs,
        activation=None,
        # Biases are only created when there is no normalizer and an
        # initializer was supplied.
        use_bias=not normalizer_fn and biases_initializer,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections,
                                 'weights')
    if layer.bias is not None:
      _add_variable_to_collections(layer.bias, variables_collections,
                                   'biases')

    # Apply normalizer function / layer.
    if normalizer_fn is not None:
      if not normalizer_params:
        normalizer_params = {}
      outputs = normalizer_fn(outputs, **normalizer_params)

    if activation_fn is not None:
      outputs = activation_fn(outputs)

    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
apache-2.0