Dataset columns:
  repo_name : string, length 5 to 100
  path      : string, length 4 to 375
  copies    : string, 991 distinct values
  size      : string, length 4 to 7
  content   : string, length 666 to 1M
  license   : string, 15 distinct values
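For orientation, a minimal sketch of how a consumer might iterate rows with this schema. It assumes the data is available as an iterable of dicts keyed by the column names above; the loading mechanism and any dataset identifier are not given here, and the in-memory row is only a placeholder shaped like the first record below.

# Illustrative only: assumes `rows` is an iterable of dicts with the columns
# described above (repo_name, path, copies, size, content, license).
def summarize(rows):
    for row in rows:
        # 'size' and 'copies' are stored as strings in this schema.
        print(row['license'], row['repo_name'], row['path'], int(row['size']))

summarize([{
    'repo_name': 'h4r5h1t/django-hauthy',
    'path': 'tests/sitemaps_tests/urls/http.py',
    'copies': '13',
    'size': '4302',
    'content': '...',
    'license': 'bsd-3-clause',
}])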
repo_name: h4r5h1t/django-hauthy
path: tests/sitemaps_tests/urls/http.py
copies: 13
size: 4302
content:
from datetime import date, datetime

from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.contrib.sitemaps import (
    FlatPageSitemap, GenericSitemap, Sitemap, views,
)
from django.http import HttpResponse
from django.utils import timezone
from django.views.decorators.cache import cache_page

from ..models import I18nTestModel, TestModel


class SimpleSitemap(Sitemap):
    changefreq = "never"
    priority = 0.5
    location = '/location/'
    lastmod = datetime.now()

    def items(self):
        return [object()]


class SimpleI18nSitemap(Sitemap):
    changefreq = "never"
    priority = 0.5
    i18n = True

    def items(self):
        return I18nTestModel.objects.all()


class EmptySitemap(Sitemap):
    changefreq = "never"
    priority = 0.5
    location = '/location/'

    def items(self):
        return []


class FixedLastmodSitemap(SimpleSitemap):
    lastmod = datetime(2013, 3, 13, 10, 0, 0)


class FixedLastmodMixedSitemap(Sitemap):
    changefreq = "never"
    priority = 0.5
    location = '/location/'
    loop = 0

    def items(self):
        o1 = TestModel()
        o1.lastmod = datetime(2013, 3, 13, 10, 0, 0)
        o2 = TestModel()
        return [o1, o2]


class DateSiteMap(SimpleSitemap):
    lastmod = date(2013, 3, 13)


class TimezoneSiteMap(SimpleSitemap):
    lastmod = datetime(2013, 3, 13, 10, 0, 0,
                       tzinfo=timezone.get_fixed_timezone(-300))


def testmodelview(request, id):
    return HttpResponse()


simple_sitemaps = {
    'simple': SimpleSitemap,
}

simple_i18nsitemaps = {
    'simple': SimpleI18nSitemap,
}

empty_sitemaps = {
    'empty': EmptySitemap,
}

fixed_lastmod_sitemaps = {
    'fixed-lastmod': FixedLastmodSitemap,
}

fixed_lastmod__mixed_sitemaps = {
    'fixed-lastmod-mixed': FixedLastmodMixedSitemap,
}

generic_sitemaps = {
    'generic': GenericSitemap({'queryset': TestModel.objects.all()}),
}

flatpage_sitemaps = {
    'flatpages': FlatPageSitemap,
}

urlpatterns = [
    url(r'^simple/index\.xml$', views.index, {'sitemaps': simple_sitemaps}),
    url(r'^simple/custom-index\.xml$', views.index,
        {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap_index.xml'}),
    url(r'^simple/sitemap-(?P<section>.+)\.xml$', views.sitemap,
        {'sitemaps': simple_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^simple/sitemap\.xml$', views.sitemap, {'sitemaps': simple_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^simple/i18n\.xml$', views.sitemap, {'sitemaps': simple_i18nsitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^simple/custom-sitemap\.xml$', views.sitemap,
        {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap.xml'},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^empty/sitemap\.xml$', views.sitemap, {'sitemaps': empty_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod/sitemap\.xml$', views.sitemap, {'sitemaps': fixed_lastmod_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod-mixed/sitemap\.xml$', views.sitemap, {'sitemaps': fixed_lastmod__mixed_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod/date-sitemap.xml$', views.sitemap, {'sitemaps': {'date-sitemap': DateSiteMap}},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^lastmod/tz-sitemap.xml$', views.sitemap, {'sitemaps': {'tz-sitemap': TimezoneSiteMap}},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^generic/sitemap\.xml$', views.sitemap, {'sitemaps': generic_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^flatpages/sitemap\.xml$', views.sitemap, {'sitemaps': flatpage_sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^cached/index\.xml$', cache_page(1)(views.index),
        {'sitemaps': simple_sitemaps, 'sitemap_url_name': 'cached_sitemap'}),
    url(r'^cached/sitemap-(?P<section>.+)\.xml', cache_page(1)(views.sitemap),
        {'sitemaps': simple_sitemaps}, name='cached_sitemap'),
]

urlpatterns += i18n_patterns(
    url(r'^i18n/testmodel/(?P<id>\d+)/$', testmodelview, name='i18n_testmodel'),
)
license: bsd-3-clause
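The file above wires Sitemap subclasses into Django's sitemap views for the test suite. A minimal sketch of the same pattern outside the tests follows; the class name NewsSitemap and the '/news/...' locations are hypothetical placeholders, and the imports mirror the Django version the test file itself targets.

# Illustrative only: a Sitemap subclass plus django.contrib.sitemaps.views
# wired into urlpatterns, as the test URLs above exercise.
from django.conf.urls import url
from django.contrib.sitemaps import Sitemap, views


class NewsSitemap(Sitemap):
    changefreq = 'daily'
    priority = 0.8

    def items(self):
        # Normally a queryset; a static list keeps the sketch self-contained.
        return ['story-1', 'story-2']

    def location(self, item):
        return '/news/%s/' % item


urlpatterns = [
    url(r'^sitemap\.xml$', views.sitemap, {'sitemaps': {'news': NewsSitemap}},
        name='django.contrib.sitemaps.views.sitemap'),
]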
repo_name: apporc/nova
path: nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
copies: 16
size: 4167
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from nova import objects
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
from nova import test
from nova.tests.unit.scheduler import fakes


@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):

    def setUp(self):
        super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
        self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()

    def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
        capabilities = {'opt1': 1, 'opt2': 2}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024))
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        self.assertFalse(agg_mock.called)

    def test_aggregate_filter_passes_empty_extra_specs(self, agg_mock):
        capabilities = {'opt1': 1, 'opt2': 2}
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024, extra_specs={}))
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        self.assertFalse(agg_mock.called)

    def _do_test_aggregate_filter_extra_specs(self, especs, passes):
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
        host = fakes.FakeHostState('host1', 'node1', {'free_ram_mb': 1024})
        assertion = self.assertTrue if passes else self.assertFalse
        assertion(self.filt_cls.host_passes(host, spec_obj))

    def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
        agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
        especs = {
            # Un-scoped extra spec
            'opt1': '1',
            # Scoped extra spec that applies to this filter
            'aggregate_instance_extra_specs:opt2': '2',
            # Scoped extra spec that does not apply to this filter
            'trust:trusted_host': 'true',
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=True)

    def test_aggregate_filter_passes_extra_specs_simple_comma(self, agg_mock):
        agg_mock.return_value = {'opt1': '1,3', 'opt2': '2'}
        especs = {
            # Un-scoped extra spec
            'opt1': '1',
            # Scoped extra spec that applies to this filter
            'aggregate_instance_extra_specs:opt1': '3',
            # Scoped extra spec that does not apply to this filter
            'trust:trusted_host': 'true',
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=True)

    def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
        agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
        especs = {
            # Un-scoped extra spec, make sure we don't blow up if it
            # happens to match our scope.
            'aggregate_instance_extra_specs': '1',
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=True)

    def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
        agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
        especs = {
            'opt1': '1',
            'opt2': '222',
            'trust:trusted_host': 'true'
        }
        self._do_test_aggregate_filter_extra_specs(especs, passes=False)
license: apache-2.0
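The tests above exercise the extra-spec scoping convention: un-scoped keys and keys scoped with 'aggregate_instance_extra_specs:' must match the host aggregate's metadata (where a metadata value may list comma-separated alternatives), while keys scoped to other filters such as 'trust:...' are ignored. The following is a simplified, standalone illustration of that matching rule, not nova's actual AggregateInstanceExtraSpecsFilter implementation.

# Illustrative only: simplified stand-in for the matching rule the tests exercise.
_SCOPE = 'aggregate_instance_extra_specs'

def flavor_matches_aggregate(extra_specs, aggregate_metadata):
    for key, required in extra_specs.items():
        if ':' in key:
            scope, _, key = key.partition(':')
            if scope != _SCOPE:
                # Spec is scoped to some other filter (e.g. 'trust:...'); skip it.
                continue
        if key not in aggregate_metadata:
            return False
        # Aggregate metadata may list comma-separated acceptable values.
        if required not in aggregate_metadata[key].split(','):
            return False
    return True

# Mirrors test_aggregate_filter_passes_extra_specs_simple_comma:
assert flavor_matches_aggregate(
    {'opt1': '1', 'aggregate_instance_extra_specs:opt1': '3', 'trust:trusted_host': 'true'},
    {'opt1': '1,3', 'opt2': '2'})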
repo_name: astuanax/django-postman
path: setup.py
copies: 1
size: 1250
content:
from setuptools import find_packages, setup

setup(
    name='django-postman',
    version=__import__('postman').__version__,
    description='User-to-User messaging system for Django, with gateway to AnonymousUser,' \
        ' moderation and thread management, user & exchange filters, inbox/sent/archives/trash folders,' \
        ' support for apps: auto-complete, notification, mailer.',
    long_description=open('docs/index.rst').read().split('\n----\n', 1)[0],
    author='Patrick Samson',
    author_email='maxcom@laposte.net',
    url='http://bitbucket.org/psam/django-postman/overview',
    license='BSD',
    packages=find_packages(exclude=('docs',)),
    include_package_data=True,
    keywords='django messages messaging email moderation',
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Communications :: Email',
    ],
    install_requires=[
        'Django',
    ],
)
license: bsd-3-clause
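One detail worth spelling out is the long_description expression in this setup.py: it reads docs/index.rst and keeps only the text before the first reStructuredText transition line ('----'). A small self-contained illustration, using a made-up sample string:

# Illustrative only: what the long_description expression does.
sample = "Django Postman\n==============\n\nIntro paragraph.\n----\nRest of the docs."
long_description = sample.split('\n----\n', 1)[0]
print(long_description)
# -> keeps the title and intro; everything after the first '----' is dropped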
repo_name: vivaxy/algorithms
path: python/problems/bulb_switcher_ii.py
copies: 1
size: 2150
content:
""" https://leetcode.com/problems/bulb-switcher-ii/ https://leetcode.com/submissions/detail/140962396/ """ class Solution1: def flipLights(self, n, m): """ :type n: int :type m: int :rtype: int """ def flipEvery(x, index): return not x def flipEven(x, index): if index % 2 == 1: return x return not x def flipOdd(x, index): if index % 2 == 1: return not x return x def flip3k1(x, index): if (index - 1) % 3 == 0: return not x return x status = set() actions = [flipEvery, flipEven, flipOdd, flip3k1] def stringifyLights(lights): st = '' for light in lights: if light: st += '1' else: st += '0' return st def flipLights(actions): lights = [True] * n for action in actions: lights = list( map(action, lights, list(range(1, len(lights) + 1)))) status.add(stringifyLights(lights)) def traverse(steps): if len(steps) == m: return flipLights(steps) for action in actions: traverse(steps + [action]) traverse([]) return len(status) class Solution: def flipLights(self, n, m): """ :type n: int :type m: int :rtype: int """ if m == 0: return 1 if n == 1: return 2 if n == 2: if m == 1: return 3 return 4 if m == 1: return 4 if m == 2: return 7 return 8 import unittest class Test(unittest.TestCase): def test(self): solution = Solution() self.assertEqual(solution.flipLights(3, 1), 4) self.assertEqual(solution.flipLights(2, 1), 3) self.assertEqual(solution.flipLights(1, 1), 2) if __name__ == '__main__': unittest.main()
license: mit
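The file keeps two solutions: Solution1 enumerates every sequence of the four button presses (4**m sequences) and counts distinct light states, while Solution returns the known closed-form answer. A short sketch of how one might cross-check the two for small inputs, assuming both classes are importable from the module above:

# Illustrative only: sanity-check the closed form against the brute force.
# Solution1 explores 4**m button sequences, so keep m small.
brute, fast = Solution1(), Solution()
for n in range(1, 4):
    for m in range(0, 3):
        assert brute.flipLights(n, m) == fast.flipLights(n, m), (n, m)
print('closed form agrees with brute force for n <= 3, m <= 2')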
repo_name: kevin-teddy/gajim
path: src/common/crypto.py
copies: 2
size: 2816
content:
# common crypto functions (mostly specific to XEP-0116, but useful elsewhere)
# -*- coding:utf-8 -*-
## src/common/crypto.py
##
## Copyright (C) 2007 Brendan Taylor <whateley AT gmail.com>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##

import os
import math

from Crypto.Hash import SHA256

# convert a large integer to a big-endian bitstring
def encode_mpi(n):
    if n >= 256:
        return encode_mpi(n / 256) + chr(n % 256)
    else:
        return chr(n)

# convert a large integer to a big-endian bitstring, padded with \x00s to
# a multiple of 16 bytes
def encode_mpi_with_padding(n):
    return pad_to_multiple(encode_mpi(n), 16, '\x00', True)

# pad 'string' to a multiple of 'multiple_of' with 'char'.
# pad on the left if 'left', otherwise pad on the right.
def pad_to_multiple(string, multiple_of, char, left):
    mod = len(string) % multiple_of
    if mod == 0:
        return string
    else:
        padding = (multiple_of - mod) * char
        if left:
            return padding + string
        else:
            return string + padding

# convert a big-endian bitstring to an integer
def decode_mpi(s):
    if len(s) == 0:
        return 0
    else:
        return 256 * decode_mpi(s[:-1]) + ord(s[-1])

def sha256(string):
    sh = SHA256.new()
    sh.update(string)
    return sh.digest()

base28_chr = "acdefghikmopqruvwxy123456789"

def sas_28x5(m_a, form_b):
    sha = sha256(m_a + form_b + 'Short Authentication String')
    lsb24 = decode_mpi(sha[-3:])
    return base28(lsb24)

def base28(n):
    if n >= 28:
        return base28(n / 28) + base28_chr[n % 28]
    else:
        return base28_chr[n]

def random_bytes(bytes_):
    return os.urandom(bytes_)

def generate_nonce():
    return random_bytes(8)

# generate a random number between 'bottom' and 'top'
def srand(bottom, top):
    # minimum number of bytes needed to represent that range
    bytes = int(math.ceil(math.log(top - bottom, 256)))

    # in retrospect, this is horribly inadequate.
    return (decode_mpi(random_bytes(bytes)) % (top - bottom)) + bottom

# a faster version of (base ** exp) % mod
# taken from <http://lists.danga.com/pipermail/yadis/2005-September/001445.html>
def powmod(base, exp, mod):
    square = base % mod
    result = 1

    while exp > 0:
        if exp & 1: # exponent is odd
            result = (result * square) % mod
        square = (square * square) % mod
        exp /= 2

    return result

# vim: se ts=3:
license: gpl-3.0
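Note that this module is Python 2 code (string-based byte handling, true-division-free '/'). Its powmod() is ordinary square-and-multiply modular exponentiation, equivalent to Python's built-in three-argument pow. A small standalone check, rewritten with '//' so it also runs on Python 3:

# Illustrative only: the same square-and-multiply loop as powmod() above,
# checked against the built-in pow(base, exp, mod).
def powmod_check(base, exp, mod):
    square = base % mod
    result = 1
    while exp > 0:
        if exp & 1:              # current exponent bit is set
            result = (result * square) % mod
        square = (square * square) % mod
        exp //= 2
    return result

assert powmod_check(7, 560, 561) == pow(7, 560, 561) == 1  # 561 is a Carmichael number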
repo_name: garethsaxby/python_koans
path: python3/koans/about_monkey_patching.py
copies: 13
size: 1355
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
# Related to AboutOpenClasses in the Ruby Koans
#

from runner.koan import *


class AboutMonkeyPatching(Koan):
    class Dog:
        def bark(self):
            return "WOOF"

    def test_as_defined_dogs_do_bark(self):
        fido = self.Dog()
        self.assertEqual(__, fido.bark())

    # ------------------------------------------------------------------

    # Add a new method to an existing class.
    def test_after_patching_dogs_can_both_wag_and_bark(self):
        def wag(self):
            return "HAPPY"

        self.Dog.wag = wag

        fido = self.Dog()
        self.assertEqual(__, fido.wag())
        self.assertEqual(__, fido.bark())

    # ------------------------------------------------------------------

    def test_most_built_in_classes_cannot_be_monkey_patched(self):
        try:
            int.is_even = lambda self: (self % 2) == 0
        except Exception as ex:
            err_msg = ex.args[0]

        self.assertRegex(err_msg, __)

    # ------------------------------------------------------------------

    class MyInt(int):
        pass

    def test_subclasses_of_built_in_classes_can_be_be_monkey_patched(self):
        self.MyInt.is_even = lambda self: (self % 2) == 0

        self.assertEqual(__, self.MyInt(1).is_even())
        self.assertEqual(__, self.MyInt(2).is_even())
license: mit
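The koan leaves its expected values as '__' blanks for the student. For reference, a sketch of the same monkey-patching idea with the blanks filled in, outside the Koan test harness (the printed error text is only an example of what CPython reports):

# Illustrative only: monkey patching a user-defined class...
class Dog:
    def bark(self):
        return "WOOF"

def wag(self):
    return "HAPPY"

Dog.wag = wag          # attach a new method to the existing class
fido = Dog()
assert fido.wag() == "HAPPY"
assert fido.bark() == "WOOF"

# ...built-in types such as int reject new attributes...
try:
    int.is_even = lambda self: (self % 2) == 0
except TypeError as ex:
    print(ex)          # e.g. "can't set attributes of built-in/extension type 'int'"

# ...but subclasses of built-ins can be patched.
class MyInt(int):
    pass

MyInt.is_even = lambda self: (self % 2) == 0
assert MyInt(2).is_even() is True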
repo_name: snahelou/awx
path: awx/main/south_migrations/0065_v220_changes.py
copies: 1
size: 59856
content:
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'JobTemplate.become_enabled'
        db.add_column(u'main_jobtemplate', 'become_enabled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Adding field 'Job.become_enabled'
        db.add_column(u'main_job', 'become_enabled',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Adding field 'Credential.become_method'
        db.add_column(u'main_credential', 'become_method',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
                      keep_default=False)

        # Adding field 'Credential.become_username'
        db.add_column(u'main_credential', 'become_username',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True),
                      keep_default=False)

        # Adding field 'Credential.become_password'
        db.add_column(u'main_credential', 'become_password',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'JobTemplate.become_enabled'
        db.delete_column(u'main_jobtemplate', 'become_enabled')

        # Deleting field 'Job.become_enabled'
        db.delete_column(u'main_job', 'become_enabled')

        # Deleting field 'Credential.become_method'
        db.delete_column(u'main_credential', 'become_method')

        # Deleting field 'Credential.become_username'
        db.delete_column(u'main_credential', 'become_username')

        # Deleting field 'Credential.become_password'
        db.delete_column(u'main_credential', 'become_password')

    models = {
        u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) },
        u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) },
        u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name':
('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'main.activitystream': { 'Meta': {'object_name': 'ActivityStream'}, 'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_stream'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'ad_hoc_command': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.AdHocCommand']", 'symmetrical': 'False', 'blank': 'True'}), 'changes': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'credential': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Credential']", 'symmetrical': 'False', 'blank': 'True'}), 'custom_inventory_script': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.CustomInventoryScript']", 'symmetrical': 'False', 'blank': 'True'}), 'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'host': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Host']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Inventory']", 'symmetrical': 'False', 'blank': 'True'}), 'inventory_source': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventorySource']", 'symmetrical': 'False', 'blank': 'True'}), 'inventory_update': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventoryUpdate']", 'symmetrical': 'False', 'blank': 'True'}), 'job': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Job']", 'symmetrical': 'False', 'blank': 'True'}), 'job_template': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.JobTemplate']", 'symmetrical': 'False', 'blank': 'True'}), 'object1': ('django.db.models.fields.TextField', [], {}), 'object2': ('django.db.models.fields.TextField', [], {}), 'object_relationship_type': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'operation': ('django.db.models.fields.CharField', [], {'max_length': '13'}), 'organization': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Organization']", 'symmetrical': 'False', 'blank': 'True'}), 'permission': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Project']", 'symmetrical': 'False', 'blank': 'True'}), 'project_update': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ProjectUpdate']", 'symmetrical': 'False', 'blank': 'True'}), 'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Schedule']", 'symmetrical': 'False', 'blank': 'True'}), 'team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Team']", 'symmetrical': 'False', 'blank': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'unified_job': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'activity_stream_as_unified_job+'", 'blank': 'True', 'to': "orm['main.UnifiedJob']"}), 'unified_job_template': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'activity_stream_as_unified_job_template+'", 'blank': 'True', 'to': "orm['main.UnifiedJobTemplate']"}), 'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}) }, 'main.adhoccommand': { 'Meta': {'object_name': 'AdHocCommand', '_ormbases': ['main.UnifiedJob']}, 'credential': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'ad_hoc_commands'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Credential']"}), 'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}), 'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'ad_hoc_commands'", 'symmetrical': 'False', 'through': "orm['main.AdHocCommandEvent']", 'to': "orm['main.Host']"}), 'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ad_hoc_commands'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}), 'job_type': ('django.db.models.fields.CharField', [], {'default': "'run'", 'max_length': '64'}), 'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'module_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'default': "'command'", 'max_length': '1024'}), 'privilege_escalation': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}), u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'}), 'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}) }, 'main.adhoccommandevent': { 'Meta': {'ordering': "('-pk',)", 'unique_together': "[('ad_hoc_command', 'host_name')]", 'object_name': 'AdHocCommandEvent'}, 'ad_hoc_command': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ad_hoc_command_events'", 'to': "orm['main.AdHocCommand']"}), 'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'counter': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'event': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'event_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), 'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'host': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'ad_hoc_command_events'", 'null': 
'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Host']"}), 'host_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}) }, 'main.authtoken': { 'Meta': {'object_name': 'AuthToken'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'request_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_tokens'", 'to': u"orm['auth.User']"}) }, 'main.credential': { 'Meta': {'ordering': "('kind', 'name')", 'unique_together': "[('user', 'team', 'kind', 'name')]", 'object_name': 'Credential'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'become_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}), 'become_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'become_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'cloud': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'host': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kind': ('django.db.models.fields.CharField', [], {'default': "'ssh'", 'max_length': '32'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'project': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'ssh_key_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'ssh_key_unlock': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'su_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'su_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'sudo_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 
'blank': 'True'}), 'sudo_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': "orm['main.Team']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}), 'username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'vault_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}) }, 'main.custominventoryscript': { 'Meta': {'ordering': "('name',)", 'unique_together': "[('name', 'organization')]", 'object_name': 'CustomInventoryScript'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'custominventoryscript\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'custominventoryscript\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custom_inventory_scripts'", 'to': "orm['main.Organization']"}), 'script': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}) }, 'main.group': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('name', 'inventory'),)", 'object_name': 'Group'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': "orm['main.Host']"}), 'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['main.Inventory']"}), 'inventory_sources': 
('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['main.InventorySource']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'parents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'children'", 'blank': 'True', 'to': "orm['main.Group']"}), 'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}) }, 'main.host': { 'Meta': {'ordering': "('inventory', 'name')", 'unique_together': "(('name', 'inventory'),)", 'object_name': 'Host'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instance_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts'", 'to': "orm['main.Inventory']"}), 'inventory_sources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'hosts'", 'symmetrical': 'False', 'to': "orm['main.InventorySource']"}), 'last_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'hosts_as_last_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Job']"}), 'last_job_host_summary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts_as_last_job_summary+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobHostSummary']", 'blank': 'True', 'null': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}) }, 'main.instance': { 'Meta': {'object_name': 'Instance'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': 
'250'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}) }, 'main.inventory': { 'Meta': {'ordering': "('name',)", 'unique_together': "[('name', 'organization')]", 'object_name': 'Inventory'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inventory_sources_with_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventories'", 'to': "orm['main.Organization']"}), 'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'total_inventory_sources': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}) }, 'main.inventorysource': { 'Meta': {'object_name': 'InventorySource', '_ormbases': ['main.UnifiedJobTemplate']}, 'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventorysources'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'group': ('awx.main.fields.AutoOneToOneField', [], {'default': 'None', 'related_name': "'inventory_source'", 'unique': 'True', 'null': 'True', 'to': "orm['main.Group']"}), 'group_by': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'instance_filters': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'inventory': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'inventory_sources'", 'null': 'True', 'to': "orm['main.Inventory']"}), 'overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'overwrite_vars': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}), 'source_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'source_regions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'source_script': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['main.CustomInventoryScript']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'source_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}), 'update_cache_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'main.inventoryupdate': { 'Meta': {'object_name': 'InventoryUpdate', '_ormbases': ['main.UnifiedJob']}, 'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventoryupdates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'group_by': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'instance_filters': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'inventory_source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_updates'", 'to': "orm['main.InventorySource']"}), 'license_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'overwrite_vars': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}), 'source_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'source_regions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'source_script': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['main.CustomInventoryScript']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'source_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'}) }, 'main.job': { 'Meta': {'ordering': "('id',)", 'object_name': 'Job', '_ormbases': ['main.UnifiedJob']}, 'become_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'force_handlers': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}), 'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'jobs'", 'symmetrical': 'False', 'through': "orm['main.JobHostSummary']", 'to': "orm['main.Host']"}), 'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}), 'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobTemplate']", 'blank': 'True', 'null': 'True'}), 'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'playbook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Project']", 'blank': 'True', 'null': 'True'}), 'skip_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'start_at_task': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'}), 'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}) }, 'main.jobevent': { 'Meta': {'ordering': "('pk',)", 'object_name': 'JobEvent'}, 'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'counter': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'event': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'event_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), 'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'host': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'job_events_as_primary_host'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Host']"}), 'host_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'job_events'", 'symmetrical': 'False', 'to': "orm['main.Host']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_events'", 'to': "orm['main.Job']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.JobEvent']"}), 'play': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'role': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'task': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}) }, 
'main.jobhostsummary': { 'Meta': {'ordering': "('-pk',)", 'unique_together': "[('job', 'host_name')]", 'object_name': 'JobHostSummary'}, 'changed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'dark': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'host': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'job_host_summaries'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Host']"}), 'host_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_host_summaries'", 'to': "orm['main.Job']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'ok': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'skipped': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'main.joborigin': { 'Meta': {'object_name': 'JobOrigin'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Instance']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'unified_job': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'job_origin'", 'unique': 'True', 'to': "orm['main.UnifiedJob']"}) }, 'main.jobtemplate': { 'Meta': {'ordering': "('name',)", 'object_name': 'JobTemplate', '_ormbases': ['main.UnifiedJobTemplate']}, 'ask_variables_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'become_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'force_handlers': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}), 'host_config_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}), 'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'limit': ('django.db.models.fields.CharField', [], {'default': "''", 
'max_length': '1024', 'blank': 'True'}), 'playbook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Project']", 'blank': 'True', 'null': 'True'}), 'skip_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'start_at_task': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'survey_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'survey_spec': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}), 'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}) }, 'main.organization': { 'Meta': {'ordering': "('name',)", 'object_name': 'Organization'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_of_organizations'", 'blank': 'True', 'to': u"orm['auth.User']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['main.Project']"}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': u"orm['auth.User']"}) }, 'main.permission': { 'Meta': {'object_name': 'Permission'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 
'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'permission_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Project']"}), 'run_ad_hoc_commands': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Team']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}) }, 'main.profile': { 'Meta': {'object_name': 'Profile'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ldap_dn': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'user': ('awx.main.fields.AutoOneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}) }, 'main.project': { 'Meta': {'ordering': "('id',)", 'object_name': 'Project', '_ormbases': ['main.UnifiedJobTemplate']}, 'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'local_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'scm_branch': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}), 'scm_clean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'scm_delete_on_next_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'scm_delete_on_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'scm_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'blank': 'True'}), 'scm_update_cache_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'scm_update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'scm_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}) }, 'main.projectupdate': { 'Meta': {'object_name': 'ProjectUpdate', '_ormbases': ['main.UnifiedJob']}, 'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projectupdates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}), 'local_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_updates'", 'to': "orm['main.Project']"}), 'scm_branch': 
('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}), 'scm_clean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'scm_delete_on_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'scm_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'blank': 'True'}), 'scm_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'}) }, 'main.schedule': { 'Meta': {'ordering': "['-next_run']", 'object_name': 'Schedule'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'schedule\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'dtend': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'dtstart': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'extra_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'schedule\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}), 'next_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'rrule': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'unified_job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'schedules'", 'to': "orm['main.UnifiedJobTemplate']"}) }, 'main.systemjob': { 'Meta': {'ordering': "('id',)", 'object_name': 'SystemJob', '_ormbases': ['main.UnifiedJob']}, 'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'job_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}), 'system_job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.SystemJobTemplate']", 'blank': 'True', 'null': 'True'}), u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'}) }, 'main.systemjobtemplate': { 'Meta': {'object_name': 'SystemJobTemplate', '_ormbases': ['main.UnifiedJobTemplate']}, 'job_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}), u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}) }, 'main.team': { 'Meta': {'ordering': 
"('organization__name', 'name')", 'unique_together': "[('organization', 'name')]", 'object_name': 'Team'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'teams'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Organization']"}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': "orm['main.Project']"}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': u"orm['auth.User']"}) }, 'main.unifiedjob': { 'Meta': {'object_name': 'UnifiedJob'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'cancel_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'celery_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjob\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'dependent_jobs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'dependent_jobs_rel_+'", 'to': "orm['main.UnifiedJob']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'elapsed': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '3'}), 'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'finished': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'job_cwd': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}), 'job_env': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), 'job_explanation': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'launch_type': ('django.db.models.fields.CharField', [], {'default': "'manual'", 'max_length': '20'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': 
\'unifiedjob\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'old_pk': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_main.unifiedjob_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}), 'result_stdout_file': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'result_stdout_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'result_traceback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'schedule': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['main.Schedule']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'start_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'started': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'}), 'unified_job_template': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjob_unified_jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJobTemplate']"}) }, 'main.unifiedjobtemplate': { 'Meta': {'unique_together': "[('polymorphic_ctype', 'name')]", 'object_name': 'UnifiedJobTemplate'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjobtemplate\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'current_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_current_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJob']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'has_schedules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_last_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJob']"}), 'last_job_failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_job_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjobtemplate\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'next_job_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'next_schedule': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': 
"'unifiedjobtemplate_as_next_schedule+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Schedule']"}), 'old_pk': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_main.unifiedjobtemplate_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "'ok'", 'max_length': '32'}) } } complete_apps = ['main']
apache-2.0
GuessWhoSamFoo/pandas
pandas/tests/util/test_validate_kwargs.py
2
2040
# -*- coding: utf-8 -*- from collections import OrderedDict import pytest from pandas.util._validators import validate_bool_kwarg, validate_kwargs _fname = "func" def test_bad_kwarg(): good_arg = "f" bad_arg = good_arg + "o" compat_args = OrderedDict() compat_args[good_arg] = "foo" compat_args[bad_arg + "o"] = "bar" kwargs = {good_arg: "foo", bad_arg: "bar"} msg = (r"{fname}\(\) got an unexpected " r"keyword argument '{arg}'".format(fname=_fname, arg=bad_arg)) with pytest.raises(TypeError, match=msg): validate_kwargs(_fname, kwargs, compat_args) @pytest.mark.parametrize("i", range(1, 3)) def test_not_all_none(i): bad_arg = "foo" msg = (r"the '{arg}' parameter is not supported " r"in the pandas implementation of {func}\(\)". format(arg=bad_arg, func=_fname)) compat_args = OrderedDict() compat_args["foo"] = 1 compat_args["bar"] = "s" compat_args["baz"] = None kwarg_keys = ("foo", "bar", "baz") kwarg_vals = (2, "s", None) kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i])) with pytest.raises(ValueError, match=msg): validate_kwargs(_fname, kwargs, compat_args) def test_validation(): # No exceptions should be raised. compat_args = OrderedDict() compat_args["f"] = None compat_args["b"] = 1 compat_args["ba"] = "s" kwargs = dict(f=None, b=1) validate_kwargs(_fname, kwargs, compat_args) @pytest.mark.parametrize("name", ["inplace", "copy"]) @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) def test_validate_bool_kwarg_fail(name, value): msg = ("For argument \"%s\" expected type bool, received type %s" % (name, type(value).__name__)) with pytest.raises(ValueError, match=msg): validate_bool_kwarg(value, name) @pytest.mark.parametrize("name", ["inplace", "copy"]) @pytest.mark.parametrize("value", [True, False, None]) def test_validate_bool_kwarg(name, value): assert validate_bool_kwarg(value, name) == value
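The tests above exercise pandas' private keyword-validation helpers. The following is a minimal usage sketch assuming the same private API the tests import (pandas.util._validators); the function name "rename" and the compat_args contents are illustrative only:

# Minimal usage sketch of the helpers exercised above; signatures are taken
# from the test file itself (this is a private pandas API).
from collections import OrderedDict

from pandas.util._validators import validate_bool_kwarg, validate_kwargs

compat_args = OrderedDict([("axis", None), ("inplace", False)])

# Passes: only known keywords, with values matching the defaults.
validate_kwargs("rename", {"axis": None}, compat_args)

# Raises TypeError: 'columns' is not a known keyword.
try:
    validate_kwargs("rename", {"columns": "x"}, compat_args)
except TypeError as exc:
    print(exc)

# Raises ValueError: 1 is not a bool.
try:
    validate_bool_kwarg(1, "inplace")
except ValueError as exc:
    print(exc)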
bsd-3-clause
edx/edx-platform
openedx/core/djangoapps/catalog/management/commands/tests/test_create_catalog_integrations.py
4
3596
""" Test cases for catalog_integrations command. """ import pytest from django.core.management import call_command, CommandError from openedx.core.djangolib.testing.utils import CacheIsolationTestCase from openedx.core.djangoapps.catalog.models import CatalogIntegration from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin class TestCreateCatalogIntegrations(CatalogIntegrationMixin, CacheIsolationTestCase): """ Test the create_catalog_integrations command """ def test_without_required(self): ''' Test that required values are supplied ''' # test without service_username with pytest.raises(CommandError): call_command( "create_catalog_integrations", "--internal_api_url", self.catalog_integration_defaults['internal_api_url'], ) # test without internal_api_url with pytest.raises(CommandError): call_command( "create_catalog_integrations", "--service_username", self.catalog_integration_defaults['service_username'], ) def test_with_required(self): ''' Test with required arguments supplied''' initial = CatalogIntegration.current() # test with both required args call_command( "create_catalog_integrations", "--internal_api_url", self.catalog_integration_defaults['internal_api_url'], "--service_username", self.catalog_integration_defaults['service_username'] ) current = CatalogIntegration.current() # assert current has changed assert initial != current assert current.enabled is False assert current.internal_api_url == self.catalog_integration_defaults['internal_api_url'] assert current.service_username == self.catalog_integration_defaults['service_username'] def test_with_optional(self): ''' Test with optionals arguments supplied''' initial = CatalogIntegration.current() # test --enabled call_command( "create_catalog_integrations", "--internal_api_url", self.catalog_integration_defaults['internal_api_url'], "--service_username", self.catalog_integration_defaults['service_username'], "--enabled" ) current = CatalogIntegration.current() # assert current has changed assert initial != current assert current.enabled is True assert current.internal_api_url == self.catalog_integration_defaults['internal_api_url'] assert current.service_username == self.catalog_integration_defaults['service_username'] # test with all args call_command( "create_catalog_integrations", "--internal_api_url", self.catalog_integration_defaults['internal_api_url'], "--service_username", self.catalog_integration_defaults['service_username'], "--enabled", "--cache_ttl", 500, "--long_term_cache_ttl", 500, "--page_size", 500 ) current = CatalogIntegration.current() # assert current has changed assert initial != current assert current.enabled is True assert current.internal_api_url == self.catalog_integration_defaults['internal_api_url'] assert current.service_username == self.catalog_integration_defaults['service_username'] assert current.cache_ttl == 500 assert current.long_term_cache_ttl == 500 assert current.page_size == 500
agpl-3.0
dol-sen/gentoolkit
pym/gentoolkit/eshowkw/keywords_header.py
2
3622
# vim:fileencoding=utf-8 # Copyright 2001-2010 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ['keywords_header'] from portage import settings as ports from portage.output import colorize from gentoolkit.eshowkw.display_pretty import colorize_string from gentoolkit.eshowkw.display_pretty import align_string class keywords_header: __IMPARCHS = [ 'arm', 'amd64', 'x86' ] __ADDITIONAL_FIELDS = [ 'unused', 'slot' ] __EXTRA_FIELDS = [ 'repo' ] @staticmethod def __readKeywords(): """Read all available keywords from portage.""" return [x for x in ports.archlist() if not x.startswith('~')] @staticmethod def __sortKeywords(keywords, prefix = False, required_keywords = []): """Sort keywords with short archs first""" # user specified only some keywords to display if len(required_keywords) != 0: tmpkeywords = [k for k in keywords if k in required_keywords] # idiots might specify non-existant archs if len(tmpkeywords) != 0: keywords = tmpkeywords normal = [k for k in keywords if len(k.split('-')) == 1] normal.sort() if prefix: longer = [k for k in keywords if len(k.split('-')) != 1] longer.sort() normal.extend(longer) return normal def __readAdditionalFields(self): """Prepare list of aditional fileds displayed by eshowkw (2nd part)""" return self.__ADDITIONAL_FIELDS def __readExtraFields(self): """Prepare list of extra fileds displayed by eshowkw (3rd part)""" return self.__EXTRA_FIELDS def __formatKeywords(self, keywords, align, length): """Append colors and align keywords properly""" tmp = [] for keyword in keywords: tmp2 = keyword keyword = align_string(keyword, align, length) # % are used as separators for further split so we wont loose spaces and coloring keyword = '%'.join(list(keyword)) if tmp2 in self.__IMPARCHS: tmp.append(colorize_string('darkyellow', keyword)) else: tmp.append(keyword) return tmp @staticmethod def __formatAdditional(additional, align, length): """Align additional items properly""" # % are used as separators for further split so we wont loose spaces and coloring return ['%'.join(align_string(x, align, length)) for x in additional] def __prepareExtra(self, extra, align, length): content = [] content.append(''.ljust(length, '-')) content.extend(self.__formatAdditional(extra, align, length)) return content def __prepareResult(self, keywords, additional, align, length): """Parse keywords and additional fields into one list with proper separators""" content = [] content.append(''.ljust(length, '-')) content.extend(self.__formatKeywords(keywords, align, length)) content.append(''.ljust(length, '-')) content.extend(self.__formatAdditional(additional, align, length)) return content def __init__(self, prefix = False, required_keywords = [], keywords_align = 'bottom'): """Initialize keywords header.""" additional = self.__readAdditionalFields() extra = self.__readExtraFields() self.keywords = self.__sortKeywords(self.__readKeywords(), prefix, required_keywords) self.length = max( max([len(x) for x in self.keywords]), max([len(x) for x in additional]), max([len(x) for x in extra]) ) #len(max([max(self.keywords, key=len), max(additional, key=len)], key=len)) self.keywords_count = len(self.keywords) self.additional_count = len(additional) self.extra_count = len(extra) self.content = self.__prepareResult(self.keywords, additional, keywords_align, self.length) self.extra = self.__prepareExtra(extra, keywords_align, self.length)
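__sortKeywords orders single-token arches first and, when prefix keywords are requested, appends the multi-token ones after them. The snippet below reproduces just that ordering rule on a toy keyword list (it does not read portage's archlist):

# Stand-alone illustration of the sort used by keywords_header.__sortKeywords:
# single-token arches come first, multi-token (prefix) keywords follow.
def sort_keywords(keywords, prefix=False):
    normal = sorted(k for k in keywords if len(k.split('-')) == 1)
    if prefix:
        normal.extend(sorted(k for k in keywords if len(k.split('-')) != 1))
    return normal

print(sort_keywords(['x86', 'amd64', 'x64-macos', 'arm'], prefix=True))
# -> ['amd64', 'arm', 'x86', 'x64-macos']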
gpl-2.0
ninjablocks/kernel-VAR-SOM-AMxx
tools/perf/scripts/python/syscall-counts.py
11181
1522
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
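The perf script registers a raw_syscalls__sys_enter handler and tallies counts in an autodict supplied by perf's Trace-Util library. The stand-alone sketch below shows the same tally-and-report logic using only the standard library and a made-up event stream, for readers without a perf runtime at hand:

# Stand-alone illustration of the counting/reporting done by the perf script
# above, using collections.Counter instead of autodict and fake syscall ids.
from collections import Counter

syscalls = Counter()

def sys_enter(syscall_id):
    # Equivalent of raw_syscalls__sys_enter: one increment per entry event.
    syscalls[syscall_id] += 1

for sid in [0, 1, 1, 3, 1, 0]:   # pretend event stream
    sys_enter(sid)

print("%-40s %10s" % ("event", "count"))
print("%-40s %10s" % ("-" * 40, "-" * 10))
for sid, count in sorted(syscalls.items(), key=lambda kv: kv[1], reverse=True):
    print("%-40s %10d" % ("syscall_%d" % sid, count))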
gpl-2.0
quoclieu/codebrew17-starving
env/lib/python3.5/site-packages/Crypto/Cipher/_mode_ccm.py
4
22663
# =================================================================== # # Copyright (c) 2014, Legrandin <helderijs@gmail.com> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # =================================================================== """ Counter with CBC-MAC (CCM) mode. """ __all__ = ['CcmMode'] from Crypto.Util.py3compat import byte_string, b, bchr, bord, unhexlify from Crypto.Util.strxor import strxor from Crypto.Util.number import long_to_bytes from Crypto.Hash import BLAKE2s from Crypto.Random import get_random_bytes def enum(**enums): return type('Enum', (), enums) MacStatus = enum(NOT_STARTED=0, PROCESSING_AUTH_DATA=1, PROCESSING_PLAINTEXT=2) class CcmMode(object): """Counter with CBC-MAC (CCM). This is an Authenticated Encryption with Associated Data (`AEAD`_) mode. It provides both confidentiality and authenticity. The header of the message may be left in the clear, if needed, and it will still be subject to authentication. The decryption step tells the receiver if the message comes from a source that really knowns the secret key. Additionally, decryption detects if any part of the message - including the header - has been modified or corrupted. This mode requires a nonce. The nonce shall never repeat for two different messages encrypted with the same key, but it does not need to be random. Note that there is a trade-off between the size of the nonce and the maximum size of a single message you can encrypt. It is important to use a large nonce if the key is reused across several messages and the nonce is chosen randomly. It is acceptable to us a short nonce if the key is only used a few times or if the nonce is taken from a counter. The following table shows the trade-off when the nonce is chosen at random. The column on the left shows how many messages it takes for the keystream to repeat **on average**. In practice, you will want to stop using the key way before that. +--------------------+---------------+-------------------+ | Avg. # of messages | nonce | Max. 
message | | before keystream | size | size | | repeats | (bytes) | (bytes) | +====================+===============+===================+ | 2^52 | 13 | 64K | +--------------------+---------------+-------------------+ | 2^48 | 12 | 16M | +--------------------+---------------+-------------------+ | 2^44 | 11 | 4G | +--------------------+---------------+-------------------+ | 2^40 | 10 | 1T | +--------------------+---------------+-------------------+ | 2^36 | 9 | 64P | +--------------------+---------------+-------------------+ | 2^32 | 8 | 16E | +--------------------+---------------+-------------------+ This mode is only available for ciphers that operate on 128 bits blocks (e.g. AES but not TDES). See `NIST SP800-38C`_ or RFC3610_. .. _`NIST SP800-38C`: http://csrc.nist.gov/publications/nistpubs/800-38C/SP800-38C.pdf .. _RFC3610: https://tools.ietf.org/html/rfc3610 .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html :undocumented: __init__ """ def __init__(self, factory, key, nonce, mac_len, msg_len, assoc_len, cipher_params): self.block_size = factory.block_size """The block size of the underlying cipher, in bytes.""" self.nonce = nonce """The nonce used for this cipher instance""" self._factory = factory self._key = key self._mac_len = mac_len self._msg_len = msg_len self._assoc_len = assoc_len self._cipher_params = cipher_params self._mac_tag = None # Cache for MAC tag if self.block_size != 16: raise ValueError("CCM mode is only available for ciphers" " that operate on 128 bits blocks") # MAC tag length (Tlen) if mac_len not in (4, 6, 8, 10, 12, 14, 16): raise ValueError("Parameter 'mac_len' must be even" " and in the range 4..16 (not %d)" % mac_len) # Nonce value if not (nonce and 7 <= len(nonce) <= 13): raise ValueError("Length of parameter 'nonce' must be" " in the range 7..13 bytes") # Create MAC object (the tag will be the last block # bytes worth of ciphertext) self._mac = self._factory.new(key, factory.MODE_CBC, iv=bchr(0) * 16, **cipher_params) self._mac_status = MacStatus.NOT_STARTED self._t = None # Allowed transitions after initialization self._next = [self.update, self.encrypt, self.decrypt, self.digest, self.verify] # Cumulative lengths self._cumul_assoc_len = 0 self._cumul_msg_len = 0 # Cache for unaligned associated data/plaintext. # This is a list, but when the MAC starts, it will become a binary # string no longer than the block size. 
self._cache = [] # Start CTR cipher, by formatting the counter (A.3) q = 15 - len(nonce) # length of Q, the encoded message length self._cipher = self._factory.new(key, self._factory.MODE_CTR, nonce=bchr(q - 1) + nonce, **cipher_params) # S_0, step 6 in 6.1 for j=0 self._s_0 = self._cipher.encrypt(bchr(0) * 16) # Try to start the MAC if None not in (assoc_len, msg_len): self._start_mac() def _start_mac(self): assert(self._mac_status == MacStatus.NOT_STARTED) assert(None not in (self._assoc_len, self._msg_len)) assert(isinstance(self._cache, list)) # Formatting control information and nonce (A.2.1) q = 15 - len(self.nonce) # length of Q, the encoded message length flags = (64 * (self._assoc_len > 0) + 8 * ((self._mac_len - 2) // 2) + (q - 1)) b_0 = bchr(flags) + self.nonce + long_to_bytes(self._msg_len, q) # Formatting associated data (A.2.2) # Encoded 'a' is concatenated with the associated data 'A' assoc_len_encoded = b('') if self._assoc_len > 0: if self._assoc_len < (2 ** 16 - 2 ** 8): enc_size = 2 elif self._assoc_len < (2 ** 32): assoc_len_encoded = b('\xFF\xFE') enc_size = 4 else: assoc_len_encoded = b('\xFF\xFF') enc_size = 8 assoc_len_encoded += long_to_bytes(self._assoc_len, enc_size) # b_0 and assoc_len_encoded must be processed first self._cache.insert(0, b_0) self._cache.insert(1, assoc_len_encoded) # Process all the data cached so far first_data_to_mac = b("").join(self._cache) self._cache = b("") self._mac_status = MacStatus.PROCESSING_AUTH_DATA self._update(first_data_to_mac) def _pad_cache_and_update(self): assert(self._mac_status != MacStatus.NOT_STARTED) assert(byte_string(self._cache)) assert(len(self._cache) < self.block_size) # Associated data is concatenated with the least number # of zero bytes (possibly none) to reach alignment to # the 16 byte boundary (A.2.3) len_cache = len(self._cache) if len_cache > 0: self._update(bchr(0) * (self.block_size - len_cache)) def update(self, assoc_data): """Protect associated data If there is any associated data, the caller has to invoke this function one or more times, before using ``decrypt`` or ``encrypt``. By *associated data* it is meant any data (e.g. packet headers) that will not be encrypted and will be transmitted in the clear. However, the receiver is still able to detect any modification to it. In CCM, the *associated data* is also called *additional authenticated data* (AAD). If there is no associated data, this method must not be called. The caller may split associated data in segments of any size, and invoke this method multiple times, each time with the next segment. :Parameters: assoc_data : byte string A piece of associated data. There are no restrictions on its size. 
""" if self.update not in self._next: raise TypeError("update() can only be called" " immediately after initialization") self._next = [self.update, self.encrypt, self.decrypt, self.digest, self.verify] self._cumul_assoc_len += len(assoc_data) if self._assoc_len is not None and \ self._cumul_assoc_len > self._assoc_len: raise ValueError("Associated data is too long") self._update(assoc_data) return self def _update(self, assoc_data_pt=b("")): """Update the MAC with associated data or plaintext (without FSM checks)""" if self._mac_status == MacStatus.NOT_STARTED: self._cache.append(assoc_data_pt) return assert(byte_string(self._cache)) assert(len(self._cache) < self.block_size) if len(self._cache) > 0: filler = min(self.block_size - len(self._cache), len(assoc_data_pt)) self._cache += assoc_data_pt[:filler] assoc_data_pt = assoc_data_pt[filler:] if len(self._cache) < self.block_size: return # The cache is exactly one block self._t = self._mac.encrypt(self._cache) self._cache = b("") update_len = len(assoc_data_pt) // self.block_size * self.block_size self._cache = assoc_data_pt[update_len:] if update_len > 0: self._t = self._mac.encrypt(assoc_data_pt[:update_len])[-16:] def encrypt(self, plaintext): """Encrypt data with the key set at initialization. A cipher object is stateful: once you have encrypted a message you cannot encrypt (or decrypt) another message using the same object. This method can be called only **once** if ``msg_len`` was not passed at initialization. If ``msg_len`` was given, the data to encrypt can be broken up in two or more pieces and `encrypt` can be called multiple times. That is, the statement: >>> c.encrypt(a) + c.encrypt(b) is equivalent to: >>> c.encrypt(a+b) This function does not add any padding to the plaintext. :Parameters: plaintext : byte string The piece of data to encrypt. It can be of any length. :Return: the encrypted data, as a byte string. It is as long as *plaintext*. """ if self.encrypt not in self._next: raise TypeError("encrypt() can only be called after" " initialization or an update()") self._next = [self.encrypt, self.digest] # No more associated data allowed from now if self._assoc_len is None: assert(isinstance(self._cache, list)) self._assoc_len = sum([len(x) for x in self._cache]) if self._msg_len is not None: self._start_mac() else: if self._cumul_assoc_len < self._assoc_len: raise ValueError("Associated data is too short") # Only once piece of plaintext accepted if message length was # not declared in advance if self._msg_len is None: self._msg_len = len(plaintext) self._start_mac() self._next = [self.digest] self._cumul_msg_len += len(plaintext) if self._cumul_msg_len > self._msg_len: raise ValueError("Message is too long") if self._mac_status == MacStatus.PROCESSING_AUTH_DATA: # Associated data is concatenated with the least number # of zero bytes (possibly none) to reach alignment to # the 16 byte boundary (A.2.3) self._pad_cache_and_update() self._mac_status = MacStatus.PROCESSING_PLAINTEXT self._update(plaintext) return self._cipher.encrypt(plaintext) def decrypt(self, ciphertext): """Decrypt data with the key set at initialization. A cipher object is stateful: once you have decrypted a message you cannot decrypt (or encrypt) another message with the same object. This method can be called only **once** if ``msg_len`` was not passed at initialization. If ``msg_len`` was given, the data to decrypt can be broken up in two or more pieces and `decrypt` can be called multiple times. 
That is, the statement: >>> c.decrypt(a) + c.decrypt(b) is equivalent to: >>> c.decrypt(a+b) This function does not remove any padding from the plaintext. :Parameters: ciphertext : byte string The piece of data to decrypt. It can be of any length. :Return: the decrypted data (byte string). """ if self.decrypt not in self._next: raise TypeError("decrypt() can only be called" " after initialization or an update()") self._next = [self.decrypt, self.verify] # No more associated data allowed from now if self._assoc_len is None: assert(isinstance(self._cache, list)) self._assoc_len = sum([len(x) for x in self._cache]) if self._msg_len is not None: self._start_mac() else: if self._cumul_assoc_len < self._assoc_len: raise ValueError("Associated data is too short") # Only once piece of ciphertext accepted if message length was # not declared in advance if self._msg_len is None: self._msg_len = len(ciphertext) self._start_mac() self._next = [self.verify] self._cumul_msg_len += len(ciphertext) if self._cumul_msg_len > self._msg_len: raise ValueError("Message is too long") if self._mac_status == MacStatus.PROCESSING_AUTH_DATA: # Associated data is concatenated with the least number # of zero bytes (possibly none) to reach alignment to # the 16 byte boundary (A.2.3) self._pad_cache_and_update() self._mac_status = MacStatus.PROCESSING_PLAINTEXT # Encrypt is equivalent to decrypt with the CTR mode plaintext = self._cipher.encrypt(ciphertext) self._update(plaintext) return plaintext def digest(self): """Compute the *binary* MAC tag. The caller invokes this function at the very end. This method returns the MAC that shall be sent to the receiver, together with the ciphertext. :Return: the MAC, as a byte string. """ if self.digest not in self._next: raise TypeError("digest() cannot be called when decrypting" " or validating a message") self._next = [self.digest] return self._digest() def _digest(self): if self._mac_tag: return self._mac_tag if self._assoc_len is None: assert(isinstance(self._cache, list)) self._assoc_len = sum([len(x) for x in self._cache]) if self._msg_len is not None: self._start_mac() else: if self._cumul_assoc_len < self._assoc_len: raise ValueError("Associated data is too short") if self._msg_len is None: self._msg_len = 0 self._start_mac() if self._cumul_msg_len != self._msg_len: raise ValueError("Message is too short") # Both associated data and payload are concatenated with the least # number of zero bytes (possibly none) that align it to the # 16 byte boundary (A.2.2 and A.2.3) self._pad_cache_and_update() # Step 8 in 6.1 (T xor MSB_Tlen(S_0)) self._mac_tag = strxor(self._t, self._s_0)[:self._mac_len] return self._mac_tag def hexdigest(self): """Compute the *printable* MAC tag. This method is like `digest`. :Return: the MAC, as a hexadecimal string. """ return "".join(["%02x" % bord(x) for x in self.digest()]) def verify(self, received_mac_tag): """Validate the *binary* MAC tag. The caller invokes this function at the very end. This method checks if the decrypted message is indeed valid (that is, if the key is correct) and it has not been tampered with while in transit. :Parameters: received_mac_tag : byte string This is the *binary* MAC, as received from the sender. :Raises ValueError: if the MAC does not match. The message has been tampered with or the key is incorrect. 
""" if self.verify not in self._next: raise TypeError("verify() cannot be called" " when encrypting a message") self._next = [self.verify] self._digest() secret = get_random_bytes(16) mac1 = BLAKE2s.new(digest_bits=160, key=secret, data=self._mac_tag) mac2 = BLAKE2s.new(digest_bits=160, key=secret, data=received_mac_tag) if mac1.digest() != mac2.digest(): raise ValueError("MAC check failed") def hexverify(self, hex_mac_tag): """Validate the *printable* MAC tag. This method is like `verify`. :Parameters: hex_mac_tag : string This is the *printable* MAC, as received from the sender. :Raises ValueError: if the MAC does not match. The message has been tampered with or the key is incorrect. """ self.verify(unhexlify(hex_mac_tag)) def encrypt_and_digest(self, plaintext): """Perform encrypt() and digest() in one step. :Parameters: plaintext : byte string The piece of data to encrypt. :Return: a tuple with two byte strings: - the encrypted data - the MAC """ return self.encrypt(plaintext), self.digest() def decrypt_and_verify(self, ciphertext, received_mac_tag): """Perform decrypt() and verify() in one step. :Parameters: ciphertext : byte string The piece of data to decrypt. received_mac_tag : byte string This is the *binary* MAC, as received from the sender. :Return: the decrypted data (byte string). :Raises ValueError: if the MAC does not match. The message has been tampered with or the key is incorrect. """ plaintext = self.decrypt(ciphertext) self.verify(received_mac_tag) return plaintext def _create_ccm_cipher(factory, **kwargs): """Create a new block cipher, configured in CCM mode. :Parameters: factory : module A symmetric cipher module from `Crypto.Cipher` (like `Crypto.Cipher.AES`). :Keywords: key : byte string The secret key to use in the symmetric cipher. nonce : byte string A value that must never be reused for any other encryption. Its length must be in the range ``[7..13]``. 11 or 12 bytes are reasonable values in general. Bear in mind that with CCM there is a trade-off between nonce length and maximum message size. If not specified, a 11 byte long random string is used. mac_len : integer Length of the MAC, in bytes. It must be even and in the range ``[4..16]``. The default is 16. msg_len : integer Length of the message to (de)cipher. If not specified, ``encrypt`` or ``decrypt`` may only be called once. assoc_len : integer Length of the associated data. If not specified, all data is internally buffered. """ try: key = key = kwargs.pop("key") except KeyError as e: raise TypeError("Missing parameter: " + str(e)) nonce = kwargs.pop("nonce", None) # N if nonce is None: nonce = get_random_bytes(11) mac_len = kwargs.pop("mac_len", factory.block_size) msg_len = kwargs.pop("msg_len", None) # p assoc_len = kwargs.pop("assoc_len", None) # a cipher_params = dict(kwargs) return CcmMode(factory, key, nonce, mac_len, msg_len, assoc_len, cipher_params)
mit
FFMG/myoddweb.piger
monitor/api/python/Python-3.7.2/Lib/pipes.py
172
8916
"""Conversion pipeline templates. The problem: ------------ Suppose you have some data that you want to convert to another format, such as from GIF image format to PPM image format. Maybe the conversion involves several steps (e.g. piping it through compress or uuencode). Some of the conversion steps may require that their input is a disk file, others may be able to read standard input; similar for their output. The input to the entire conversion may also be read from a disk file or from an open file, and similar for its output. The module lets you construct a pipeline template by sticking one or more conversion steps together. It will take care of creating and removing temporary files if they are necessary to hold intermediate data. You can then use the template to do conversions from many different sources to many different destinations. The temporary file names used are different each time the template is used. The templates are objects so you can create templates for many different conversion steps and store them in a dictionary, for instance. Directions: ----------- To create a template: t = Template() To add a conversion step to a template: t.append(command, kind) where kind is a string of two characters: the first is '-' if the command reads its standard input or 'f' if it requires a file; the second likewise for the output. The command must be valid /bin/sh syntax. If input or output files are required, they are passed as $IN and $OUT; otherwise, it must be possible to use the command in a pipeline. To add a conversion step at the beginning: t.prepend(command, kind) To convert a file to another file using a template: sts = t.copy(infile, outfile) If infile or outfile are the empty string, standard input is read or standard output is written, respectively. The return value is the exit status of the conversion pipeline. To open a file for reading or writing through a conversion pipeline: fp = t.open(file, mode) where mode is 'r' to read the file, or 'w' to write it -- just like for the built-in function open() or for os.popen(). To create a new template object initialized to a given one: t2 = t.clone() """ # ' import re import os import tempfile # we import the quote function rather than the module for backward compat # (quote used to be an undocumented but used function in pipes) from shlex import quote __all__ = ["Template"] # Conversion step kinds FILEIN_FILEOUT = 'ff' # Must read & write real files STDIN_FILEOUT = '-f' # Must write a real file FILEIN_STDOUT = 'f-' # Must read a real file STDIN_STDOUT = '--' # Normal pipeline element SOURCE = '.-' # Must be first, writes stdout SINK = '-.' 
# Must be last, reads stdin stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \ SOURCE, SINK] class Template: """Class representing a pipeline template.""" def __init__(self): """Template() returns a fresh pipeline template.""" self.debugging = 0 self.reset() def __repr__(self): """t.__repr__() implements repr(t).""" return '<Template instance, steps=%r>' % (self.steps,) def reset(self): """t.reset() restores a pipeline template to its initial state.""" self.steps = [] def clone(self): """t.clone() returns a new pipeline template with identical initial state as the current one.""" t = Template() t.steps = self.steps[:] t.debugging = self.debugging return t def debug(self, flag): """t.debug(flag) turns debugging on or off.""" self.debugging = flag def append(self, cmd, kind): """t.append(cmd, kind) adds a new step at the end.""" if type(cmd) is not type(''): raise TypeError('Template.append: cmd must be a string') if kind not in stepkinds: raise ValueError('Template.append: bad kind %r' % (kind,)) if kind == SOURCE: raise ValueError('Template.append: SOURCE can only be prepended') if self.steps and self.steps[-1][1] == SINK: raise ValueError('Template.append: already ends with SINK') if kind[0] == 'f' and not re.search(r'\$IN\b', cmd): raise ValueError('Template.append: missing $IN in cmd') if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd): raise ValueError('Template.append: missing $OUT in cmd') self.steps.append((cmd, kind)) def prepend(self, cmd, kind): """t.prepend(cmd, kind) adds a new step at the front.""" if type(cmd) is not type(''): raise TypeError('Template.prepend: cmd must be a string') if kind not in stepkinds: raise ValueError('Template.prepend: bad kind %r' % (kind,)) if kind == SINK: raise ValueError('Template.prepend: SINK can only be appended') if self.steps and self.steps[0][1] == SOURCE: raise ValueError('Template.prepend: already begins with SOURCE') if kind[0] == 'f' and not re.search(r'\$IN\b', cmd): raise ValueError('Template.prepend: missing $IN in cmd') if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd): raise ValueError('Template.prepend: missing $OUT in cmd') self.steps.insert(0, (cmd, kind)) def open(self, file, rw): """t.open(file, rw) returns a pipe or file object open for reading or writing; the file is the other end of the pipeline.""" if rw == 'r': return self.open_r(file) if rw == 'w': return self.open_w(file) raise ValueError('Template.open: rw must be \'r\' or \'w\', not %r' % (rw,)) def open_r(self, file): """t.open_r(file) and t.open_w(file) implement t.open(file, 'r') and t.open(file, 'w') respectively.""" if not self.steps: return open(file, 'r') if self.steps[-1][1] == SINK: raise ValueError('Template.open_r: pipeline ends width SINK') cmd = self.makepipeline(file, '') return os.popen(cmd, 'r') def open_w(self, file): if not self.steps: return open(file, 'w') if self.steps[0][1] == SOURCE: raise ValueError('Template.open_w: pipeline begins with SOURCE') cmd = self.makepipeline('', file) return os.popen(cmd, 'w') def copy(self, infile, outfile): return os.system(self.makepipeline(infile, outfile)) def makepipeline(self, infile, outfile): cmd = makepipeline(infile, self.steps, outfile) if self.debugging: print(cmd) cmd = 'set -x; ' + cmd return cmd def makepipeline(infile, steps, outfile): # Build a list with for each command: # [input filename or '', command string, kind, output filename or ''] list = [] for cmd, kind in steps: list.append(['', cmd, kind, '']) # # Make sure there is at least one step # if not list: 
list.append(['', 'cat', '--', '']) # # Take care of the input and output ends # [cmd, kind] = list[0][1:3] if kind[0] == 'f' and not infile: list.insert(0, ['', 'cat', '--', '']) list[0][0] = infile # [cmd, kind] = list[-1][1:3] if kind[1] == 'f' and not outfile: list.append(['', 'cat', '--', '']) list[-1][-1] = outfile # # Invent temporary files to connect stages that need files # garbage = [] for i in range(1, len(list)): lkind = list[i-1][2] rkind = list[i][2] if lkind[1] == 'f' or rkind[0] == 'f': (fd, temp) = tempfile.mkstemp() os.close(fd) garbage.append(temp) list[i-1][-1] = list[i][0] = temp # for item in list: [inf, cmd, kind, outf] = item if kind[1] == 'f': cmd = 'OUT=' + quote(outf) + '; ' + cmd if kind[0] == 'f': cmd = 'IN=' + quote(inf) + '; ' + cmd if kind[0] == '-' and inf: cmd = cmd + ' <' + quote(inf) if kind[1] == '-' and outf: cmd = cmd + ' >' + quote(outf) item[1] = cmd # cmdlist = list[0][1] for item in list[1:]: [cmd, kind] = item[1:3] if item[0] == '': if 'f' in kind: cmd = '{ ' + cmd + '; }' cmdlist = cmdlist + ' |\n' + cmd else: cmdlist = cmdlist + '\n' + cmd # if garbage: rmcmd = 'rm -f' for file in garbage: rmcmd = rmcmd + ' ' + quote(file) trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15' cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd # return cmdlist
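Following the usage described in the module docstring, a template with a single stdin-to-stdout step can filter data on its way to a file. The sketch below assumes a POSIX shell and uses 'tr' purely as an example command:

# Usage sketch following the module docstring above (requires a POSIX shell;
# 'tr' and the file name are just examples).
import pipes

t = pipes.Template()
t.append('tr a-z A-Z', '--')             # reads stdin, writes stdout

with t.open('greeting.txt', 'w') as f:   # data written here is piped through 'tr'
    f.write('hello, pipeline\n')

print(open('greeting.txt').read())       # -> HELLO, PIPELINE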
gpl-2.0
selste/micropython
py/makecompresseddata.py
14
6800
from __future__ import print_function import collections import re import sys import gzip import zlib _COMPRESSED_MARKER = 0xFF def check_non_ascii(msg): for c in msg: if ord(c) >= 0x80: print( 'Unable to generate compressed data: message "{}" contains a non-ascii character "{}".'.format( msg, c ), file=sys.stderr, ) sys.exit(1) # Replace <char><space> with <char | 0x80>. # Trival scheme to demo/test. def space_compression(error_strings): for line in error_strings: check_non_ascii(line) result = "" for i in range(len(line)): if i > 0 and line[i] == " ": result = result[:-1] result += "\\{:03o}".format(ord(line[i - 1])) else: result += line[i] error_strings[line] = result return None # Replace common words with <0x80 | index>. # Index is into a table of words stored as aaaaa<0x80|a>bbb<0x80|b>... # Replaced words are assumed to have spaces either side to avoid having to store the spaces in the compressed strings. def word_compression(error_strings): topn = collections.Counter() for line in error_strings.keys(): check_non_ascii(line) for word in line.split(" "): topn[word] += 1 # Order not just by frequency, but by expected saving. i.e. prefer a longer string that is used less frequently. # Use the word itself for ties so that compression is deterministic. def bytes_saved(item): w, n = item return -((len(w) + 1) * (n - 1)), w top128 = sorted(topn.items(), key=bytes_saved)[:128] index = [w for w, _ in top128] index_lookup = {w: i for i, w in enumerate(index)} for line in error_strings.keys(): result = "" need_space = False for word in line.split(" "): if word in index_lookup: result += "\\{:03o}".format(0b10000000 | index_lookup[word]) need_space = False else: if need_space: result += " " need_space = True result += word error_strings[line] = result.strip() return "".join(w[:-1] + "\\{:03o}".format(0b10000000 | ord(w[-1])) for w in index) # Replace chars in text with variable length bit sequence. # For comparison only (the table is not emitted). def huffman_compression(error_strings): # https://github.com/tannewt/huffman import huffman all_strings = "".join(error_strings) cb = huffman.codebook(collections.Counter(all_strings).items()) for line in error_strings: b = "1" for c in line: b += cb[c] n = len(b) if n % 8 != 0: n += 8 - (n % 8) result = "" for i in range(0, n, 8): result += "\\{:03o}".format(int(b[i : i + 8], 2)) if len(result) > len(line) * 4: result = line error_strings[line] = result # TODO: This would be the prefix lengths and the table ordering. return "_" * (10 + len(cb)) # Replace common N-letter sequences with <0x80 | index>, where # the common sequences are stored in a separate table. # This isn't very useful, need a smarter way to find top-ngrams. 
def ngram_compression(error_strings): topn = collections.Counter() N = 2 for line in error_strings.keys(): check_non_ascii(line) if len(line) < N: continue for i in range(0, len(line) - N, N): topn[line[i : i + N]] += 1 def bytes_saved(item): w, n = item return -(len(w) * (n - 1)) top128 = sorted(topn.items(), key=bytes_saved)[:128] index = [w for w, _ in top128] index_lookup = {w: i for i, w in enumerate(index)} for line in error_strings.keys(): result = "" for i in range(0, len(line) - N + 1, N): word = line[i : i + N] if word in index_lookup: result += "\\{:03o}".format(0b10000000 | index_lookup[word]) else: result += word if len(line) % N != 0: result += line[len(line) - len(line) % N :] error_strings[line] = result.strip() return "".join(index) def main(collected_path, fn): error_strings = collections.OrderedDict() max_uncompressed_len = 0 num_uses = 0 # Read in all MP_ERROR_TEXT strings. with open(collected_path, "r") as f: for line in f: line = line.strip() if not line: continue num_uses += 1 error_strings[line] = None max_uncompressed_len = max(max_uncompressed_len, len(line)) # So that objexcept.c can figure out how big the buffer needs to be. print("#define MP_MAX_UNCOMPRESSED_TEXT_LEN ({})".format(max_uncompressed_len)) # Run the compression. compressed_data = fn(error_strings) # Print the data table. print('MP_COMPRESSED_DATA("{}")'.format(compressed_data)) # Print the replacements. for uncomp, comp in error_strings.items(): if uncomp == comp: prefix = "" else: prefix = "\\{:03o}".format(_COMPRESSED_MARKER) print('MP_MATCH_COMPRESSED("{}", "{}{}")'.format(uncomp, prefix, comp)) # Used to calculate the "true" length of the (escaped) compressed strings. def unescape(s): return re.sub(r"\\\d\d\d", "!", s) # Stats. Note this doesn't include the cost of the decompressor code. uncomp_len = sum(len(s) + 1 for s in error_strings.keys()) comp_len = sum(1 + len(unescape(s)) + 1 for s in error_strings.values()) data_len = len(compressed_data) + 1 if compressed_data else 0 print("// Total input length: {}".format(uncomp_len)) print("// Total compressed length: {}".format(comp_len)) print("// Total data length: {}".format(data_len)) print("// Predicted saving: {}".format(uncomp_len - comp_len - data_len)) # Somewhat meaningless comparison to zlib/gzip. all_input_bytes = "\\0".join(error_strings.keys()).encode() print() if hasattr(gzip, "compress"): gzip_len = len(gzip.compress(all_input_bytes)) + num_uses * 4 print("// gzip length: {}".format(gzip_len)) print("// Percentage of gzip: {:.1f}%".format(100 * (comp_len + data_len) / gzip_len)) if hasattr(zlib, "compress"): zlib_len = len(zlib.compress(all_input_bytes)) + num_uses * 4 print("// zlib length: {}".format(zlib_len)) print("// Percentage of zlib: {:.1f}%".format(100 * (comp_len + data_len) / zlib_len)) if __name__ == "__main__": main(sys.argv[1], word_compression)
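word_compression() replaces the most frequent words with a single byte whose high bit is set and whose low seven bits index a shared word table. The toy sketch below illustrates that encoding idea on a few hand-written strings; it simplifies the real script (no octal escaping, simplified space handling) and is not part of the build:

# Toy illustration of the word-table idea used by word_compression() above.
import collections

strings = ["invalid argument", "invalid syntax", "argument should be an int"]

counts = collections.Counter(w for s in strings for w in s.split())
# Only words seen more than once go into the shared table here.
table = [w for w, n in counts.most_common(128) if n > 1]
index = {w: i for i, w in enumerate(table)}

def encode(s):
    # Frequent words become one byte (0x80 | table index); everything else is
    # kept literally, with a trailing space standing in for the separator.
    return b"".join(
        bytes([0x80 | index[w]]) if w in index else (w.encode() + b" ")
        for w in s.split()
    )

for s in strings:
    print(s, "->", encode(s))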
mit
GbalsaC/bitnamiP
common/djangoapps/track/backends/django.py
102
2282
""" Event tracker backend that saves events to a Django database. """ # TODO: this module is very specific to the event schema, and is only # brought here for legacy support. It should be updated when the # schema changes or eventually deprecated. from __future__ import absolute_import import logging from django.db import models from track.backends import BaseBackend log = logging.getLogger('track.backends.django') LOGFIELDS = [ 'username', 'ip', 'event_source', 'event_type', 'event', 'agent', 'page', 'time', 'host', ] class TrackingLog(models.Model): """Defines the fields that are stored in the tracking log database.""" dtcreated = models.DateTimeField('creation date', auto_now_add=True) username = models.CharField(max_length=32, blank=True) ip = models.CharField(max_length=32, blank=True) event_source = models.CharField(max_length=32) event_type = models.CharField(max_length=512, blank=True) event = models.TextField(blank=True) agent = models.CharField(max_length=256, blank=True) page = models.CharField(max_length=512, blank=True, null=True) time = models.DateTimeField('event time') host = models.CharField(max_length=64, blank=True) class Meta: app_label = 'track' db_table = 'track_trackinglog' def __unicode__(self): fmt = ( u"[{self.time}] {self.username}@{self.ip}: " u"{self.event_source}| {self.event_type} | " u"{self.page} | {self.event}" ) return fmt.format(self=self) class DjangoBackend(BaseBackend): """Event tracker backend that saves to a Django database""" def __init__(self, name='default', **options): """ Configure database used by the backend. :Parameters: - `name` is the name of the database as specified in the project settings. """ super(DjangoBackend, self).__init__(**options) self.name = name def send(self, event): field_values = {x: event.get(x, '') for x in LOGFIELDS} tldat = TrackingLog(**field_values) try: tldat.save(using=self.name) except Exception as e: # pylint: disable=broad-except log.exception(e)
agpl-3.0
mmahut/openshift-ansible
roles/lib_openshift/src/ansible/oc_pvc.py
60
1107
# pylint: skip-file # flake8: noqa #pylint: disable=too-many-branches def main(): ''' ansible oc module for pvc ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), volume_capacity=dict(default='1G', type='str'), storage_class_name=dict(default=None, required=False, type='str'), selector=dict(default=None, required=False, type='dict'), access_modes=dict(default=['ReadWriteOnce'], type='list'), ), supports_check_mode=True, ) rval = OCPVC.run_ansible(module.params, module.check_mode) if 'failed' in rval: module.fail_json(**rval) return module.exit_json(**rval) if __name__ == '__main__': main()
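main() wraps OCPVC.run_ansible in an AnsibleModule. The dictionary below mirrors the argument_spec above with example values; calling run_ansible directly like this is only an illustration, since the module is normally executed by Ansible and it assumes OCPVC is importable from the assembled library:

# Example of the parameter dictionary implied by the argument_spec above;
# the claim name, namespace and capacity are illustrative values.
params = {
    'kubeconfig': '/etc/origin/master/admin.kubeconfig',
    'state': 'present',
    'debug': False,
    'name': 'registry-claim',
    'namespace': 'default',
    'volume_capacity': '10G',
    'storage_class_name': None,
    'selector': None,
    'access_modes': ['ReadWriteOnce'],
}

# Second positional argument is check_mode, as in main() above.
result = OCPVC.run_ansible(params, False)
print(result)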
apache-2.0
QuickSander/CouchPotatoServer
couchpotato/core/notifications/pushbullet.py
32
3088
import base64 import json from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import splitString from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Pushbullet' class Pushbullet(Notification): url = 'https://api.pushbullet.com/v2/%s' def notify(self, message = '', data = None, listener = None): if not data: data = {} devices = self.getDevices() if devices is None: return False # Get all the device IDs linked to this user if not len(devices): devices = [None] successful = 0 for device in devices: response = self.request( 'pushes', cache = False, device_iden = device, type = 'note', title = self.default_title, body = toUnicode(message) ) if response: successful += 1 else: log.error('Unable to push notification to Pushbullet device with ID %s' % device) return successful == len(devices) def getDevices(self): return splitString(self.conf('devices')) def request(self, method, cache = True, **kwargs): try: base64string = base64.encodestring('%s:' % self.conf('api_key'))[:-1] headers = { "Authorization": "Basic %s" % base64string } if cache: return self.getJsonData(self.url % method, headers = headers, data = kwargs) else: data = self.urlopen(self.url % method, headers = headers, data = kwargs) return json.loads(data) except Exception as ex: log.error('Pushbullet request failed') log.debug(ex) return None config = [{ 'name': 'pushbullet', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'pushbullet', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'api_key', 'label': 'Access Token', 'description': 'Can be found on <a href="https://www.pushbullet.com/account" target="_blank">Account Settings</a>', }, { 'name': 'devices', 'default': '', 'advanced': True, 'description': 'IDs of devices to send notifications to, empty = all devices' }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
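Pushbullet.request() sends HTTP Basic auth built from the access token to the v2 API. The stand-alone sketch below reproduces an equivalent 'note' push using only the standard library and a JSON body; the token is a placeholder, and the plugin itself goes through CouchPotato's urlopen helpers rather than urllib directly:

# Stand-alone sketch of the HTTP call made by Pushbullet.request() above.
import base64
import json
import urllib.request

API_KEY = 'o.xxxxxxxxxxxxxxxx'   # placeholder access token
auth = base64.b64encode(('%s:' % API_KEY).encode()).decode()

payload = json.dumps({
    'type': 'note',
    'title': 'CouchPotato',
    'body': 'Movie snatched',
}).encode()

req = urllib.request.Request(
    'https://api.pushbullet.com/v2/pushes',
    data=payload,
    headers={'Authorization': 'Basic %s' % auth,
             'Content-Type': 'application/json'},
)
print(urllib.request.urlopen(req).read())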
gpl-3.0
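The request() helper above authenticates by Base64-encoding the access token into a Basic Authorization header (token as username, empty password). A small Python 3 sketch of the same header construction; the token value is a placeholder:

import base64

def basic_auth_header(api_key):
    # "token:" encoded with Base64, the same idea as the encodestring call above.
    token = base64.b64encode('{}:'.format(api_key).encode('utf-8')).decode('ascii')
    return {'Authorization': 'Basic {}'.format(token)}

print(basic_auth_header('my-placeholder-token'))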
Lektorium-LLC/edx-platform
lms/djangoapps/certificates/management/commands/create_fake_cert.py
5
3521
"""Utility for testing certificate display. This command will create a fake certificate for a user in a course. The certificate will display on the student's dashboard, but no PDF will be generated. Example usage: $ ./manage.py lms create_fake_cert test_user edX/DemoX/Demo_Course --mode honor --grade 0.89 """ import logging from optparse import make_option from django.contrib.auth.models import User from django.core.management.base import BaseCommand, CommandError from opaque_keys.edx.keys import CourseKey from certificates.models import CertificateStatuses, GeneratedCertificate LOGGER = logging.getLogger(__name__) class Command(BaseCommand): """Create a fake certificate for a user in a course. """ USAGE = u'Usage: create_fake_cert <USERNAME> <COURSE_KEY> --mode <MODE> --status <STATUS> --grade <GRADE>' option_list = BaseCommand.option_list + ( make_option( '-m', '--mode', metavar='CERT_MODE', dest='cert_mode', default='honor', help='The course mode of the certificate (e.g. "honor", "verified", or "professional")' ), make_option( '-s', '--status', metavar='CERT_STATUS', dest='status', default=CertificateStatuses.downloadable, help='The status of the certificate' ), make_option( '-g', '--grade', metavar='CERT_GRADE', dest='grade', default='', help='The grade for the course, as a decimal (e.g. "0.89" for 89%)' ), ) def handle(self, *args, **options): """Create a fake certificate for a user. Arguments: username (unicode): Identifier for the certificate's user. course_key (unicode): Identifier for the certificate's course. Keyword Arguments: cert_mode (str): The mode of the certificate (e.g "honor") status (str): The status of the certificate (e.g. "downloadable") grade (str): The grade of the certificate (e.g "0.89" for 89%) Raises: CommandError """ if len(args) < 2: raise CommandError(self.USAGE) user = User.objects.get(username=args[0]) course_key = CourseKey.from_string(args[1]) cert_mode = options.get('cert_mode', 'honor') status = options.get('status', CertificateStatuses.downloadable) grade = options.get('grade', '') cert, created = GeneratedCertificate.eligible_certificates.get_or_create( user=user, course_id=course_key ) cert.mode = cert_mode cert.status = status cert.grade = grade if status == CertificateStatuses.downloadable: cert.download_uuid = 'test' cert.verify_uuid = 'test' cert.download_url = 'http://www.example.com' cert.save() if created: LOGGER.info( u"Created certificate for user %s in course %s " u"with mode %s, status %s, " u"and grade %s", user.id, unicode(course_key), cert_mode, status, grade ) else: LOGGER.info( u"Updated certificate for user %s in course %s " u"with mode %s, status %s, " u"and grade %s", user.id, unicode(course_key), cert_mode, status, grade )
agpl-3.0
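The management command above still declares its flags through the legacy optparse-style option_list. As a rough, framework-free illustration of the same three options, here is an equivalent expressed with plain argparse; this is not Django's add_arguments API, and the defaults below are copied from the command purely for illustration:

import argparse

parser = argparse.ArgumentParser(prog='create_fake_cert')
parser.add_argument('username')
parser.add_argument('course_key')
parser.add_argument('-m', '--mode', dest='cert_mode', default='honor',
                    help='The course mode of the certificate')
parser.add_argument('-s', '--status', dest='status', default='downloadable',
                    help='The status of the certificate')
parser.add_argument('-g', '--grade', dest='grade', default='',
                    help='The grade for the course, as a decimal')

args = parser.parse_args(['test_user', 'edX/DemoX/Demo_Course', '--grade', '0.89'])
print(args.cert_mode, args.status, args.grade)  # -> honor downloadable 0.89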
DavidLP/home-assistant
homeassistant/components/velbus/climate.py
6
1975
"""Support for Velbus thermostat.""" import logging from homeassistant.components.climate import ClimateDevice from homeassistant.components.climate.const import ( STATE_HEAT, SUPPORT_TARGET_TEMPERATURE) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT from . import DOMAIN as VELBUS_DOMAIN, VelbusEntity _LOGGER = logging.getLogger(__name__) SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE) async def async_setup_platform( hass, config, async_add_entities, discovery_info=None): """Set up the Velbus thermostat platform.""" if discovery_info is None: return sensors = [] for sensor in discovery_info: module = hass.data[VELBUS_DOMAIN].get_module(sensor[0]) channel = sensor[1] sensors.append(VelbusClimate(module, channel)) async_add_entities(sensors) class VelbusClimate(VelbusEntity, ClimateDevice): """Representation of a Velbus thermostat.""" @property def supported_features(self): """Return the list off supported features.""" return SUPPORT_FLAGS @property def temperature_unit(self): """Return the unit this state is expressed in.""" if self._module.get_unit(self._channel) == '°C': return TEMP_CELSIUS return TEMP_FAHRENHEIT @property def current_temperature(self): """Return the current temperature.""" return self._module.get_state(self._channel) @property def current_operation(self): """Return current operation.""" return STATE_HEAT @property def target_temperature(self): """Return the temperature we try to reach.""" return self._module.get_climate_target() def set_temperature(self, **kwargs): """Set new target temperatures.""" temp = kwargs.get(ATTR_TEMPERATURE) if temp is None: return self._module.set_temp(temp) self.schedule_update_ha_state()
apache-2.0
binhex/moviegrabber
lib/site-packages/cherrypy/test/test_json.py
42
2541
import cherrypy from cherrypy.test import helper from cherrypy._cpcompat import json class JsonTest(helper.CPWebCase): def setup_server(): class Root(object): def plain(self): return 'hello' plain.exposed = True def json_string(self): return 'hello' json_string.exposed = True json_string._cp_config = {'tools.json_out.on': True} def json_list(self): return ['a', 'b', 42] json_list.exposed = True json_list._cp_config = {'tools.json_out.on': True} def json_dict(self): return {'answer': 42} json_dict.exposed = True json_dict._cp_config = {'tools.json_out.on': True} def json_post(self): if cherrypy.request.json == [13, 'c']: return 'ok' else: return 'nok' json_post.exposed = True json_post._cp_config = {'tools.json_in.on': True} root = Root() cherrypy.tree.mount(root) setup_server = staticmethod(setup_server) def test_json_output(self): if json is None: self.skip("json not found ") return self.getPage("/plain") self.assertBody("hello") self.getPage("/json_string") self.assertBody('"hello"') self.getPage("/json_list") self.assertBody('["a", "b", 42]') self.getPage("/json_dict") self.assertBody('{"answer": 42}') def test_json_input(self): if json is None: self.skip("json not found ") return body = '[13, "c"]' headers = [('Content-Type', 'application/json'), ('Content-Length', str(len(body)))] self.getPage("/json_post", method="POST", headers=headers, body=body) self.assertBody('ok') body = '[13, "c"]' headers = [('Content-Type', 'text/plain'), ('Content-Length', str(len(body)))] self.getPage("/json_post", method="POST", headers=headers, body=body) self.assertStatus(415, 'Expected an application/json content type') body = '[13, -]' headers = [('Content-Type', 'application/json'), ('Content-Length', str(len(body)))] self.getPage("/json_post", method="POST", headers=headers, body=body) self.assertStatus(400, 'Invalid JSON document')
gpl-3.0
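The JSON input test above posts a raw JSON body with explicit Content-Type and Content-Length headers. The same payload can be prepared with nothing but the standard library (self-contained, no CherryPy required):

import json

body = json.dumps([13, 'c'])
headers = [('Content-Type', 'application/json'),
           ('Content-Length', str(len(body)))]
print(body)     # -> [13, "c"]
print(headers)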
bpsinc-native/src_tools_gyp
test/rules-rebuild/gyptest-all.py
351
1662
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that a rule that generates multiple outputs rebuilds correctly when the inputs change. """ import TestGyp test = TestGyp.TestGyp(workdir='workarea_all') test.run_gyp('same_target.gyp', chdir='src') test.relocate('src', 'relocate/src') test.build('same_target.gyp', test.ALL, chdir='relocate/src') expect = """\ Hello from main.c Hello from prog1.in! Hello from prog2.in! """ test.run_built_executable('program', chdir='relocate/src', stdout=expect) test.up_to_date('same_target.gyp', 'program', chdir='relocate/src') test.sleep() contents = test.read(['relocate', 'src', 'prog1.in']) contents = contents.replace('!', ' AGAIN!') test.write(['relocate', 'src', 'prog1.in'], contents) test.build('same_target.gyp', test.ALL, chdir='relocate/src') expect = """\ Hello from main.c Hello from prog1.in AGAIN! Hello from prog2.in! """ test.run_built_executable('program', chdir='relocate/src', stdout=expect) test.up_to_date('same_target.gyp', 'program', chdir='relocate/src') test.sleep() contents = test.read(['relocate', 'src', 'prog2.in']) contents = contents.replace('!', ' AGAIN!') test.write(['relocate', 'src', 'prog2.in'], contents) test.build('same_target.gyp', test.ALL, chdir='relocate/src') expect = """\ Hello from main.c Hello from prog1.in AGAIN! Hello from prog2.in AGAIN! """ test.run_built_executable('program', chdir='relocate/src', stdout=expect) test.up_to_date('same_target.gyp', 'program', chdir='relocate/src') test.pass_test()
bsd-3-clause
yosukesuzuki/calendar-app
project/kay/management/utils.py
5
1772
# -*- coding: utf-8 -*- """ Kay framework. :Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp> All rights reserved. :license: BSD, see LICENSE for more details. """ import sys import getpass from google.appengine.ext.remote_api import remote_api_stub import kay.app from kay.misc import get_appid def print_status(msg='',nl=True): if nl: print(msg) else: print(msg), sys.stdout.flush() def get_user_apps(): ret = [] # retrieve main app main_app = kay.app.get_application() apps = [main_app.app] for key, submount_app in main_app.mounts.iteritems(): if not hasattr(submount_app, 'app_settings') or key == "/_kay": continue apps.append(submount_app) for app in apps: for user_app in app.app_settings.INSTALLED_APPS: if user_app.startswith("kay."): continue ret.append(user_app) return ret def auth(): return (raw_input('Username:'), getpass.getpass('Password:')) def dummy_auth(): return ('a', 'a') def create_db_manage_script(main_func=None, clean_func=None, description=None): def inner(appid=('a', ''), host=('h', ''), path=('p', ''), secure=True, clean=('c', False)): if not appid: appid = get_appid() if not host: host = "%s.appspot.com" % appid if not path: path = '/_ah/remote_api' if 'localhost' in host: auth_func = dummy_auth else: auth_func = auth remote_api_stub.ConfigureRemoteApi(None, path, auth_func, host, secure=secure, save_cookies=True) remote_api_stub.MaybeInvokeAuthentication() if clean and callable(clean_func): clean_func() if callable(main_func): main_func() if description: inner.__doc__ = description return inner
mit
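create_db_manage_script() is a small function factory: it builds the command closure and, when a description is supplied, installs it as the closure's docstring so the management framework can show it as help text. A stripped-down sketch of that pattern (the names and the sample callable below are illustrative):

def make_command(main_func=None, description=None):
    """Return a command callable, mirroring the factory above."""
    def inner():
        if callable(main_func):
            main_func()
    if description:
        inner.__doc__ = description
    return inner

cmd = make_command(lambda: print('running main...'), description='Sync the datastore.')
print(cmd.__doc__)  # -> Sync the datastore.
cmd()               # -> running main...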
tarequeh/django-modular-blog
fragments/constants.py
2
1514
""" Values that are used throughout the app """ FRAGMENT_TYPE_PLAINTEXT = 'plaintext' FRAGMENT_TYPE_HTML = 'html' FRAGMENT_TYPE_MARKDOWN = 'markdown' FRAGMENT_TYPE_IMAGE = 'image' FRAGMENT_TYPE_CODE = 'code' FRAGMENT_TYPE_EMBED = 'embed' FRAGMENT_TYPE_CHOICES = ( (FRAGMENT_TYPE_PLAINTEXT, 'Plaintext'), (FRAGMENT_TYPE_HTML, 'HTML'), (FRAGMENT_TYPE_MARKDOWN, 'Markdown'), (FRAGMENT_TYPE_IMAGE, 'Image'), (FRAGMENT_TYPE_CODE, 'Code'), (FRAGMENT_TYPE_EMBED, 'Embed'), ) CODE_LANGUAGE_GENERIC = 'generic' CODE_LANGUAGE_PYTHON = 'python' CODE_LANGUAGE_BASH = 'bash' CODE_LANGUAGE_JAVASCRIPT = 'javascript' CODE_LANGUAGE_HTML = 'html' CODE_LANGUAGE_CSS = 'css' CODE_LANGUAGE_CHOICES = ( (CODE_LANGUAGE_GENERIC, 'Generic'), (CODE_LANGUAGE_PYTHON, 'Python'), (CODE_LANGUAGE_BASH, 'Bash'), (CODE_LANGUAGE_JAVASCRIPT, 'JavaScript'), (CODE_LANGUAGE_HTML, 'HTML'), (CODE_LANGUAGE_CSS, 'CSS'), ) EMBED_TYPE_RAW = 'raw' EMBED_TYPE_VIMEO = 'vimeo' EMBED_TYPE_YOUTUBE = 'youtube' EMBED_TYPE_TWEET = 'tweet' EMBED_TYPE_INSTAGRAM = 'instagram' EMBED_TYPE_CHOICES = ( (EMBED_TYPE_RAW, 'Raw'), (EMBED_TYPE_VIMEO, 'Vimeo'), (EMBED_TYPE_YOUTUBE, 'Youtube'), (EMBED_TYPE_TWEET, 'Tweet'), (EMBED_TYPE_INSTAGRAM, 'Instagram'), ) POST_STATE_DRAFT = 'draft' POST_STATE_PUBLISHED = 'published' POST_STATE_ARCHIVED = 'archived' POST_STATE_CHOICES = ( (POST_STATE_DRAFT, 'Draft'), (POST_STATE_PUBLISHED, 'Published'), (POST_STATE_ARCHIVED, 'Archived'), )
mit
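These (value, label) pairs follow Django's model-field choices convention: the first element is stored in the database, the second is shown to users. A minimal sketch of how such a tuple doubles as a label lookup (only a subset of the constants is repeated here):

FRAGMENT_TYPE_HTML = 'html'
FRAGMENT_TYPE_CHOICES = (
    ('plaintext', 'Plaintext'),
    (FRAGMENT_TYPE_HTML, 'HTML'),
    ('markdown', 'Markdown'),
)

# dict() over the choices gives a cheap stored-value -> display-label mapping.
labels = dict(FRAGMENT_TYPE_CHOICES)
print(labels[FRAGMENT_TYPE_HTML])  # -> HTML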
DavidTingley/ephys-processing-pipeline
installation/klustaviewa-0.3.0/build/lib.linux-x86_64-2.7/klustaviewa/views/treemodel.py
2
7148
from qtools import QtGui, QtCore from collections import OrderedDict # Generic classes # --------------- class TreeItem(object): def __init__(self, parent=None, data=None): """data is an OrderedDict""" self.parent_item = parent self.index = QtCore.QModelIndex() self.children = [] # by default: root if data is None: data = OrderedDict(name='root') self.item_data = data def appendChild(self, child): self.children.append(child) def removeChild(self, child): self.children.remove(child) def removeChildAt(self, row): self.children.pop(row) def insertChild(self, child, index): self.children.insert(index, child) def child(self, row): return self.children[row] def rowCount(self): return len(self.children) def columnCount(self): return len(self.item_data) def data(self, column): if column >= self.columnCount(): return None return self.item_data.get(self.item_data.keys()[column], None) def row(self): if self.parent_item is None: return 0 return self.parent_item.children.index(self) def parent(self): return self.parent_item class TreeModel(QtCore.QAbstractItemModel): def __init__(self, headers): QtCore.QAbstractItemModel.__init__(self) self.root_item = TreeItem() self.headers = headers def add_node(self, item_class=None, item=None, parent=None, **kwargs): """Add a node in the tree. """ if parent is None: parent = self.root_item if item is None: if item_class is None: item_class = TreeItem item = item_class(parent=parent, **kwargs) row = parent.rowCount() item.index = self.createIndex(row, 0, item) self.beginInsertRows(parent.index, row-1, row-1) parent.appendChild(item) self.endInsertRows() return item def remove_node(self, child, parent=None): if parent is None: parent = self.root_item row = child.row() self.beginRemoveRows(parent.index, row, row) parent.removeChild(child) self.endRemoveRows() # def move_node(self, child, parent_target, child_target=None): # row = child.row() # parent_source = child.parent() # if child_target is not None: # child_target_row = child_target.row() # else: # child_target_row = parent_target.rowCount() # canmove = self.beginMoveRows(parent_source.index, row, row, # parent_target.index, child_target_row) # if canmove: # if parent is None: # parent = self.root_item # if item is None: # if item_class is None: # item_class = TreeItem # item = child._(parent=parent, **kwargs) # row = parent.rowCount() # item.index = self.createIndex(row, 0, item) # parent_target.insertChild(child_new, child_target_row) # if parent_target == parent_source: # if child_target_row < row: # row += 1 # parent_source.removeChildAt(row) # else: # parent_source.removeChild(child) # # child.parent_item = parent_target # self.endMoveRows() def get_descendants(self, parents): if type(parents) != list: parents = [parents] nodes = [] for parent in parents: nodes.append(parent) if parent.children: nodes.extend(self.get_descendants(parent.children)) return nodes def all_nodes(self): return self.get_descendants(self.root_item) def index(self, row, column, parent=None): if parent is None: parent = self.root_item.index if not self.hasIndex(row, column, parent): return QtCore.QModelIndex() if not parent.isValid(): parent_item = self.root_item else: parent_item = parent.internalPointer() child_item = parent_item.child(row) if child_item: index = self.createIndex(row, column, child_item) child_item.index = index return index else: return QtCore.QModelIndex() def parent(self, item): if not item.isValid(): return QtCore.QModelIndex() item = item.internalPointer() parent_item = item.parent() if (parent_item == 
self.root_item): return QtCore.QModelIndex() index = self.createIndex(parent_item.row(), 0, parent_item) parent_item.index = index return index def rowCount(self, parent=None): if parent is None: parent = QtCore.QModelIndex() if parent.column() > 0: return 0 if not parent.isValid(): parent_item = self.root_item else: parent_item = parent.internalPointer() return parent_item.rowCount() def columnCount(self, parent=None): if parent is None: parent = QtCore.QModelIndex() if not parent.isValid(): return len(self.headers) return parent.internalPointer().columnCount() def data(self, index, role): if role != QtCore.Qt.DisplayRole: return None item = index.internalPointer() return item.data(index.column()) def setData(self, index, data, role): return False def supportedDropActions(self): return QtCore.Qt.MoveAction def flags(self, index): if not index.isValid(): return QtCore.Qt.ItemIsEnabled return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | \ QtCore.Qt.ItemIsDragEnabled | QtCore.Qt.ItemIsDropEnabled def mimeTypes(self): return ['text/xml'] def mimeData(self, indexes): data = ",".join(set([str(index.internalPointer()) for index in indexes])) mimedata = QtCore.QMimeData() mimedata.setData('text/xml', data) return mimedata def dropMimeData(self, data, action, row, column, parent): parent_item = parent.internalPointer() target = parent_item sources = data.data('text/xml').split(',') self.drag(target, sources) return True def drag(self, target, sources): """ To be overriden. """ print "drag", target, sources
gpl-3.0
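TreeItem carries the parent/child bookkeeping that TreeModel translates into Qt model indexes; the row() and child-count logic can be exercised without Qt at all. A minimal Qt-free stand-in (the class below is an illustration, not the real TreeItem):

class Node:
    """Qt-free stand-in mirroring TreeItem's parent/child bookkeeping."""
    def __init__(self, name, parent=None):
        self.name = name
        self.parent_item = parent
        self.children = []

    def append_child(self, child):
        self.children.append(child)

    def row(self):
        # A node's row is its position among its parent's children.
        if self.parent_item is None:
            return 0
        return self.parent_item.children.index(self)

root = Node('root')
a, b = Node('a', root), Node('b', root)
root.append_child(a)
root.append_child(b)
print(b.row(), len(root.children))  # -> 1 2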
Bismarrck/tensorflow
tensorflow/python/data/experimental/kernel_tests/parallel_interleave_test.py
10
28891
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.parallel_interleave()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import threading import time import numpy as np from six.moves import zip_longest from tensorflow.python.data.experimental.ops import interleave_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ParallelInterleaveTest(test_base.DatasetTestBase): def setUp(self): self.error = None self.repeat_count = 2 # Set up threading events used to sequence when items are produced that # are subsequently interleaved. These events allow us to deterministically # simulate slowdowns and force sloppiness. self.read_coordination_events = {} self.write_coordination_events = {} # input values [4, 5, 6] are the common case for the tests; set defaults for i in range(4, 7): self.read_coordination_events[i] = threading.Semaphore(0) self.write_coordination_events[i] = threading.Event() def dataset_fn(self, input_values, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements): def map_py_fn(x): self.write_coordination_events[x].wait() self.write_coordination_events[x].clear() self.read_coordination_events[x].release() if self.error: err = self.error self.error = None raise err # pylint: disable=raising-bad-type return x * x def map_fn(x): return script_ops.py_func(map_py_fn, [x], x.dtype) def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset.repeat(x) return dataset.map(map_fn) return dataset_ops.Dataset.from_tensor_slices(input_values).repeat( self.repeat_count).apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements)) def _interleave(self, lists, cycle_length, block_length): """Python implementation of interleave used for testing.""" num_open = 0 # `all_iterators` acts as a queue of iterators over each element of `lists`. all_iterators = [iter(l) for l in lists] # `open_iterators` are the iterators whose elements are currently being # interleaved. 
open_iterators = [] for i in range(cycle_length): if all_iterators: open_iterators.append(all_iterators.pop(0)) num_open += 1 else: open_iterators.append(None) while num_open or all_iterators: for i in range(cycle_length): if open_iterators[i] is None: if all_iterators: open_iterators[i] = all_iterators.pop(0) num_open += 1 else: continue for _ in range(block_length): try: yield next(open_iterators[i]) except StopIteration: open_iterators[i] = None num_open -= 1 break def testPythonImplementation(self): input_lists = [[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]] # Cycle length 1 acts like `Dataset.flat_map()`. expected_elements = itertools.chain(*input_lists) for expected, produced in zip(expected_elements, self._interleave(input_lists, 1, 1)): self.assertEqual(expected, produced) # Cycle length > 1. expected_elements = [ 4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 6 ] for index, (expected, produced) in enumerate( zip_longest(expected_elements, self._interleave(input_lists, 2, 1))): self.assertEqual(expected, produced, "Values differ at %s. %s != %s" % (index, expected, produced)) def testPythonImplementationBlockLength(self): input_lists = [[4] * 4, [5] * 5, [6] * 6] * 2 expected_elements = [ 4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 6, 6 ] for index, (expected, produced) in enumerate( zip_longest(expected_elements, self._interleave(input_lists, 2, 2))): self.assertEqual(expected, produced, "Values differ at %s. %s != %s" % (index, expected, produced)) def testPythonImplementationEmptyLists(self): input_lists = [[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6]] expected_elements = [ 4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6 ] for index, (expected, produced) in enumerate( zip_longest(expected_elements, self._interleave(input_lists, 2, 1))): self.assertEqual(expected, produced, "Values differ at %s. %s != %s" % (index, expected, produced)) def _clear_coordination_events(self): for i in range(4, 7): self.read_coordination_events[i] = threading.Semaphore(0) self.write_coordination_events[i].clear() def _allow_all_map_threads(self): for i in range(4, 7): self.write_coordination_events[i].set() def _testSingleThreaded(self, sloppy=False, prefetch_input_elements=0): # cycle_length=1,block_length=1 acts like `Dataset.interleave()` and # `Dataset.flat_map()` and is single-threaded. No synchronization required. self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=1, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=prefetch_input_elements)) for expected_element in self._interleave( [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 1): self.write_coordination_events[expected_element].set() self.assertEqual(expected_element * expected_element, self.evaluate(next_element())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testSingleThreaded(self): self._testSingleThreaded() def testSingleThreadedSloppy(self): self._testSingleThreaded(sloppy=True) def testSingleThreadedPrefetch1Itr(self): self._testSingleThreaded(prefetch_input_elements=1) def testSingleThreadedPrefetch1ItrSloppy(self): self._testSingleThreaded(prefetch_input_elements=1, sloppy=True) def testSingleThreadedRagged(self): # Tests a sequence with wildly different elements per iterator. 
self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([3, 7, 4]), cycle_length=2, block_length=1, sloppy=False, buffer_output_elements=1, prefetch_input_elements=1)) # Add coordination values for 3 and 7 self.read_coordination_events[3] = threading.Semaphore(0) self.write_coordination_events[3] = threading.Event() self.read_coordination_events[7] = threading.Semaphore(0) self.write_coordination_events[7] = threading.Event() for expected_element in self._interleave( [[3] * 3, [7] * 7, [4] * 4] * self.repeat_count, 2, 1): self.write_coordination_events[expected_element].set() output = self.evaluate(next_element()) self.assertEqual(expected_element * expected_element, output) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def _testTwoThreadsNoContention(self, sloppy=False): # num_threads > 1. # Explicit coordination should result in `Dataset.interleave()` behavior self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 1)): self.write_coordination_events[expected_element].set() if done_first_event: # First event starts the worker threads. self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event: self.read_coordination_events[expected_element].acquire() done_first_event = True self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContention(self): self._testTwoThreadsNoContention() def testTwoThreadsNoContentionSloppy(self): self._testTwoThreadsNoContention(sloppy=True) def _testTwoThreadsNoContentionWithRaces(self, sloppy=False): """Tests where all the workers race in producing elements. Note: this is in contrast with the previous test which carefully sequences the execution of the map functions. Args: sloppy: Whether to be sloppy or not. """ self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 1)): if done_first_event: # First event starts the worker threads. self._allow_all_map_threads() self.read_coordination_events[expected_element].acquire() else: self.write_coordination_events[expected_element].set() time.sleep(0.5) # Sleep to consistently "avoid" the race condition. 
actual_element = self.evaluate(next_element()) if not done_first_event: done_first_event = True self.assertTrue( self.read_coordination_events[expected_element].acquire(False)) self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContentionWithRaces(self): self._testTwoThreadsNoContentionWithRaces() def testTwoThreadsNoContentionWithRacesSloppy(self): self._testTwoThreadsNoContentionWithRaces(sloppy=True) def _testTwoThreadsNoContentionBlockLength(self, sloppy=False): # num_threads > 1. # Explicit coordination should result in `Dataset.interleave()` behavior self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=2, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 2)): self.write_coordination_events[expected_element].set() if done_first_event: # First event starts the worker threads. self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event: done_first_event = True self.read_coordination_events[expected_element].acquire() self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContentionBlockLength(self): self._testTwoThreadsNoContentionBlockLength() def testTwoThreadsNoContentionBlockLengthSloppy(self): self._testTwoThreadsNoContentionBlockLength(sloppy=True) def _testTwoThreadsNoContentionWithRacesAndBlocking(self, sloppy=False): """Tests where all the workers race in producing elements. Note: this is in contrast with the previous test which carefully sequences the execution of the map functions. Args: sloppy: Whether to be sloppy or not. """ self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=2, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 2)): if done_first_event: # First event starts the worker threads. self._allow_all_map_threads() self.read_coordination_events[expected_element].acquire() else: self.write_coordination_events[expected_element].set() time.sleep(0.5) # Sleep to consistently "avoid" the race condition. actual_element = self.evaluate(next_element()) if not done_first_event: done_first_event = True self.assertTrue( self.read_coordination_events[expected_element].acquire(False)) self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContentionWithRacesAndBlocking(self): self._testTwoThreadsNoContentionWithRacesAndBlocking() def testTwoThreadsNoContentionWithRacesAndBlockingSloppy(self): self._testTwoThreadsNoContentionWithRacesAndBlocking(sloppy=True) def _testEmptyInput(self, sloppy=False): # Empty input. 
self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([]), cycle_length=2, block_length=3, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=0)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testEmptyInput(self): self._testEmptyInput() def testEmptyInputSloppy(self): self._testEmptyInput(sloppy=True) def _testNonEmptyInputIntoEmptyOutputs(self, sloppy=False): # Non-empty input leading to empty output. self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([0, 0, 0]), cycle_length=2, block_length=3, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=0)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testNonEmptyInputIntoEmptyOutputs(self): self._testNonEmptyInputIntoEmptyOutputs() def testNonEmptyInputIntoEmptyOutputsSloppy(self): self._testNonEmptyInputIntoEmptyOutputs(sloppy=True) def _testPartiallyEmptyOutputs(self, sloppy=False, prefetch_input_elements=1): race_indices = {2, 8, 14} # Sequence points when sloppy mode has race conds # Mixture of non-empty and empty interleaved datasets. self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 0, 6]), cycle_length=2, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=prefetch_input_elements)) for i, expected_element in enumerate( self._interleave([[4] * 4, [], [6] * 6] * self.repeat_count, 2, 1)): self.write_coordination_events[expected_element].set() # First event starts the worker threads. Additionally, when running the # sloppy case with prefetch_input_elements=0, we get stuck if we wait # for the read coordination event for certain event orderings in the # presence of finishing iterators. if done_first_event and not (sloppy and (i in race_indices)): self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event or (sloppy and (i in race_indices)): done_first_event = True self.read_coordination_events[expected_element].acquire() self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) def testPartiallyEmptyOutputs(self): self._testPartiallyEmptyOutputs() def testPartiallyEmptyOutputsSloppy(self): self._testPartiallyEmptyOutputs(sloppy=True, prefetch_input_elements=0) def testDelayedOutputSloppy(self): # Explicitly control the sequence of events to ensure we correctly avoid # head-of-line blocking. 
self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=True, buffer_output_elements=1, prefetch_input_elements=0)) mis_ordering = [ 4, 4, 5, 4, 5, 5, 4, 5, 6, 6, 6, 5, 4, 4, 6, 6, 4, 4, 6, 5, 6, 6, 6, 6, 5, 5, 5, 5, 6, 6 ] for element in mis_ordering: self.write_coordination_events[element].set() self.assertEqual(element * element, self.evaluate(next_element())) self.assertTrue(self.read_coordination_events[element].acquire(False)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testBlockLengthWithContentionSloppy(self): self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=True, buffer_output_elements=1, prefetch_input_elements=1)) # Test against a generating sequence that differs from the uncontended # case, in order to prove sloppy correctness. for i, expected_element in enumerate( self._interleave( [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, cycle_length=2, block_length=3)): self.write_coordination_events[expected_element].set() if done_first_event: # First event starts the worker threads. self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event: self.read_coordination_events[expected_element].acquire() done_first_event = True self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def _testEarlyExit(self, sloppy=False): # Exiting without consuming all input should not block self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=3, block_length=2, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=0)) for i in range(4, 7): self.write_coordination_events[i].set() elem = self.evaluate(next_element()) # Start all workers # Allow the one successful worker to progress beyond the py_func again. 
elem = int(math.sqrt(elem)) self.write_coordination_events[elem].set() self.read_coordination_events[elem].acquire() # Allow the prefetch to succeed for i in range(4, 7): self.read_coordination_events[i].acquire() self.write_coordination_events[i].set() def testEarlyExit(self): self._testEarlyExit() def testEarlyExitSloppy(self): self._testEarlyExit(sloppy=True) def _testTooManyReaders(self, sloppy=False): def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64)) return dataset dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6]) dataset = dataset.repeat(self.repeat_count) dataset = dataset.apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy)) get_next = self.getNext(dataset) output_values = [] for _ in range(30): output_values.append(self.evaluate(get_next())) expected_values = self._interleave( [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2) self.assertItemsEqual(output_values, expected_values) def testTooManyReaders(self): self._testTooManyReaders() def testTooManyReadersSloppy(self): self._testTooManyReaders(sloppy=True) def testSparse(self): def _map_fn(i): return sparse_tensor.SparseTensor( indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2]) def _interleave_fn(x): return dataset_ops.Dataset.from_tensor_slices( sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values)) dataset = dataset_ops.Dataset.range(10).map(_map_fn).apply( interleave_ops.parallel_interleave(_interleave_fn, cycle_length=1)) get_next = self.getNext(dataset) for i in range(10): for j in range(2): expected = [i, 0] if j % 2 == 0 else [0, -i] self.assertAllEqual(expected, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testErrorsInOutputFn(self): self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=False, buffer_output_elements=1, prefetch_input_elements=0)) except_on_element_indices = set([3]) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 1)): if i in except_on_element_indices: self.error = ValueError() self.write_coordination_events[expected_element].set() with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element()) else: self.write_coordination_events[expected_element].set() actual_element = self.evaluate(next_element()) self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testErrorsInInputFn(self): def map_py_fn(x): if x == 5: raise ValueError() return x def map_fn(x): return script_ops.py_func(map_py_fn, [x], x.dtype) def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset.repeat(x) return dataset def dataset_fn(input_values, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements): return dataset_ops.Dataset.from_tensor_slices(input_values).map( map_fn).repeat(self.repeat_count).apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements)) next_element = self.getNext( dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=False, buffer_output_elements=1, 
prefetch_input_elements=0)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)): if expected_element == 5: with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element()) else: actual_element = self.evaluate(next_element()) self.assertEqual( expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testErrorsInInterleaveFn(self): def map_py_fn(x): if x == 5: raise ValueError() return x def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) y = script_ops.py_func(map_py_fn, [x], x.dtype) dataset = dataset.repeat(y) return dataset def dataset_fn(input_values, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements): return dataset_ops.Dataset.from_tensor_slices(input_values).repeat( self.repeat_count).apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements)) next_element = self.getNext( dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=False, buffer_output_elements=1, prefetch_input_elements=0)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)): if expected_element == 5: with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element()) else: actual_element = self.evaluate(next_element()) self.assertEqual( expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testShutdownRace(self): dataset = dataset_ops.Dataset.range(20) map_fn = lambda x: dataset_ops.Dataset.range(20 * x, 20 * (x + 1)) dataset = dataset.apply( interleave_ops.parallel_interleave( map_fn, cycle_length=3, sloppy=False, buffer_output_elements=1, prefetch_input_elements=0)) dataset = dataset.batch(32) results = [] for _ in range(2): elements = [] next_element = self.getNext(dataset) try: while True: elements.extend(self.evaluate(next_element())) except errors.OutOfRangeError: pass results.append(elements) self.assertAllEqual(results[0], results[1]) if __name__ == "__main__": test.main()
apache-2.0
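The test's _interleave() helper is a pure-Python reference for what parallel_interleave should produce when it runs deterministically. The core round-robin logic can be reproduced and run on toy input without TensorFlow; a condensed sketch with simplified bookkeeping but the same behaviour:

def interleave(lists, cycle_length, block_length):
    """Round-robin interleave over the given lists."""
    pending = [iter(l) for l in lists]          # iterators waiting for a slot
    slots = [pending.pop(0) if pending else None
             for _ in range(cycle_length)]      # currently open iterators
    while pending or any(it is not None for it in slots):
        for i in range(cycle_length):
            if slots[i] is None:
                if not pending:
                    continue
                slots[i] = pending.pop(0)
            for _ in range(block_length):
                try:
                    yield next(slots[i])
                except StopIteration:
                    slots[i] = None
                    break

print(list(interleave([[1, 1, 1], [2, 2], [3]], cycle_length=2, block_length=1)))
# -> [1, 2, 1, 2, 1, 3]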
dd00/commandergenius
project/jni/python/src/Lib/test/profilee.py
398
3041
""" Input for test_profile.py and test_cprofile.py. IMPORTANT: This stuff is touchy. If you modify anything above the test class you'll have to regenerate the stats by running the two test files. *ALL* NUMBERS in the expected output are relevant. If you change the formatting of pstats, please don't just regenerate the expected output without checking very carefully that not a single number has changed. """ import sys # In order to have reproducible time, we simulate a timer in the global # variable 'TICKS', which represents simulated time in milliseconds. # (We can't use a helper function increment the timer since it would be # included in the profile and would appear to consume all the time.) TICKS = 42000 def timer(): return TICKS def testfunc(): # 1 call # 1000 ticks total: 270 ticks local, 730 ticks in subfunctions global TICKS TICKS += 99 helper() # 300 helper() # 300 TICKS += 171 factorial(14) # 130 def factorial(n): # 23 calls total # 170 ticks total, 150 ticks local # 3 primitive calls, 130, 20 and 20 ticks total # including 116, 17, 17 ticks local global TICKS if n > 0: TICKS += n return mul(n, factorial(n-1)) else: TICKS += 11 return 1 def mul(a, b): # 20 calls # 1 tick, local global TICKS TICKS += 1 return a * b def helper(): # 2 calls # 300 ticks total: 20 ticks local, 260 ticks in subfunctions global TICKS TICKS += 1 helper1() # 30 TICKS += 2 helper1() # 30 TICKS += 6 helper2() # 50 TICKS += 3 helper2() # 50 TICKS += 2 helper2() # 50 TICKS += 5 helper2_indirect() # 70 TICKS += 1 def helper1(): # 4 calls # 30 ticks total: 29 ticks local, 1 tick in subfunctions global TICKS TICKS += 10 hasattr(C(), "foo") # 1 TICKS += 19 lst = [] lst.append(42) # 0 sys.exc_info() # 0 def helper2_indirect(): helper2() # 50 factorial(3) # 20 def helper2(): # 8 calls # 50 ticks local: 39 ticks local, 11 ticks in subfunctions global TICKS TICKS += 11 hasattr(C(), "bar") # 1 TICKS += 13 subhelper() # 10 TICKS += 15 def subhelper(): # 8 calls # 10 ticks total: 8 ticks local, 2 ticks in subfunctions global TICKS TICKS += 2 for i in range(2): # 0 try: C().foo # 1 x 2 except AttributeError: TICKS += 3 # 3 x 2 class C: def __getattr__(self, name): # 28 calls # 1 tick, local global TICKS TICKS += 1 raise AttributeError
lgpl-2.1
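profilee.py only supplies deterministic workload functions; the profile test files drive them through the profilers and compare the collected stats. As a minimal, self-contained illustration of gathering and printing such stats with cProfile (the workload here is a stand-in, not the module's testfunc):

import cProfile
import io
import pstats

def workload():
    return sum(i * i for i in range(10000))

profiler = cProfile.Profile()
profiler.enable()
workload()
profiler.disable()

stream = io.StringIO()
pstats.Stats(profiler, stream=stream).sort_stats('cumulative').print_stats(5)
print(stream.getvalue())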
tech-server/gondul
templating/templating.py
1
3215
#!/usr/bin/python3 import argparse import traceback import sys import netaddr import requests from flask import Flask, request from jinja2 import Environment, FileSystemLoader, TemplateNotFound endpoints = "read/networks read/oplog read/snmp read/switches-management public/distro-tree public/config public/dhcp public/dhcp-summary public/ping public/switches public/switch-state".split() objects = {} def getEndpoint(endpoint): r = requests.get("http://localhost:80/api/{}".format(endpoint)) if r.status_code != 200: raise Exception("Bad status code for endpoint {}: {}".format(endpoint, r.status_code)) return r.json() def updateData(): for a in endpoints: objects[a] = getEndpoint(a) env = Environment(loader=FileSystemLoader([]), trim_blocks=True) env.filters["netmask"] = lambda ip: netaddr.IPNetwork(ip).netmask env.filters["cidr"] = lambda ip: netaddr.IPNetwork(ip).prefixlen env.filters["networkId"] = lambda ip: netaddr.IPNetwork(ip).ip env.filters["getFirstDhcpIp"] = lambda ip: netaddr.IPNetwork(ip)[3] env.filters["getLastDhcpIp"] = lambda ip: netaddr.IPNetwork(ip)[-1] env.filters["agentDistro"] = lambda src: src.split(":")[0] env.filters["agentPort"] = lambda src: src.split(":")[1] env.filters["getFirstFapIP"] = lambda ip: netaddr.IPNetwork(ip)[netaddr.IPNetwork(ip).size / 2] app = Flask(__name__) @app.after_request def add_header(response): if response.status_code == 200: response.cache_control.max_age = 5 response.cache_control.s_maxage = 1 return response @app.route("/<path>", methods=["GET"]) def root_get(path): updateData() try: template = env.get_template(path) body = template.render(objects=objects, options=request.args) except TemplateNotFound: return 'Template "{}" not found\n'.format(path), 404 except Exception as err: return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400 return body, 200 @app.route("/<path>", methods=["POST"]) def root_post(path): updateData() try: content = request.stream.read(int(request.headers["Content-Length"])) template = env.from_string(content.decode("utf-8")) body = template.render(objects=objects, options=request.args) except Exception as err: return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400 return body, 200 parser = argparse.ArgumentParser(description="Process templates for gondul.", add_help=False) parser.add_argument("-t", "--templates", type=str, nargs="+", help="location of templates") parser.add_argument("-h", "--host", type=str, default="127.0.0.1", help="host address") parser.add_argument("-p", "--port", type=int, default=8080, help="host port") parser.add_argument("-d", "--debug", action="store_true", help="enable debug mode") args = parser.parse_args() env.loader.searchpath = args.templates if not sys.argv[1:]: parser.print_help() app.run(host=args.host, port=args.port, debug=args.debug)
gpl-2.0
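The templating service registers a handful of netaddr-backed filters on the Jinja2 environment before rendering. A tiny sketch of the same idea, rendering from an in-memory template instead of a file (assumes jinja2 and netaddr are installed; the network value is an example):

import netaddr
from jinja2 import Environment

env = Environment(trim_blocks=True)
env.filters['netmask'] = lambda ip: netaddr.IPNetwork(ip).netmask
env.filters['cidr'] = lambda ip: netaddr.IPNetwork(ip).prefixlen

template = env.from_string('{{ net }} -> mask {{ net | netmask }}, /{{ net | cidr }}')
print(template.render(net='192.0.2.0/24'))
# -> 192.0.2.0/24 -> mask 255.255.255.0, /24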
diogovk/ansible
contrib/inventory/zabbix.py
119
3956
#!/usr/bin/env python # (c) 2013, Greg Buehler # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### """ Zabbix Server external inventory script. ======================================== Returns hosts and hostgroups from Zabbix Server. Configuration is read from `zabbix.ini`. Tested with Zabbix Server 2.0.6. """ import os, sys import argparse import ConfigParser try: from zabbix_api import ZabbixAPI except: print >> sys.stderr, "Error: Zabbix API library must be installed: pip install zabbix-api." sys.exit(1) try: import json except: import simplejson as json class ZabbixInventory(object): def read_settings(self): config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini') # server if config.has_option('zabbix', 'server'): self.zabbix_server = config.get('zabbix', 'server') # login if config.has_option('zabbix', 'username'): self.zabbix_username = config.get('zabbix', 'username') if config.has_option('zabbix', 'password'): self.zabbix_password = config.get('zabbix', 'password') def read_cli(self): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') self.options = parser.parse_args() def hoststub(self): return { 'hosts': [] } def get_host(self, api, name): data = {} return data def get_list(self, api): hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'}) data = {} data[self.defaultgroup] = self.hoststub() for host in hostsData: hostname = host['name'] data[self.defaultgroup]['hosts'].append(hostname) for group in host['groups']: groupname = group['name'] if not groupname in data: data[groupname] = self.hoststub() data[groupname]['hosts'].append(hostname) return data def __init__(self): self.defaultgroup = 'group_all' self.zabbix_server = None self.zabbix_username = None self.zabbix_password = None self.read_settings() self.read_cli() if self.zabbix_server and self.zabbix_username: try: api = ZabbixAPI(server=self.zabbix_server) api.login(user=self.zabbix_username, password=self.zabbix_password) except BaseException, e: print >> sys.stderr, "Error: Could not login to Zabbix server. Check your zabbix.ini." sys.exit(1) if self.options.host: data = self.get_host(api, self.options.host) print json.dumps(data, indent=2) elif self.options.list: data = self.get_list(api) print json.dumps(data, indent=2) else: print >> sys.stderr, "usage: --list ..OR.. --host <hostname>" sys.exit(1) else: print >> sys.stderr, "Error: Configuration of server and credentials are required. See zabbix.ini." sys.exit(1) ZabbixInventory()
gpl-3.0
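get_list() folds the Zabbix host list into the nested dict shape Ansible expects from a dynamic inventory's --list output. A standalone sketch of that grouping step, using made-up sample data in place of the API response:

import json

hosts = [
    {'name': 'web01', 'groups': [{'name': 'webservers'}]},
    {'name': 'db01', 'groups': [{'name': 'databases'}]},
]

inventory = {'group_all': {'hosts': []}}
for host in hosts:
    inventory['group_all']['hosts'].append(host['name'])
    for group in host['groups']:
        inventory.setdefault(group['name'], {'hosts': []})['hosts'].append(host['name'])

print(json.dumps(inventory, indent=2))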
BackupTheBerlios/cuon-svn
cuon_client/cuon/Misc/cuon_dialog.py
5
1297
import gtk import pygtk class cuon_dialog: def __init__(self): # Dialog - Flags # DIALOG_MODAL - make the dialog modal # DIALOG_DESTROY_WITH_PARENT - destroy dialog when its parent is destroyed # DIALOG_NO_SEPARATOR - omit the separator between the vbox and the action_area #gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT self.Buttons = {} # def inputLine(self, sTitle=None, sText=None, oParent=None): ok = False res = None print 'QuestionMsg' dialog = gtk.Dialog(sTitle, oParent, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_ACCEPT) ); lLabel = gtk.Label(sText) dialog.vbox.pack_start(lLabel, True, True, 0) lLabel.show() eLine = gtk.Entry() dialog.vbox.pack_start(eLine, True, True, 0) eLine.show() response = dialog.run (); print 'Response', response if response == gtk.RESPONSE_ACCEPT: ok = True res = eLine.get_text() print 'res at dialog', res dialog.destroy (); return ok, res
gpl-3.0
markwal/OctoPrint
src/octoprint/plugins/softwareupdate/scripts/update-octoprint.py
30
6010
#!/bin/env python from __future__ import absolute_import __author__ = "Gina Haeussge <osd@foosel.net>" __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' __copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License" import errno import subprocess import sys def _get_git_executables(): GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] return GITS def _git(args, cwd, hide_stderr=False, verbose=False, git_executable=None): if git_executable is not None: commands = [git_executable] else: commands = _get_git_executables() for c in commands: try: p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % args[0]) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % args[0]) return p.returncode, stdout def _python(args, cwd, python_executable, sudo=False): command = [python_executable] + args if sudo: command = ["sudo"] + command try: p = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except: return None, None stdout = p.communicate()[0].strip() if sys.version >= "3": stdout = stdout.decode() return p.returncode, stdout def update_source(git_executable, folder, target, force=False): print(">>> Running: git diff --shortstat") returncode, stdout = _git(["diff", "--shortstat"], folder, git_executable=git_executable) if returncode != 0: raise RuntimeError("Could not update, \"git diff\" failed with returncode %d: %s" % (returncode, stdout)) if stdout and stdout.strip(): # we got changes in the working tree, maybe from the user, so we'll now rescue those into a patch import time import os timestamp = time.strftime("%Y%m%d%H%M") patch = os.path.join(folder, "%s-preupdate.patch" % timestamp) print(">>> Running: git diff and saving output to %s" % timestamp) returncode, stdout = _git(["diff"], folder, git_executable=git_executable) if returncode != 0: raise RuntimeError("Could not update, installation directory was dirty and state could not be persisted as a patch to %s" % patch) with open(patch, "wb") as f: f.write(stdout) print(">>> Running: git reset --hard") returncode, stdout = _git(["reset", "--hard"], folder, git_executable=git_executable) if returncode != 0: raise RuntimeError("Could not update, \"git reset --hard\" failed with returncode %d: %s" % (returncode, stdout)) print(">>> Running: git pull") returncode, stdout = _git(["pull"], folder, git_executable=git_executable) if returncode != 0: raise RuntimeError("Could not update, \"git pull\" failed with returncode %d: %s" % (returncode, stdout)) print(stdout) if force: reset_command = ["reset"] reset_command += [target] print(">>> Running: git %s" % " ".join(reset_command)) returncode, stdout = _git(reset_command, folder, git_executable=git_executable) if returncode != 0: raise RuntimeError("Error while updating, \"git %s\" failed with returncode %d: %s" % (" ".join(reset_command), returncode, stdout)) print(stdout) def install_source(python_executable, folder, user=False, sudo=False): print(">>> Running: python setup.py clean") returncode, stdout = _python(["setup.py", "clean"], folder, python_executable) if 
returncode != 0: print("\"python setup.py clean\" failed with returncode %d: %s" % (returncode, stdout)) print("Continuing anyways") print(stdout) print(">>> Running: python setup.py install") args = ["setup.py", "install"] if user: args.append("--user") returncode, stdout = _python(args, folder, python_executable, sudo=sudo) if returncode != 0: raise RuntimeError("Could not update, \"python setup.py install\" failed with returncode %d: %s" % (returncode, stdout)) print(stdout) def parse_arguments(): import argparse parser = argparse.ArgumentParser(prog="update-octoprint.py") parser.add_argument("--git", action="store", type=str, dest="git_executable", help="Specify git executable to use") parser.add_argument("--python", action="store", type=str, dest="python_executable", help="Specify python executable to use") parser.add_argument("--force", action="store_true", dest="force", help="Set this to force the update to only the specified version (nothing newer)") parser.add_argument("--sudo", action="store_true", dest="sudo", help="Install with sudo") parser.add_argument("--user", action="store_true", dest="user", help="Install to the user site directory instead of the general site directory") parser.add_argument("folder", type=str, help="Specify the base folder of the OctoPrint installation to update") parser.add_argument("target", type=str, help="Specify the commit or tag to which to update") args = parser.parse_args() return args def main(): args = parse_arguments() git_executable = None if args.git_executable: git_executable = args.git_executable python_executable = sys.executable if args.python_executable: python_executable = args.python_executable folder = args.folder target = args.target import os if not os.access(folder, os.W_OK): raise RuntimeError("Could not update, base folder is not writable") update_source(git_executable, folder, target, force=args.force) install_source(python_executable, folder, user=args.user, sudo=args.sudo) if __name__ == "__main__": main()
agpl-3.0
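_git() and _python() above are thin wrappers that run an external command, capture stdout and hand back (returncode, output). A compact Python 3 sketch of the same pattern using subprocess.run (the git invocation is just an example and assumes git is on PATH):

import subprocess

def run_command(args, cwd=None):
    """Run a command and return (returncode, stdout), like the _git helper."""
    completed = subprocess.run(args, cwd=cwd, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    return completed.returncode, completed.stdout.decode().strip()

code, out = run_command(['git', '--version'])
print(code, out)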
shubhamgupta2021/atomic
Atomic/config.py
4
1724
import os try: import ConfigParser as configparser except ImportError: # py3 compat import configparser class PulpConfig(object): """ pulp configuration: 1. look in ~/.pulp/admin.conf configuration contents: [server] host = <pulp-server-hostname.example.com> verify_ssl = false # optional auth section [auth] username: <user> password: <pass> """ def __init__(self): self.c = configparser.ConfigParser() self.config_file = os.path.expanduser("~/.pulp/admin.conf") self.c.read(self.config_file) self.url = self._get("server", "host") self.username = self._get("auth", "username") self.password = self._get("auth", "password") self.verify_ssl = self._getboolean("server", "verify_ssl") def _get(self, section, val): try: return self.c.get(section, val) except (configparser.NoSectionError, configparser.NoOptionError): return None except ValueError as e: raise ValueError("Bad Value for %s in %s. %s" % (val, self.config_file, e)) def _getboolean(self, section, val): try: return self.c.getboolean(section, val) except (configparser.NoSectionError, configparser.NoOptionError): return True except ValueError as e: raise ValueError("Bad Value for %s in %s. %s" % (val, self.config_file, e)) def config(self): return {"url": self.url, "verify_ssl": self.verify_ssl, "username": self.username, "password": self.password} if __name__ == '__main__': c = PulpConfig() print(c.config())
lgpl-2.1
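PulpConfig above is a thin mapping from an INI file to a dict, with missing sections falling back to None (or True for verify_ssl). A small sketch that exercises the same ConfigParser behaviour against a throwaway temp file rather than ~/.pulp/admin.conf:

import tempfile

try:
    import ConfigParser as configparser  # py2
except ImportError:
    import configparser  # py3

# Write a minimal admin.conf-style file (a throwaway temp file, not the
# real ~/.pulp/admin.conf).
with tempfile.NamedTemporaryFile("w", suffix=".conf", delete=False) as f:
    f.write("[server]\nhost = pulp.example.com\nverify_ssl = false\n")
    path = f.name

c = configparser.ConfigParser()
c.read(path)
print(c.get("server", "host"))               # -> pulp.example.com
print(c.getboolean("server", "verify_ssl"))  # -> False
# A lookup in a missing section raises NoSectionError, which the
# _get()/_getboolean() helpers above translate into None / True.
try:
    c.get("auth", "username")
except configparser.NoSectionError:
    print("no [auth] section -> PulpConfig falls back to None")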
fanquake/bitcoin
test/functional/wallet_labels.py
6
8597
#!/usr/bin/env python3 # Copyright (c) 2016-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test label RPCs. RPCs tested are: - getaddressesbylabel - listaddressgroupings - setlabel """ from collections import defaultdict from test_framework.blocktools import COINBASE_MATURITY from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error from test_framework.wallet_util import test_address class WalletLabelsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Check that there's no UTXO on the node node = self.nodes[0] assert_equal(len(node.listunspent()), 0) # Note each time we call generate, all generated coins go into # the same address, so we call twice to get two addresses w/50 each node.generatetoaddress(nblocks=1, address=node.getnewaddress(label='coinbase')) node.generatetoaddress(nblocks=COINBASE_MATURITY + 1, address=node.getnewaddress(label='coinbase')) assert_equal(node.getbalance(), 100) # there should be 2 address groups # each with 1 address with a balance of 50 Bitcoins address_groups = node.listaddressgroupings() assert_equal(len(address_groups), 2) # the addresses aren't linked now, but will be after we send to the # common address linked_addresses = set() for address_group in address_groups: assert_equal(len(address_group), 1) assert_equal(len(address_group[0]), 3) assert_equal(address_group[0][1], 50) assert_equal(address_group[0][2], 'coinbase') linked_addresses.add(address_group[0][0]) # send 50 from each address to a third address not in this wallet common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr" node.sendmany( amounts={common_address: 100}, subtractfeefrom=[common_address], minconf=1, ) # there should be 1 address group, with the previously # unlinked addresses now linked (they both have 0 balance) address_groups = node.listaddressgroupings() assert_equal(len(address_groups), 1) assert_equal(len(address_groups[0]), 2) assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses) assert_equal([a[1] for a in address_groups[0]], [0, 0]) node.generate(1) # we want to reset so that the "" label has what's expected. # otherwise we're off by exactly the fee amount as that's mined # and matures in the next 100 blocks amount_to_send = 1.0 # Create labels and make sure subsequent label API calls # recognize the label/address associations. labels = [Label(name) for name in ("a", "b", "c", "d", "e")] for label in labels: address = node.getnewaddress(label.name) label.add_receive_address(address) label.verify(node) # Check all labels are returned by listlabels. assert_equal(node.listlabels(), sorted(['coinbase'] + [label.name for label in labels])) # Send a transaction to each label. for label in labels: node.sendtoaddress(label.addresses[0], amount_to_send) label.verify(node) # Check the amounts received. 
node.generate(1) for label in labels: assert_equal( node.getreceivedbyaddress(label.addresses[0]), amount_to_send) assert_equal(node.getreceivedbylabel(label.name), amount_to_send) for i, label in enumerate(labels): to_label = labels[(i + 1) % len(labels)] node.sendtoaddress(to_label.addresses[0], amount_to_send) node.generate(1) for label in labels: address = node.getnewaddress(label.name) label.add_receive_address(address) label.verify(node) assert_equal(node.getreceivedbylabel(label.name), 2) label.verify(node) node.generate(COINBASE_MATURITY + 1) # Check that setlabel can assign a label to a new unused address. for label in labels: address = node.getnewaddress() node.setlabel(address, label.name) label.add_address(address) label.verify(node) assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "") # Check that addmultisigaddress can assign labels. if not self.options.descriptors: for label in labels: addresses = [] for _ in range(10): addresses.append(node.getnewaddress()) multisig_address = node.addmultisigaddress(5, addresses, label.name)['address'] label.add_address(multisig_address) label.purpose[multisig_address] = "send" label.verify(node) node.generate(COINBASE_MATURITY + 1) # Check that setlabel can change the label of an address from a # different label. change_label(node, labels[0].addresses[0], labels[0], labels[1]) # Check that setlabel can set the label of an address already # in the label. This is a no-op. change_label(node, labels[2].addresses[0], labels[2], labels[2]) if self.options.descriptors: # This is a descriptor wallet test because of segwit v1+ addresses self.log.info('Check watchonly labels') node.createwallet(wallet_name='watch_only', disable_private_keys=True) wallet_watch_only = node.get_wallet_rpc('watch_only') BECH32_VALID = { '✔️_VER15_PROG40': 'bcrt10qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqxkg7fn', '✔️_VER16_PROG03': 'bcrt1sqqqqq8uhdgr', '✔️_VER16_PROB02': 'bcrt1sqqqq4wstyw', } BECH32_INVALID = { '❌_VER15_PROG41': 'bcrt1sqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqajlxj8', '❌_VER16_PROB01': 'bcrt1sqq5r4036', } for l in BECH32_VALID: ad = BECH32_VALID[l] wallet_watch_only.importaddress(label=l, rescan=False, address=ad) node.generatetoaddress(1, ad) assert_equal(wallet_watch_only.getaddressesbylabel(label=l), {ad: {'purpose': 'receive'}}) assert_equal(wallet_watch_only.getreceivedbylabel(label=l), 0) for l in BECH32_INVALID: ad = BECH32_INVALID[l] assert_raises_rpc_error( -5, "Address is not valid" if self.options.descriptors else "Invalid Bitcoin address or script", lambda: wallet_watch_only.importaddress(label=l, rescan=False, address=ad), ) class Label: def __init__(self, name): # Label name self.name = name # Current receiving address associated with this label. 
self.receive_address = None # List of all addresses assigned with this label self.addresses = [] # Map of address to address purpose self.purpose = defaultdict(lambda: "receive") def add_address(self, address): assert_equal(address not in self.addresses, True) self.addresses.append(address) def add_receive_address(self, address): self.add_address(address) def verify(self, node): if self.receive_address is not None: assert self.receive_address in self.addresses for address in self.addresses: test_address(node, address, labels=[self.name]) assert self.name in node.listlabels() assert_equal( node.getaddressesbylabel(self.name), {address: {"purpose": self.purpose[address]} for address in self.addresses}) def change_label(node, address, old_label, new_label): assert_equal(address in old_label.addresses, True) node.setlabel(address, new_label.name) old_label.addresses.remove(address) new_label.add_address(address) old_label.verify(node) new_label.verify(node) if __name__ == '__main__': WalletLabelsTest().main()
mit
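The wallet_labels test above keeps its expectations in a Label helper: a list of addresses plus a purpose map defaulting to "receive". A standalone sketch of that bookkeeping, with made-up address strings and no running node:

from collections import defaultdict

class LabelBook(object):
    """Minimal stand-in for the Label helper above: tracks which
    addresses carry a label and what purpose each address has."""

    def __init__(self, name):
        self.name = name
        self.addresses = []
        self.purpose = defaultdict(lambda: "receive")

    def add_address(self, address, purpose=None):
        assert address not in self.addresses
        self.addresses.append(address)
        if purpose is not None:
            self.purpose[address] = purpose

    def as_rpc_result(self):
        # Shape matches what getaddressesbylabel returns in the test:
        # {address: {"purpose": ...}, ...}
        return {a: {"purpose": self.purpose[a]} for a in self.addresses}

book = LabelBook("a")
book.add_address("addr_receive_1")                   # defaults to "receive"
book.add_address("addr_multisig_1", purpose="send")  # e.g. a multisig entry
print(book.as_rpc_result())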
zerkrx/zerkbox
lib/pip/vcs/git.py
340
11197
from __future__ import absolute_import import logging import tempfile import os.path from pip.compat import samefile from pip.exceptions import BadCommand from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request from pip._vendor.packaging.version import parse as parse_version from pip.utils import display_path, rmtree from pip.vcs import vcs, VersionControl urlsplit = urllib_parse.urlsplit urlunsplit = urllib_parse.urlunsplit logger = logging.getLogger(__name__) class Git(VersionControl): name = 'git' dirname = '.git' repo_name = 'clone' schemes = ( 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', ) def __init__(self, url=None, *args, **kwargs): # Works around an apparent Git bug # (see http://article.gmane.org/gmane.comp.version-control.git/146500) if url: scheme, netloc, path, query, fragment = urlsplit(url) if scheme.endswith('file'): initial_slashes = path[:-len(path.lstrip('/'))] newpath = ( initial_slashes + urllib_request.url2pathname(path) .replace('\\', '/').lstrip('/') ) url = urlunsplit((scheme, netloc, newpath, query, fragment)) after_plus = scheme.find('+') + 1 url = scheme[:after_plus] + urlunsplit( (scheme[after_plus:], netloc, newpath, query, fragment), ) super(Git, self).__init__(url, *args, **kwargs) def get_git_version(self): VERSION_PFX = 'git version ' version = self.run_command(['version'], show_stdout=False) if version.startswith(VERSION_PFX): version = version[len(VERSION_PFX):] else: version = '' # get first 3 positions of the git version becasue # on windows it is x.y.z.windows.t, and this parses as # LegacyVersion which always smaller than a Version. version = '.'.join(version.split('.')[:3]) return parse_version(version) def export(self, location): """Export the Git repository at the url to the destination location""" temp_dir = tempfile.mkdtemp('-export', 'pip-') self.unpack(temp_dir) try: if not location.endswith('/'): location = location + '/' self.run_command( ['checkout-index', '-a', '-f', '--prefix', location], show_stdout=False, cwd=temp_dir) finally: rmtree(temp_dir) def check_rev_options(self, rev, dest, rev_options): """Check the revision options before checkout to compensate that tags and branches may need origin/ as a prefix. Returns the SHA1 of the branch or tag if found. """ revisions = self.get_short_refs(dest) origin_rev = 'origin/%s' % rev if origin_rev in revisions: # remote branch return [revisions[origin_rev]] elif rev in revisions: # a local tag or branch name return [revisions[rev]] else: logger.warning( "Could not find a tag or branch '%s', assuming commit.", rev, ) return rev_options def check_version(self, dest, rev_options): """ Compare the current sha to the ref. ref may be a branch or tag name, but current rev will always point to a sha. This means that a branch or tag will never compare as True. So this ultimately only matches against exact shas. 
""" return self.get_revision(dest).startswith(rev_options[0]) def switch(self, dest, url, rev_options): self.run_command(['config', 'remote.origin.url', url], cwd=dest) self.run_command(['checkout', '-q'] + rev_options, cwd=dest) self.update_submodules(dest) def update(self, dest, rev_options): # First fetch changes from the default remote if self.get_git_version() >= parse_version('1.9.0'): # fetch tags in addition to everything else self.run_command(['fetch', '-q', '--tags'], cwd=dest) else: self.run_command(['fetch', '-q'], cwd=dest) # Then reset to wanted revision (maybe even origin/master) if rev_options: rev_options = self.check_rev_options( rev_options[0], dest, rev_options, ) self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest) #: update submodules self.update_submodules(dest) def obtain(self, dest): url, rev = self.get_url_rev() if rev: rev_options = [rev] rev_display = ' (to %s)' % rev else: rev_options = ['origin/master'] rev_display = '' if self.check_destination(dest, url, rev_options, rev_display): logger.info( 'Cloning %s%s to %s', url, rev_display, display_path(dest), ) self.run_command(['clone', '-q', url, dest]) if rev: rev_options = self.check_rev_options(rev, dest, rev_options) # Only do a checkout if rev_options differs from HEAD if not self.check_version(dest, rev_options): self.run_command( ['checkout', '-q'] + rev_options, cwd=dest, ) #: repo may contain submodules self.update_submodules(dest) def get_url(self, location): """Return URL of the first remote encountered.""" remotes = self.run_command( ['config', '--get-regexp', 'remote\..*\.url'], show_stdout=False, cwd=location) remotes = remotes.splitlines() found_remote = remotes[0] for remote in remotes: if remote.startswith('remote.origin.url '): found_remote = remote break url = found_remote.split(' ')[1] return url.strip() def get_revision(self, location): current_rev = self.run_command( ['rev-parse', 'HEAD'], show_stdout=False, cwd=location) return current_rev.strip() def get_full_refs(self, location): """Yields tuples of (commit, ref) for branches and tags""" output = self.run_command(['show-ref'], show_stdout=False, cwd=location) for line in output.strip().splitlines(): commit, ref = line.split(' ', 1) yield commit.strip(), ref.strip() def is_ref_remote(self, ref): return ref.startswith('refs/remotes/') def is_ref_branch(self, ref): return ref.startswith('refs/heads/') def is_ref_tag(self, ref): return ref.startswith('refs/tags/') def is_ref_commit(self, ref): """A ref is a commit sha if it is not anything else""" return not any(( self.is_ref_remote(ref), self.is_ref_branch(ref), self.is_ref_tag(ref), )) # Should deprecate `get_refs` since it's ambiguous def get_refs(self, location): return self.get_short_refs(location) def get_short_refs(self, location): """Return map of named refs (branches or tags) to commit hashes.""" rv = {} for commit, ref in self.get_full_refs(location): ref_name = None if self.is_ref_remote(ref): ref_name = ref[len('refs/remotes/'):] elif self.is_ref_branch(ref): ref_name = ref[len('refs/heads/'):] elif self.is_ref_tag(ref): ref_name = ref[len('refs/tags/'):] if ref_name is not None: rv[ref_name] = commit return rv def _get_subdirectory(self, location): """Return the relative path of setup.py to the git repo root.""" # find the repo root git_dir = self.run_command(['rev-parse', '--git-dir'], show_stdout=False, cwd=location).strip() if not os.path.isabs(git_dir): git_dir = os.path.join(location, git_dir) root_dir = os.path.join(git_dir, '..') # find setup.py 
orig_location = location while not os.path.exists(os.path.join(location, 'setup.py')): last_location = location location = os.path.dirname(location) if location == last_location: # We've traversed up to the root of the filesystem without # finding setup.py logger.warning( "Could not find setup.py for directory %s (tried all " "parent directories)", orig_location, ) return None # relative path of setup.py to repo root if samefile(root_dir, location): return None return os.path.relpath(location, root_dir) def get_src_requirement(self, dist, location): repo = self.get_url(location) if not repo.lower().startswith('git:'): repo = 'git+' + repo egg_project_name = dist.egg_name().split('-', 1)[0] if not repo: return None current_rev = self.get_revision(location) req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name) subdirectory = self._get_subdirectory(location) if subdirectory: req += '&subdirectory=' + subdirectory return req def get_url_rev(self): """ Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. That's required because although they use SSH they sometimes doesn't work with a ssh:// scheme (e.g. Github). But we need a scheme for parsing. Hence we remove it again afterwards and return it as a stub. """ if '://' not in self.url: assert 'file:' not in self.url self.url = self.url.replace('git+', 'git+ssh://') url, rev = super(Git, self).get_url_rev() url = url.replace('ssh://', '') else: url, rev = super(Git, self).get_url_rev() return url, rev def update_submodules(self, location): if not os.path.exists(os.path.join(location, '.gitmodules')): return self.run_command( ['submodule', 'update', '--init', '--recursive', '-q'], cwd=location, ) @classmethod def controls_location(cls, location): if super(Git, cls).controls_location(location): return True try: r = cls().run_command(['rev-parse'], cwd=location, show_stdout=False, on_returncode='ignore') return not r except BadCommand: logger.debug("could not determine if %s is under git control " "because git is not available", location) return False vcs.register(Git)
gpl-3.0
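Git.get_url_rev above explains why stub URLs such as user@hostname:repo.git get a temporary ssh:// scheme: they cannot be parsed otherwise. A small standalone sketch of that normalization (the function name and example URL are illustrative only, not pip's API):

try:
    from urllib.parse import urlsplit   # py3
except ImportError:
    from urlparse import urlsplit       # py2

def normalize_git_url(url):
    """Mimic the stub-URL handling in Git.get_url_rev() above:
    'git+user@host:repo.git' has no '://', so a temporary 'ssh://'
    scheme is inserted purely so the URL can be parsed, then removed
    again before the value is returned."""
    added_scheme = '://' not in url
    if added_scheme:
        url = url.replace('git+', 'git+ssh://')
    parsed = urlsplit(url)
    if added_scheme:
        url = url.replace('ssh://', '')
    return url, parsed.netloc

print(normalize_git_url('git+git@github.com:pypa/pip.git'))
# -> ('git+git@github.com:pypa/pip.git', 'git@github.com:pypa')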
cedi4155476/QGIS
python/plugins/processing/algs/qgis/LinesIntersection.py
10
5061
# -*- coding: utf-8 -*- """ *************************************************************************** LinesIntersection.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.core import QGis, QgsFeatureRequest, QgsFeature, QgsGeometry from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import ParameterVector from processing.core.parameters import ParameterTableField from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector class LinesIntersection(GeoAlgorithm): INPUT_A = 'INPUT_A' INPUT_B = 'INPUT_B' FIELD_A = 'FIELD_A' FIELD_B = 'FIELD_B' OUTPUT = 'OUTPUT' def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('Line intersections') self.group, self.i18n_group = self.trAlgorithm('Vector overlay tools') self.addParameter(ParameterVector(self.INPUT_A, self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_LINE])) self.addParameter(ParameterVector(self.INPUT_B, self.tr('Intersect layer'), [ParameterVector.VECTOR_TYPE_LINE])) self.addParameter(ParameterTableField( self.FIELD_A, self.tr('Input unique ID field'), self.INPUT_A, optional=True)) self.addParameter(ParameterTableField( self.FIELD_B, self.tr('Intersect unique ID field'), self.INPUT_B, optional=True)) self.addOutput(OutputVector(self.OUTPUT, self.tr('Intersections'))) def processAlgorithm(self, progress): layerA = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_A)) layerB = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_B)) fieldA = self.getParameterValue(self.FIELD_A) fieldB = self.getParameterValue(self.FIELD_B) idxA = layerA.fieldNameIndex(fieldA) idxB = layerB.fieldNameIndex(fieldB) fieldList = [layerA.pendingFields()[idxA], layerB.pendingFields()[idxB]] writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fieldList, QGis.WKBPoint, layerA.dataProvider().crs()) spatialIndex = vector.spatialindex(layerB) inFeatA = QgsFeature() inFeatB = QgsFeature() outFeat = QgsFeature() inGeom = QgsGeometry() tmpGeom = QgsGeometry() features = vector.features(layerA) current = 0 total = 100.0 / float(len(features)) hasIntersections = False for inFeatA in features: inGeom = inFeatA.geometry() hasIntersections = False lines = spatialIndex.intersects(inGeom.boundingBox()) if len(lines) > 0: hasIntersections = True if hasIntersections: for i in lines: request = QgsFeatureRequest().setFilterFid(i) inFeatB = layerB.getFeatures(request).next() tmpGeom = QgsGeometry(inFeatB.geometry()) points = [] attrsA = inFeatA.attributes() attrsB = inFeatB.attributes() if inGeom.intersects(tmpGeom): tempGeom = inGeom.intersection(tmpGeom) if tempGeom.type() == QGis.Point: if tempGeom.isMultipart(): points = tempGeom.asMultiPoint() else: points.append(tempGeom.asPoint()) for j in points: outFeat.setGeometry(tempGeom.fromPoint(j)) outFeat.setAttributes([attrsA[idxA], 
attrsB[idxB]]) writer.addFeature(outFeat) current += 1 progress.setPercentage(int(current * total)) del writer
gpl-2.0
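LinesIntersection above filters candidate features through a spatial index before doing the exact geometric intersection and writing point features. A framework-free sketch of the same filter-then-exact-test idea using bounding boxes and straight line segments (pure Python, no QGIS objects involved):

def bbox(seg):
    (x1, y1), (x2, y2) = seg
    return min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)

def bbox_overlap(a, b):
    return a[0] <= b[2] and b[0] <= a[2] and a[1] <= b[3] and b[1] <= a[3]

def segment_intersection(s1, s2):
    """Return the intersection point of two segments, or None."""
    (x1, y1), (x2, y2) = s1
    (x3, y3), (x4, y4) = s2
    d = (x2 - x1) * (y4 - y3) - (y2 - y1) * (x4 - x3)
    if d == 0:
        return None  # parallel or collinear
    t = ((x3 - x1) * (y4 - y3) - (y3 - y1) * (x4 - x3)) / float(d)
    u = ((x3 - x1) * (y2 - y1) - (y3 - y1) * (x2 - x1)) / float(d)
    if 0 <= t <= 1 and 0 <= u <= 1:
        return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))
    return None

layer_a = [((0, 0), (2, 2))]
layer_b = [((0, 2), (2, 0)), ((5, 5), (6, 6))]
for sa in layer_a:
    # cheap bounding-box filter first (the role of the spatial index above)
    candidates = [sb for sb in layer_b if bbox_overlap(bbox(sa), bbox(sb))]
    for sb in candidates:  # exact geometric test only on candidates
        p = segment_intersection(sa, sb)
        if p is not None:
            print(p)  # -> (1.0, 1.0)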
chemlab/chemlab
chemlab/mviewer/api/display.py
6
4689
from chemlab.mviewer.representations import BallAndStickRepresentation from chemlab.graphics.qttrajectory import format_time from .core import * from chemlab.db import CirDB from chemlab.io import datafile from chemlab.core import System import numpy as np db = CirDB() def display_system(system, autozoom=True): '''Display a `~chemlab.core.System` instance at screen''' viewer.clear() viewer.add_representation(BallAndStickRepresentation, system) if autozoom: autozoom_() viewer.update() msg(str(system)) def display_molecule(mol, autozoom=True): '''Display a `~chemlab.core.Molecule` instance in the viewer. This function wraps the molecule in a system before displaying it. ''' s = System([mol]) display_system(s, autozoom=True) def autozoom(): '''Find optimal camera zoom level for the current view.''' viewer.widget.camera.autozoom(current_system().r_array) viewer.update() autozoom_ = autozoom # To prevent shadowing def download_molecule(name): '''Download a molecule by name.''' mol = db.get('molecule', name) display_molecule(mol) def load_system(name, format=None): '''Read a `~chemlab.core.System` from a file. .. seealso:: `chemlab.io.datafile` ''' mol = datafile(name).read('system') display_system(mol) def load_molecule(name, format=None): '''Read a `~chemlab.core.Molecule` from a file. .. seealso:: `chemlab.io.datafile` ''' mol = datafile(name, format=format).read('molecule') display_system(System([mol])) def load_remote_molecule(url, format=None): '''Load a molecule from the remote location specified by *url*. **Example** :: load_remote_molecule('https://raw.github.com/chemlab/chemlab-testdata/master/benzene.mol') ''' from urllib import urlretrieve filename, headers = urlretrieve(url) load_molecule(filename, format=format) def load_remote_system(url, format=None): '''Load a system from the remote location specified by *url*. **Example** :: load_remote_system('https://raw.github.com/chemlab/chemlab-testdata/master/naclwater.gro') ''' from urllib import urlretrieve filename, headers = urlretrieve(url) load_system(filename, format=format) def load_remote_trajectory(url, format=None): '''Load a trajectory file from a remote location specified by *url*. .. seealso:: load_remote_system ''' from urllib import urlretrieve filename, headers = urlretrieve(url) load_trajectory(filename, format) def write_system(filename, format=None): '''Write the system currently displayed to a file.''' datafile(filename, format=format, mode='w').write('system', current_system()) def write_molecule(filename, format=None): '''Write the system displayed in a file as a molecule.''' datafile(filename, format=format, mode='w').write('molecule',current_system()) import bisect def goto_time(timeval): '''Go to a specific time (in nanoseconds) in the current trajectory. ''' i = bisect.bisect(viewer.frame_times, timeval * 1000) goto_frame(i) def goto_frame(frame): '''Go to a specific frame in the current trajectory.''' viewer.traj_controls.goto_frame(frame) _frame_processors = [] def load_trajectory(name, skip=1, format=None): '''Load a trajectory file into chemlab. You should call this command after you load a `~chemlab.core.System` through load_system or load_remote_system. 
''' df = datafile(name, format=format) dt, coords = df.read('trajectory', skip=skip) boxes = df.read('boxes') viewer.current_traj = coords viewer.frame_times = dt viewer.traj_controls.set_ticks(len(dt)) def update(index): f = coords[index] for fp in _frame_processors: f = fp(coords, index) # update the current representation viewer.representation.update_positions(f) viewer.representation.update_box(boxes[index]) current_system().r_array = f current_system().box_vectors = boxes[index] viewer.traj_controls.set_time(dt[index]) viewer.update() viewer.traj_controls.show() viewer.traj_controls.frame_changed.connect(update) def guess_bonds(): '''Guess the bonds in the current system''' current_system().guess_bonds() reload_system() def reload_system(): '''Reload the current system in the viewer.''' return display_system(current_system(), autozoom=False)
gpl-3.0
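goto_time above converts a requested time into a frame index by scaling into the trajectory's time unit and bisecting the sorted frame times. A tiny illustration of that lookup with made-up frame times:

import bisect

# Sorted frame times, in the trajectory's own time unit (1000x the unit
# passed by the caller, matching the `timeval * 1000` scaling above).
frame_times = [0, 500, 1000, 1500, 2000]

def frame_for_time(timeval):
    """Return the frame index to jump to for a requested time."""
    return bisect.bisect(frame_times, timeval * 1000)

print(frame_for_time(0.4))  # -> 1 (first frame past 400)
print(frame_for_time(1.0))  # -> 3 (bisect returns the insertion point to
                            #       the right of an exact match)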
mateor/pants
tests/python/pants_test/engine/test_isolated_process.py
2
10203
# coding=utf-8 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import unittest from pants.engine.engine import LocalSerialEngine from pants.engine.fs import Files, PathGlobs from pants.engine.isolated_process import (Binary, Snapshot, SnapshottedProcess, SnapshottedProcessRequest, _snapshot_path, create_snapshot_tasks) from pants.engine.nodes import Return, Throw from pants.engine.selectors import Select, SelectLiteral from pants.util.contextutil import open_tar from pants.util.objects import datatype from pants_test.engine.scheduler_test_base import SchedulerTestBase class Concatted(datatype('Concatted', ['value'])): pass class ShellCat(Binary): @property def bin_path(self): return '/bin/cat' def file_list_to_args_for_cat_with_snapshot_subjects_and_output_file(files, snapshot): return SnapshottedProcessRequest(args=tuple(sorted(f.path for f in files.dependencies)), snapshots=(snapshot,)) def process_result_to_concatted_from_outfile(process_result, sandbox_dir): with open(os.path.join(sandbox_dir, 'outfile')) as f: # TODO might be better to allow for this to be done via Nodes. But I'm not sure how as yet. return Concatted(f.read()) def process_result_to_concatted(process_result, sandbox_dir): return Concatted(process_result.stdout) class ShellCatToOutFile(Binary): def prefix_of_command(self): return tuple(['sh', '-c', 'cat $@ > outfile', 'unused']) class ShellFailCommand(Binary): def prefix_of_command(self): return tuple(['sh', '-c', 'exit 1']) def __repr__(self): return 'ShellFailCommand' def fail_process_result(process_result, sandbox_dir): raise Exception('Failed in output conversion!') def empty_process_request(): return SnapshottedProcessRequest(args=tuple()) class JavaOutputDir(datatype('JavaOutputDir', ['path'])): pass class Javac(Binary): @property def bin_path(self): return '/usr/bin/javac' def java_sources_to_javac_args(java_sources, sources_snapshot, out_dir): return SnapshottedProcessRequest(args=('-d', out_dir.path)+ tuple(f.path for f in java_sources.dependencies), snapshots=(sources_snapshot,), directories_to_create=(out_dir.path,)) class ClasspathEntry(datatype('ClasspathEntry', ['path'])): """A classpath entry for a subject.""" def process_result_to_classpath_entry(process_result, sandbox_dir): if not process_result.exit_code: # this implies that we should pass some / all of the inputs to the output conversion so they can grab config. # TODO string name association isn't great. return ClasspathEntry(os.path.join(sandbox_dir, 'build')) class SnapshottedProcessRequestTest(SchedulerTestBase, unittest.TestCase): def test_blows_up_on_unhashable_args(self): with self.assertRaises(ValueError): SnapshottedProcessRequest(args=['1']) with self.assertRaises(ValueError): SnapshottedProcessRequest(args=('1',), snapshots=[]) with self.assertRaises(ValueError): SnapshottedProcessRequest(args=('1',), directories_to_create=[]) class IsolatedProcessTest(SchedulerTestBase, unittest.TestCase): # TODO test exercising what happens if a snapshot file doesn't exist after hitting cache for snapshot node. 
def test_gather_snapshot_of_pathglobs(self): project_tree = self.mk_example_fs_tree() scheduler = self.mk_scheduler(project_tree=project_tree, tasks=create_snapshot_tasks(project_tree)) snapshot_archive_root = os.path.join(project_tree.build_root, '.snapshots') request = scheduler.execution_request([Snapshot], [PathGlobs.create('', globs=['fs_test/a/b/*'])]) LocalSerialEngine(scheduler).reduce(request) root_entries = scheduler.root_entries(request).items() self.assertEquals(1, len(root_entries)) state = self.assertFirstEntryIsReturn(root_entries, scheduler) snapshot = state.value self.assert_archive_files(['fs_test/a/b/1.txt', 'fs_test/a/b/2'], snapshot, snapshot_archive_root) def test_integration_concat_with_snapshot_subjects_test(self): scheduler = self.mk_scheduler_in_example_fs([ # subject to files / product of subject to files for snapshot. SnapshottedProcess.create(product_type=Concatted, binary_type=ShellCatToOutFile, input_selectors=(Select(Files), Select(Snapshot)), input_conversion=file_list_to_args_for_cat_with_snapshot_subjects_and_output_file, output_conversion=process_result_to_concatted_from_outfile), [ShellCatToOutFile, [], ShellCatToOutFile], ]) request = scheduler.execution_request([Concatted], [PathGlobs.create('', globs=['fs_test/a/b/*'])]) LocalSerialEngine(scheduler).reduce(request) root_entries = scheduler.root_entries(request).items() self.assertEquals(1, len(root_entries)) state = self.assertFirstEntryIsReturn(root_entries, scheduler) concatted = state.value self.assertEqual(Concatted('one\ntwo\n'), concatted) def test_javac_compilation_example(self): sources = PathGlobs.create('', files=['scheduler_inputs/src/java/simple/Simple.java']) scheduler = self.mk_scheduler_in_example_fs([ SnapshottedProcess.create(ClasspathEntry, Javac, (Select(Files), Select(Snapshot), SelectLiteral(JavaOutputDir('build'), JavaOutputDir)), java_sources_to_javac_args, process_result_to_classpath_entry), [Javac, [], Javac] ]) request = scheduler.execution_request( [ClasspathEntry], [sources]) LocalSerialEngine(scheduler).reduce(request) root_entries = scheduler.root_entries(request).items() self.assertEquals(1, len(root_entries)) state = self.assertFirstEntryIsReturn(root_entries, scheduler) classpath_entry = state.value self.assertIsInstance(classpath_entry, ClasspathEntry) self.assertTrue(os.path.exists(os.path.join(classpath_entry.path, 'simple', 'Simple.class'))) def test_failed_command_propagates_throw(self): scheduler = self.mk_scheduler_in_example_fs([ # subject to files / product of subject to files for snapshot. SnapshottedProcess.create(product_type=Concatted, binary_type=ShellFailCommand, input_selectors=tuple(), input_conversion=empty_process_request, output_conversion=fail_process_result), [ShellFailCommand, [], ShellFailCommand] ]) request = scheduler.execution_request([Concatted], [PathGlobs.create('', globs=['fs_test/a/b/*'])]) LocalSerialEngine(scheduler).reduce(request) root_entries = scheduler.root_entries(request).items() self.assertEquals(1, len(root_entries)) self.assertFirstEntryIsThrow(root_entries, in_msg='Running ShellFailCommand failed with non-zero exit code: 1') def test_failed_output_conversion_propagates_throw(self): scheduler = self.mk_scheduler_in_example_fs([ # subject to files / product of subject to files for snapshot. 
SnapshottedProcess.create(product_type=Concatted, binary_type=ShellCatToOutFile, input_selectors=(Select(Files), Select(Snapshot)), input_conversion=file_list_to_args_for_cat_with_snapshot_subjects_and_output_file, output_conversion=fail_process_result), [ShellCatToOutFile, [], ShellCatToOutFile] ]) request = scheduler.execution_request([Concatted], [PathGlobs.create('', globs=['fs_test/a/b/*'])]) LocalSerialEngine(scheduler).reduce(request) root_entries = scheduler.root_entries(request).items() self.assertEquals(1, len(root_entries)) self.assertFirstEntryIsThrow(root_entries, in_msg='Failed in output conversion!') def assert_archive_files(self, expected_archive_files, snapshot, snapshot_archive_root): with open_tar(_snapshot_path(snapshot, snapshot_archive_root), errorlevel=1) as tar: self.assertEqual(sorted(expected_archive_files), sorted(tar.getnames())) def assertFirstEntryIsReturn(self, root_entries, scheduler): root, state = root_entries[0] self.assertReturn(state, scheduler) return state def assertFirstEntryIsThrow(self, root_entries, in_msg=None): root, state = root_entries[0] self.assertIsInstance(state, Throw) if in_msg: self.assertIn(in_msg, str(state)) return state def mk_example_fs_tree(self): return self.mk_fs_tree(os.path.join(os.path.dirname(__file__), 'examples')) def mk_scheduler_in_example_fs(self, rules): project_tree = self.mk_example_fs_tree() # TODO: remove `create_snapshot_tasks`: see TODO there. scheduler = self.mk_scheduler(tasks=(rules + create_snapshot_tasks(project_tree)), project_tree=project_tree) return scheduler def assertReturn(self, state, scheduler): is_return = isinstance(state, Return) if is_return: return else: self.fail('Expected a Return, but found a {}. trace below:\n{}' .format(state, scheduler.trace())) def assertPathContains(self, expected_files, path): for i in expected_files: self.assertTrue(os.path.exists(os.path.join(path, i)), 'Expected {} to exist in {} but did not'.format(i, path))
apache-2.0
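The pants test above builds its products with the datatype helper (Concatted, ClasspathEntry), which behaves much like a named tuple. A rough standard-library approximation, for illustration only (this is not pants' actual implementation):

from collections import namedtuple

# Rough stand-in for `datatype('Concatted', ['value'])`: an immutable
# value type compared field by field.
Concatted = namedtuple('Concatted', ['value'])

a = Concatted('one\ntwo\n')
b = Concatted('one\ntwo\n')
print(a == b)         # -> True: equality by value, which the assertions rely on
print(repr(a.value))  # -> 'one\ntwo\n'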
afrolov1/nova
nova/api/openstack/compute/plugins/v3/deferred_delete.py
17
3215
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The deferred instance delete extension."""

import webob

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception

ALIAS = 'os-deferred-delete'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)


class DeferredDeleteController(wsgi.Controller):
    def __init__(self, *args, **kwargs):
        super(DeferredDeleteController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @extensions.expected_errors((404, 409, 413))
    @wsgi.action('restore')
    def _restore(self, req, id, body):
        """Restore a previously deleted instance."""
        context = req.environ["nova.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.restore(context, instance)
        except exception.QuotaError as error:
            raise webob.exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message(),
                headers={'Retry-After': 0})
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'restore')
        return webob.Response(status_int=202)

    @extensions.expected_errors((404, 409))
    @wsgi.action('force_delete')
    def _force_delete(self, req, id, body):
        """Force delete of instance before deferred cleanup."""
        context = req.environ["nova.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.force_delete(context, instance)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'force_delete')
        return webob.Response(status_int=202)


class DeferredDelete(extensions.V3APIExtensionBase):
    """Instance deferred delete."""

    name = "DeferredDelete"
    alias = "os-deferred-delete"
    version = 1

    def get_controller_extensions(self):
        controller = DeferredDeleteController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        return []
apache-2.0
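The controller above registers restore and force_delete as server actions that return 202. A hedged sketch of how a client might invoke the restore action over HTTP, assuming the usual servers/{id}/action endpoint shape; the URL, token, and server id are placeholders, not values from this module:

import json
try:
    from urllib.request import Request, urlopen  # py3
except ImportError:
    from urllib2 import Request, urlopen         # py2

def restore_server(base_url, token, server_id):
    """POST a 'restore' action for a soft-deleted server.

    base_url/token/server_id are placeholders; a real deployment would
    obtain them from the service catalog and an auth plugin.
    """
    url = "%s/servers/%s/action" % (base_url, server_id)
    req = Request(url, data=json.dumps({"restore": None}).encode(),
                  headers={"Content-Type": "application/json",
                           "X-Auth-Token": token})
    # A 202 response corresponds to webob.Response(status_int=202) above.
    return urlopen(req)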
Ex-Mente/auxi.0
auxi/tools/materialphysicalproperties/coals.py
1
5954
#!/usr/bin/env python3 """ This module provides physical property data sets and models for coals and cokes. """ from sys import modules from os.path import realpath, dirname, join from math import exp from auxi.tools.materialphysicalproperties.core import Model from auxi.tools.chemistry.stoichiometry import molar_mass as mm from auxi.tools.physicalconstants import R __version__ = '0.3.3' __license__ = 'LGPL v3' __copyright__ = 'Copyright 2016, Ex Mente Technologies (Pty) Ltd' __author__ = 'Johan Zietsman' __credits__ = ['Johan Zietsman'] __maintainer__ = 'Johan zietsman' __email__ = 'johan.zietsman@ex-mente.co.za' __status__ = 'Planning' def _path(relative_path): path = modules[__name__].__file__ path = realpath(path) path = dirname(path) return join(path, relative_path) class DafThermoTy(Model): """ An abstract model that describes a thermochemical property of dry ash-free (daf) coal as a function of composition and temperature. """ def __init__(self, material, proprty, symbol, display_symbol, units, references, datasets): state_schema = {'T': {'required': True, 'type': 'float', 'min': 0.0}, 'y_C': {'required': True, 'type': 'float', 'min': 0.0, 'max': 1.0}, 'y_H': {'required': True, 'type': 'float', 'min': 0.0, 'max': 1.0}, 'y_O': {'required': True, 'type': 'float', 'min': 0.0, 'max': 1.0}, 'y_N': {'required': True, 'type': 'float', 'min': 0.0, 'max': 1.0}, 'y_S': {'required': True, 'type': 'float', 'min': 0.0, 'max': 1.0}} super().__init__(material, proprty, symbol, display_symbol, units, state_schema, references, datasets) def _calc_a(self, y_C, y_H, y_O, y_N, y_S): """ Calculate the mean atomic weight for the specified element mass fractions. :param y_C: Carbon mass fraction :param y_H: Hydrogen mass fraction :param y_O: Oxygen mass fraction :param y_N: Nitrogen mass fraction :param y_S: Sulphur mass fraction :returns: [kg/kmol] mean atomic weight See equation at bottom of page 538 of Merrick1983a. """ return 1 / (y_C/mm("C") + y_H/mm("H") + y_O/mm("O") + y_N/mm("N") + y_S/mm("S")) class DafCpTy(DafThermoTy): """ A model that describes the heat capacity of dry ash-free (daf) coal as a function of composition and temperature. """ def __init__(self): super().__init__('Dry Ash-free Coal', 'Heat Capacity', 'Cp', 'C_p', 'J/kg/K', ['Merrick1983a', 'Merrick1983b'], None) def _calc_g1(self, z): """ Calculate the g1 parameter. :param z: dimensionless temperature """ return exp(z) / ((exp(z) - 1.0) / z) ** 2.0 def calculate(self, **state): """ Calculate the heat capacity at the specified temperature and composition using equation 10 in Merrick1983b. :param T: [K] temperature :param y_C: Carbon mass fraction :param y_H: Hydrogen mass fraction :param y_O: Oxygen mass fraction :param y_N: Nitrogen mass fraction :param y_S: Sulphur mass fraction :returns: [J/kg/K] heat capacity The **state parameter contains the keyword argument(s) specified above that are used to describe the state of the material. """ T = state['T'] y_C = state['y_C'] y_H = state['y_H'] y_O = state['y_O'] y_N = state['y_N'] y_S = state['y_S'] a = self._calc_a(y_C, y_H, y_O, y_N, y_S) / 1000 # kg/mol result = (R/a) * (self._calc_g1(380/T) + 2*self._calc_g1(1800/T)) return result class DafHTy(DafThermoTy): """ A model that describes the enthalpy of dry ash-free (daf) coal as a function of composition and temperature. """ def __init__(self): super().__init__('Dry Ash-free Coal', 'Enthalpy', 'H', 'H', 'J/kg', ['Merrick1983a', 'Merrick1983b'], None) def _calc_g0(self, z): """ Calculate the g0 parameter. 
:param z: dimensionless temperature """ return 1 / (exp(z) - 1) def calculate(self, **state): """ Calculate the enthalpy at the specified temperature and composition using equation 9 in Merrick1983b. :param T: [K] temperature :param y_C: Carbon mass fraction :param y_H: Hydrogen mass fraction :param y_O: Oxygen mass fraction :param y_N: Nitrogen mass fraction :param y_S: Sulphur mass fraction :returns: [J/kg] enthalpy The **state parameter contains the keyword argument(s) specified above that are used to describe the state of the material. """ T = state['T'] y_C = state['y_C'] y_H = state['y_H'] y_O = state['y_O'] y_N = state['y_N'] y_S = state['y_S'] a = self._calc_a(y_C, y_H, y_O, y_N, y_S) / 1000 # kg/mol result = (R/a) * (380*self._calc_g0(380/T) + 3600*self._calc_g0(1800/T)) return result if __name__ == '__main__': composition = {'y_C': 0.8271423317, 'y_H': 0.0442564668, 'y_O': 0.1034694128, 'y_N': 0.020228025, 'y_S': 0.0049037636} cp = DafCpTy() h = DafHTy() def H(T): return h.calculate(T=T, **composition) def Cp(T): return cp.calculate(T=T, **composition) dT = 1 for T in range(0, 810, 10): TK = T + 273.15 dH = (H(TK + dT/2) - H(TK - dT/2))/dT print(T, Cp(TK), dH, H(TK))
lgpl-3.0
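DafCpTy above implements Merrick's daf-coal heat capacity: a mean atomic weight from the element mass fractions, then Cp = (R/a)(g1(380/T) + 2*g1(1800/T)). A self-contained numeric sketch of that calculation, reusing the composition from the module's __main__ block, but with rounded molar-mass literals and an approximate R standing in for auxi's molar_mass helper and physical constant:

from math import exp

R = 8.3144598  # J/mol/K, approximate gas constant

def mean_atomic_weight(y_C, y_H, y_O, y_N, y_S):
    # Equation at the bottom of p. 538 of Merrick (1983a); rounded molar masses.
    return 1.0 / (y_C / 12.011 + y_H / 1.008 + y_O / 15.999 +
                  y_N / 14.007 + y_S / 32.06)

def g1(z):
    return exp(z) / ((exp(z) - 1.0) / z) ** 2.0

def cp_daf(T, **y):
    a = mean_atomic_weight(**y) / 1000.0  # kg/mol
    return (R / a) * (g1(380.0 / T) + 2.0 * g1(1800.0 / T))

composition = dict(y_C=0.8271423317, y_H=0.0442564668, y_O=0.1034694128,
                   y_N=0.020228025, y_S=0.0049037636)
print(cp_daf(298.15, **composition))  # heat capacity in J/kg/K at 25 degC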
vijayanandnandam/youtube-dl
test/test_socks.py
50
3500
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import random import subprocess from test.helper import ( FakeYDL, get_params, ) from youtube_dl.compat import ( compat_str, compat_urllib_request, ) class TestMultipleSocks(unittest.TestCase): @staticmethod def _check_params(attrs): params = get_params() for attr in attrs: if attr not in params: print('Missing %s. Skipping.' % attr) return return params def test_proxy_http(self): params = self._check_params(['primary_proxy', 'primary_server_ip']) if params is None: return ydl = FakeYDL({ 'proxy': params['primary_proxy'] }) self.assertEqual( ydl.urlopen('http://yt-dl.org/ip').read().decode('utf-8'), params['primary_server_ip']) def test_proxy_https(self): params = self._check_params(['primary_proxy', 'primary_server_ip']) if params is None: return ydl = FakeYDL({ 'proxy': params['primary_proxy'] }) self.assertEqual( ydl.urlopen('https://yt-dl.org/ip').read().decode('utf-8'), params['primary_server_ip']) def test_secondary_proxy_http(self): params = self._check_params(['secondary_proxy', 'secondary_server_ip']) if params is None: return ydl = FakeYDL() req = compat_urllib_request.Request('http://yt-dl.org/ip') req.add_header('Ytdl-request-proxy', params['secondary_proxy']) self.assertEqual( ydl.urlopen(req).read().decode('utf-8'), params['secondary_server_ip']) def test_secondary_proxy_https(self): params = self._check_params(['secondary_proxy', 'secondary_server_ip']) if params is None: return ydl = FakeYDL() req = compat_urllib_request.Request('https://yt-dl.org/ip') req.add_header('Ytdl-request-proxy', params['secondary_proxy']) self.assertEqual( ydl.urlopen(req).read().decode('utf-8'), params['secondary_server_ip']) class TestSocks(unittest.TestCase): _SKIP_SOCKS_TEST = True def setUp(self): if self._SKIP_SOCKS_TEST: return self.port = random.randint(20000, 30000) self.server_process = subprocess.Popen([ 'srelay', '-f', '-i', '127.0.0.1:%d' % self.port], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def tearDown(self): if self._SKIP_SOCKS_TEST: return self.server_process.terminate() self.server_process.communicate() def _get_ip(self, protocol): if self._SKIP_SOCKS_TEST: return '127.0.0.1' ydl = FakeYDL({ 'proxy': '%s://127.0.0.1:%d' % (protocol, self.port), }) return ydl.urlopen('http://yt-dl.org/ip').read().decode('utf-8') def test_socks4(self): self.assertTrue(isinstance(self._get_ip('socks4'), compat_str)) def test_socks4a(self): self.assertTrue(isinstance(self._get_ip('socks4a'), compat_str)) def test_socks5(self): self.assertTrue(isinstance(self._get_ip('socks5'), compat_str)) if __name__ == '__main__': unittest.main()
unlicense
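TestMultipleSocks above routes individual requests through a proxy by adding a Ytdl-request-proxy header. A tiny sketch of building such a request object (the proxy address and URL are placeholders; nothing is actually fetched here):

try:
    from urllib.request import Request  # py3
except ImportError:
    from urllib2 import Request         # py2

# Placeholder values; the test reads the real ones from its params file.
proxy = 'socks5://127.0.0.1:1080'
req = Request('http://yt-dl.org/ip')
# youtube-dl's opener looks for this header to pick a per-request proxy,
# which is what test_secondary_proxy_http exercises above.
req.add_header('Ytdl-request-proxy', proxy)
print(req.header_items())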
zlfben/gem5
src/arch/x86/isa/insts/general_purpose/data_transfer/move.py
40
9122
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Authors: Gabe Black microcode = ''' # # Regular moves # def macroop MOV_R_MI { limm t1, imm, dataSize=asz ld reg, seg, [1, t0, t1] }; def macroop MOV_MI_R { limm t1, imm, dataSize=asz st reg, seg, [1, t0, t1] }; def macroop MOV_R_R { mov reg, reg, regm }; def macroop MOV_M_R { st reg, seg, sib, disp }; def macroop MOV_P_R { rdip t7 st reg, seg, riprel, disp }; def macroop MOV_R_M { ld reg, seg, sib, disp }; def macroop MOV_R_P { rdip t7 ld reg, seg, riprel, disp }; def macroop MOV_R_I { limm reg, imm }; def macroop MOV_M_I { limm t1, imm st t1, seg, sib, disp }; def macroop MOV_P_I { rdip t7 limm t1, imm st t1, seg, riprel, disp }; # # Sign extending moves # def macroop MOVSXD_R_R { sexti reg, regm, 31 }; def macroop MOVSXD_R_M { ld t1, seg, sib, disp, dataSize=4 sexti reg, t1, 31 }; def macroop MOVSXD_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=4 sexti reg, t1, 31 }; def macroop MOVSX_B_R_R { mov t1, t1, regm, dataSize=1 sexti reg, t1, 7 }; def macroop MOVSX_B_R_M { ld t1, seg, sib, disp, dataSize=1 sexti reg, t1, 7 }; def macroop MOVSX_B_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=1 sexti reg, t1, 7 }; def macroop MOVSX_W_R_R { sexti reg, regm, 15 }; def macroop MOVSX_W_R_M { ld reg, seg, sib, disp, dataSize=2 sexti reg, reg, 15 }; def macroop MOVSX_W_R_P { rdip t7 ld reg, seg, riprel, disp, dataSize=2 sexti reg, reg, 15 }; # # Zero extending moves # def macroop MOVZX_B_R_R { mov t1, t1, regm, dataSize=1 zexti reg, t1, 7 }; def macroop MOVZX_B_R_M { ld t1, seg, sib, disp, dataSize=1 zexti reg, t1, 7 }; def macroop MOVZX_B_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=1 zexti reg, t1, 7 }; def macroop MOVZX_W_R_R { zexti reg, regm, 15 }; def macroop MOVZX_W_R_M { ld t1, seg, sib, disp, dataSize=2 zexti reg, t1, 15 }; def macroop MOVZX_W_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=2 zexti reg, t1, 15 }; def macroop MOV_C_R { .serializing .adjust_env maxOsz wrcr reg, regm }; def macroop MOV_R_C { .serializing .adjust_env maxOsz rdcr reg, regm }; def macroop MOV_D_R { .serializing .adjust_env maxOsz wrdr reg, regm }; def macroop MOV_R_D { .adjust_env maxOsz rddr reg, regm }; def macroop MOV_R_S { rdsel reg, regm }; def macroop MOV_M_S { rdsel t1, reg st t1, seg, sib, disp, dataSize=2 }; def macroop MOV_P_S { rdip t7 rdsel t1, reg st t1, seg, riprel, disp, dataSize=2 }; def macroop MOV_REAL_S_R { zexti t2, regm, 15, dataSize=8 slli t3, t2, 4, dataSize=8 wrsel reg, regm wrbase reg, t3, dataSize=8 }; def macroop MOV_REAL_S_M { ld t1, seg, sib, disp, dataSize=2 zexti t2, t1, 15, dataSize=8 slli t3, t2, 4, dataSize=8 wrsel reg, t1 wrbase reg, t3, dataSize=8 }; def macroop MOV_REAL_S_P { panic "RIP relative addressing shouldn't happen in real mode" }; def macroop MOV_S_R { andi t0, regm, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, regm, 0xF8, dataSize=8 andi t0, regm, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks regm, t3, dataSize=8 wrdl reg, t3, regm wrsel reg, regm }; def macroop MOV_S_M { ld t1, seg, sib, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, 
[1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOV_S_P { rdip t7 ld t1, seg, riprel, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOVSS_S_R { andi t0, regm, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, regm, 0xF8, dataSize=8 andi t0, regm, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks regm, t3, SSCheck, dataSize=8 wrdl reg, t3, regm wrsel reg, regm }; def macroop MOVSS_S_M { ld t1, seg, sib, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, SSCheck, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOVSS_S_P { rdip t7 ld t1, seg, riprel, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, SSCheck, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOVNTI_M_R { st reg, seg, sib, disp }; def macroop MOVNTI_P_R { rdip t7 st reg, seg, riprel, disp }; def macroop MOVD_XMM_R { mov2fp xmml, regm, srcSize=dsz, destSize=8 lfpimm xmmh, 0 }; def macroop MOVD_XMM_M { ldfp xmml, seg, sib, disp, dataSize=dsz lfpimm xmmh, 0 }; def macroop MOVD_XMM_P { rdip t7 ldfp xmml, seg, riprel, disp, dataSize=dsz lfpimm xmmh, 0 }; def macroop MOVD_R_XMM { mov2int reg, xmmlm, size=dsz }; def macroop MOVD_M_XMM { stfp xmml, seg, sib, disp, dataSize=dsz }; def macroop MOVD_P_XMM { rdip t7 stfp xmml, seg, riprel, disp, dataSize=dsz }; ''' #let {{ # class MOVD(Inst): # "GenFault ${new UnimpInstFault}" #}};
bsd-3-clause
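The move.py microcode above relies on sexti/zexti, which sign- or zero-extend a value from a given bit position (bit 7 for the byte forms of MOVSX/MOVZX). A plain-Python worked example of those two extensions; the helper names are mine, not gem5 microops:

def zext(value, bit):
    """Zero-extend: keep bits 0..bit, clear everything above them."""
    mask = (1 << (bit + 1)) - 1
    return value & mask

def sext(value, bit, width=64):
    """Sign-extend: copy the bit at position `bit` into all higher bits."""
    value = zext(value, bit)
    if value & (1 << bit):
        value |= ((1 << width) - 1) & ~((1 << (bit + 1)) - 1)
    return value

# MOVZX_B / MOVSX_B use bit position 7 (a byte), as in the macroops above.
print(hex(zext(0xf0, 7)))  # -> 0xf0
print(hex(sext(0xf0, 7)))  # -> 0xfffffffffffffff0 (negative byte, sign-extended)
print(hex(sext(0x70, 7)))  # -> 0x70 (positive byte, unchanged)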
adw0rd/lettuce
tests/integration/lib/Django-1.3/django/contrib/auth/tests/tokens.py
227
3416
from datetime import date, timedelta

from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import TestCase


class TokenGeneratorTest(TestCase):

    def test_make_token(self):
        """
        Ensure that we can make a token and that it is valid
        """
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        self.assertTrue(p0.check_token(user, tk1))

    def test_10265(self):
        """
        Ensure that the token generated for a user created in the same request
        will work correctly.
        """
        # See ticket #10265
        user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        reload = User.objects.get(username='comebackkid')
        tk2 = p0.make_token(reload)
        self.assertEqual(tk1, tk2)

    def test_timeout(self):
        """
        Ensure we can use the token after n days, but no greater.
        """
        # Uses a mocked version of PasswordResetTokenGenerator so we can change
        # the value of 'today'
        class Mocked(PasswordResetTokenGenerator):
            def __init__(self, today):
                self._today_val = today

            def _today(self):
                return self._today_val

        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
        self.assertTrue(p1.check_token(user, tk1))
        p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
        self.assertFalse(p2.check_token(user, tk1))

    def test_django12_hash(self):
        """
        Ensure we can use the hashes generated by Django 1.2
        """
        # Hard code in the Django 1.2 algorithm (not the result, as it is time
        # dependent)
        def _make_token(user):
            from django.utils.hashcompat import sha_constructor
            from django.utils.http import int_to_base36

            timestamp = (date.today() - date(2001, 1, 1)).days
            ts_b36 = int_to_base36(timestamp)
            hash = sha_constructor(settings.SECRET_KEY + unicode(user.id) +
                                   user.password +
                                   user.last_login.strftime('%Y-%m-%d %H:%M:%S') +
                                   unicode(timestamp)).hexdigest()[::2]
            return "%s-%s" % (ts_b36, hash)

        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = _make_token(user)
        self.assertTrue(p0.check_token(user, tk1))

    def test_date_length(self):
        """
        Make sure we don't allow overly long dates, causing a potential DoS.
        """
        user = User.objects.create_user('ima1337h4x0r', 'test4@example.com', 'p4ssw0rd')
        p0 = PasswordResetTokenGenerator()
        # This will put a 14-digit base36 timestamp into the token, which is too large.
        tk1 = p0._make_token_with_timestamp(user, 175455491841851871349)
        self.assertFalse(p0.check_token(user, tk1))
gpl-3.0
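test_django12_hash above reimplements the Django 1.2 token layout: days since 2001-01-01 encoded in base36, a dash, then half of a SHA1 hex digest. A small sketch of the timestamp half of that layout, with a local base36 encoder standing in for django.utils.http.int_to_base36:

from datetime import date

def int_to_base36(i):
    """Minimal base36 encoder (stands in for django.utils.http.int_to_base36)."""
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if i == 0:
        return "0"
    out = ""
    while i:
        i, rem = divmod(i, 36)
        out = digits[rem] + out
    return out

# Days since 2001-01-01, exactly as the hard-coded Django 1.2 algorithm
# in test_django12_hash computes its timestamp.
timestamp = (date.today() - date(2001, 1, 1)).days
print(timestamp, int_to_base36(timestamp))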
narurien/ganeti-ceph
test/py/ganeti.storage.drbd_unittest.py
1
17356
#!/usr/bin/python # # Copyright (C) 2006, 2007, 2010, 2012, 2013 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Script for unittesting the drbd module""" import os from ganeti import constants from ganeti import errors from ganeti.storage import drbd from ganeti.storage import drbd_info from ganeti.storage import drbd_cmdgen import testutils class TestDRBD8(testutils.GanetiTestCase): def testGetVersion(self): data = [ "version: 8.0.0 (api:76/proto:80)", "version: 8.0.12 (api:76/proto:86-91)", "version: 8.2.7 (api:88/proto:0-100)", "version: 8.3.7.49 (api:188/proto:13-191)", ] result = [ { "k_major": 8, "k_minor": 0, "k_point": 0, "api": 76, "proto": 80, }, { "k_major": 8, "k_minor": 0, "k_point": 12, "api": 76, "proto": 86, "proto2": "91", }, { "k_major": 8, "k_minor": 2, "k_point": 7, "api": 88, "proto": 0, "proto2": "100", }, { "k_major": 8, "k_minor": 3, "k_point": 7, "k_fix": "49", "api": 188, "proto": 13, "proto2": "191", } ] for d, r in zip(data, result): info = drbd.DRBD8Info.CreateFromLines([d]) self.assertEqual(info.GetVersion(), r) self.assertEqual(info.GetVersionString(), d.replace("version: ", "")) class TestDRBD8Runner(testutils.GanetiTestCase): """Testing case for drbd.DRBD8Dev""" @staticmethod def _has_disk(data, dname, mname, meta_index=0): """Check local disk corectness""" retval = ( "local_dev" in data and data["local_dev"] == dname and "meta_dev" in data and data["meta_dev"] == mname and ((meta_index is None and "meta_index" not in data) or ("meta_index" in data and data["meta_index"] == meta_index) ) ) return retval @staticmethod def _has_net(data, local, remote): """Check network connection parameters""" retval = ( "local_addr" in data and data["local_addr"] == local and "remote_addr" in data and data["remote_addr"] == remote ) return retval def testParser83Creation(self): """Test drbdsetup show parser creation""" drbd_info.DRBD83ShowInfo._GetShowParser() def testParser84Creation(self): """Test drbdsetup show parser creation""" drbd_info.DRBD84ShowInfo._GetShowParser() def testParser80(self): """Test drbdsetup show parser for disk and network version 8.0""" data = testutils.ReadTestData("bdev-drbd-8.0.txt") result = drbd_info.DRBD83ShowInfo.GetDevInfo(data) self.failUnless(self._has_disk(result, "/dev/xenvg/test.data", "/dev/xenvg/test.meta"), "Wrong local disk info") self.failUnless(self._has_net(result, ("192.0.2.1", 11000), ("192.0.2.2", 11000)), "Wrong network info (8.0.x)") def testParser83(self): """Test drbdsetup show parser for disk and network version 8.3""" data = testutils.ReadTestData("bdev-drbd-8.3.txt") result = drbd_info.DRBD83ShowInfo.GetDevInfo(data) self.failUnless(self._has_disk(result, "/dev/xenvg/test.data", "/dev/xenvg/test.meta"), "Wrong local disk info") self.failUnless(self._has_net(result, ("192.0.2.1", 11000), ("192.0.2.2", 11000)), "Wrong network info (8.3.x)") def 
testParser84(self): """Test drbdsetup show parser for disk and network version 8.4""" data = testutils.ReadTestData("bdev-drbd-8.4.txt") result = drbd_info.DRBD84ShowInfo.GetDevInfo(data) self.failUnless(self._has_disk(result, "/dev/xenvg/test.data", "/dev/xenvg/test.meta"), "Wrong local disk info") self.failUnless(self._has_net(result, ("192.0.2.1", 11000), ("192.0.2.2", 11000)), "Wrong network info (8.4.x)") def testParser84NoDiskParams(self): """Test drbdsetup show parser for 8.4 without disk params The missing disk parameters occur after re-attaching a local disk but before setting the disk params. """ data = testutils.ReadTestData("bdev-drbd-8.4-no-disk-params.txt") result = drbd_info.DRBD84ShowInfo.GetDevInfo(data) self.failUnless(self._has_disk(result, "/dev/xenvg/test.data", "/dev/xenvg/test.meta", meta_index=None), "Wrong local disk info") self.failUnless(self._has_net(result, ("192.0.2.1", 11000), ("192.0.2.2", 11000)), "Wrong network info (8.4.x)") def testParserNetIP4(self): """Test drbdsetup show parser for IPv4 network""" data = testutils.ReadTestData("bdev-drbd-net-ip4.txt") result = drbd_info.DRBD83ShowInfo.GetDevInfo(data) self.failUnless(("local_dev" not in result and "meta_dev" not in result and "meta_index" not in result), "Should not find local disk info") self.failUnless(self._has_net(result, ("192.0.2.1", 11002), ("192.0.2.2", 11002)), "Wrong network info (IPv4)") def testParserNetIP6(self): """Test drbdsetup show parser for IPv6 network""" data = testutils.ReadTestData("bdev-drbd-net-ip6.txt") result = drbd_info.DRBD83ShowInfo.GetDevInfo(data) self.failUnless(("local_dev" not in result and "meta_dev" not in result and "meta_index" not in result), "Should not find local disk info") self.failUnless(self._has_net(result, ("2001:db8:65::1", 11048), ("2001:db8:66::1", 11048)), "Wrong network info (IPv6)") def testParserDisk(self): """Test drbdsetup show parser for disk""" data = testutils.ReadTestData("bdev-drbd-disk.txt") result = drbd_info.DRBD83ShowInfo.GetDevInfo(data) self.failUnless(self._has_disk(result, "/dev/xenvg/test.data", "/dev/xenvg/test.meta"), "Wrong local disk info") self.failUnless(("local_addr" not in result and "remote_addr" not in result), "Should not find network info") def testBarriersOptions(self): """Test class method that generates drbdsetup options for disk barriers""" # Tests that should fail because of wrong version/options combinations should_fail = [ (8, 0, 12, "bfd", True), (8, 0, 12, "fd", False), (8, 0, 12, "b", True), (8, 2, 7, "bfd", True), (8, 2, 7, "b", True) ] for vmaj, vmin, vrel, opts, meta in should_fail: self.assertRaises(errors.BlockDeviceError, drbd_cmdgen.DRBD83CmdGenerator._ComputeDiskBarrierArgs, vmaj, vmin, vrel, opts, meta) # get the valid options from the frozenset(frozenset()) in constants. 
valid_options = [list(x)[0] for x in constants.DRBD_VALID_BARRIER_OPT] # Versions that do not support anything for vmaj, vmin, vrel in ((8, 0, 0), (8, 0, 11), (8, 2, 6)): for opts in valid_options: self.assertRaises( errors.BlockDeviceError, drbd_cmdgen.DRBD83CmdGenerator._ComputeDiskBarrierArgs, vmaj, vmin, vrel, opts, True) # Versions with partial support (testing only options that are supported) tests = [ (8, 0, 12, "n", False, []), (8, 0, 12, "n", True, ["--no-md-flushes"]), (8, 2, 7, "n", False, []), (8, 2, 7, "fd", False, ["--no-disk-flushes", "--no-disk-drain"]), (8, 0, 12, "n", True, ["--no-md-flushes"]), ] # Versions that support everything for vmaj, vmin, vrel in ((8, 3, 0), (8, 3, 12)): tests.append((vmaj, vmin, vrel, "bfd", True, ["--no-disk-barrier", "--no-disk-drain", "--no-disk-flushes", "--no-md-flushes"])) tests.append((vmaj, vmin, vrel, "n", False, [])) tests.append((vmaj, vmin, vrel, "b", True, ["--no-disk-barrier", "--no-md-flushes"])) tests.append((vmaj, vmin, vrel, "fd", False, ["--no-disk-flushes", "--no-disk-drain"])) tests.append((vmaj, vmin, vrel, "n", True, ["--no-md-flushes"])) # Test execution for test in tests: vmaj, vmin, vrel, disabled_barriers, disable_meta_flush, expected = test args = \ drbd_cmdgen.DRBD83CmdGenerator._ComputeDiskBarrierArgs( vmaj, vmin, vrel, disabled_barriers, disable_meta_flush) self.failUnless(set(args) == set(expected), "For test %s, got wrong results %s" % (test, args)) # Unsupported or invalid versions for vmaj, vmin, vrel in ((0, 7, 25), (9, 0, 0), (7, 0, 0), (8, 4, 0)): self.assertRaises(errors.BlockDeviceError, drbd_cmdgen.DRBD83CmdGenerator._ComputeDiskBarrierArgs, vmaj, vmin, vrel, "n", True) # Invalid options for option in ("", "c", "whatever", "nbdfc", "nf"): self.assertRaises(errors.BlockDeviceError, drbd_cmdgen.DRBD83CmdGenerator._ComputeDiskBarrierArgs, 8, 3, 11, option, True) class TestDRBD8Status(testutils.GanetiTestCase): """Testing case for DRBD8Dev /proc status""" def setUp(self): """Read in txt data""" testutils.GanetiTestCase.setUp(self) proc_data = testutils.TestDataFilename("proc_drbd8.txt") proc80e_data = testutils.TestDataFilename("proc_drbd80-emptyline.txt") proc83_data = testutils.TestDataFilename("proc_drbd83.txt") proc83_sync_data = testutils.TestDataFilename("proc_drbd83_sync.txt") proc83_sync_krnl_data = \ testutils.TestDataFilename("proc_drbd83_sync_krnl2.6.39.txt") proc84_data = testutils.TestDataFilename("proc_drbd84.txt") proc84_sync_data = testutils.TestDataFilename("proc_drbd84_sync.txt") self.proc80ev_data = \ testutils.TestDataFilename("proc_drbd80-emptyversion.txt") self.drbd_info = drbd.DRBD8Info.CreateFromFile(filename=proc_data) self.drbd_info80e = drbd.DRBD8Info.CreateFromFile(filename=proc80e_data) self.drbd_info83 = drbd.DRBD8Info.CreateFromFile(filename=proc83_data) self.drbd_info83_sync = \ drbd.DRBD8Info.CreateFromFile(filename=proc83_sync_data) self.drbd_info83_sync_krnl = \ drbd.DRBD8Info.CreateFromFile(filename=proc83_sync_krnl_data) self.drbd_info84 = drbd.DRBD8Info.CreateFromFile(filename=proc84_data) self.drbd_info84_sync = \ drbd.DRBD8Info.CreateFromFile(filename=proc84_sync_data) def testIOErrors(self): """Test handling of errors while reading the proc file.""" temp_file = self._CreateTempFile() os.unlink(temp_file) self.failUnlessRaises(errors.BlockDeviceError, drbd.DRBD8Info.CreateFromFile, filename=temp_file) def testHelper(self): """Test reading usermode_helper in /sys.""" sys_drbd_helper = testutils.TestDataFilename("sys_drbd_usermode_helper.txt") drbd_helper = 
drbd.DRBD8.GetUsermodeHelper(filename=sys_drbd_helper) self.failUnlessEqual(drbd_helper, "/bin/true") def testHelperIOErrors(self): """Test handling of errors while reading usermode_helper in /sys.""" temp_file = self._CreateTempFile() os.unlink(temp_file) self.failUnlessRaises(errors.BlockDeviceError, drbd.DRBD8.GetUsermodeHelper, filename=temp_file) def testMinorNotFound(self): """Test not-found-minor in /proc""" self.failUnless(not self.drbd_info.HasMinorStatus(9)) self.failUnless(not self.drbd_info83.HasMinorStatus(9)) self.failUnless(not self.drbd_info80e.HasMinorStatus(3)) def testLineNotMatch(self): """Test wrong line passed to drbd_info.DRBD8Status""" self.assertRaises(errors.BlockDeviceError, drbd_info.DRBD8Status, "foo") def testMinor0(self): """Test connected, primary device""" for info in [self.drbd_info, self.drbd_info83, self.drbd_info84]: stats = info.GetMinorStatus(0) self.failUnless(stats.is_in_use) self.failUnless(stats.is_connected and stats.is_primary and stats.peer_secondary and stats.is_disk_uptodate) def testMinor1(self): """Test connected, secondary device""" for info in [self.drbd_info, self.drbd_info83, self.drbd_info84]: stats = info.GetMinorStatus(1) self.failUnless(stats.is_in_use) self.failUnless(stats.is_connected and stats.is_secondary and stats.peer_primary and stats.is_disk_uptodate) def testMinor2(self): """Test unconfigured device""" for info in [self.drbd_info, self.drbd_info83, self.drbd_info80e, self.drbd_info84]: stats = info.GetMinorStatus(2) self.failIf(stats.is_in_use) def testMinor4(self): """Test WFconn device""" for info in [self.drbd_info, self.drbd_info83, self.drbd_info84]: stats = info.GetMinorStatus(4) self.failUnless(stats.is_in_use) self.failUnless(stats.is_wfconn and stats.is_primary and stats.rrole == "Unknown" and stats.is_disk_uptodate) def testMinor6(self): """Test diskless device""" for info in [self.drbd_info, self.drbd_info83, self.drbd_info84]: stats = info.GetMinorStatus(6) self.failUnless(stats.is_in_use) self.failUnless(stats.is_connected and stats.is_secondary and stats.peer_primary and stats.is_diskless) def testMinor8(self): """Test standalone device""" for info in [self.drbd_info, self.drbd_info83, self.drbd_info84]: stats = info.GetMinorStatus(8) self.failUnless(stats.is_in_use) self.failUnless(stats.is_standalone and stats.rrole == "Unknown" and stats.is_disk_uptodate) def testDRBD83SyncFine(self): stats = self.drbd_info83_sync.GetMinorStatus(3) self.failUnless(stats.is_in_resync) self.assertAlmostEqual(stats.sync_percent, 34.9) def testDRBD83SyncBroken(self): stats = self.drbd_info83_sync_krnl.GetMinorStatus(3) self.failUnless(stats.is_in_resync) self.assertAlmostEqual(stats.sync_percent, 2.4) def testDRBD84Sync(self): stats = self.drbd_info84_sync.GetMinorStatus(5) self.failUnless(stats.is_in_resync) self.assertAlmostEqual(stats.sync_percent, 68.5) def testDRBDEmptyVersion(self): self.assertRaises(errors.BlockDeviceError, drbd.DRBD8Info.CreateFromFile, filename=self.proc80ev_data) class TestDRBD8Construction(testutils.GanetiTestCase): def setUp(self): """Read in txt data""" testutils.GanetiTestCase.setUp(self) self.proc80_info = \ drbd_info.DRBD8Info.CreateFromFile( filename=testutils.TestDataFilename("proc_drbd8.txt")) self.proc83_info = \ drbd_info.DRBD8Info.CreateFromFile( filename=testutils.TestDataFilename("proc_drbd83.txt")) self.proc84_info = \ drbd_info.DRBD8Info.CreateFromFile( filename=testutils.TestDataFilename("proc_drbd84.txt")) self.test_unique_id = ("hosta.com", 123, "host2.com", 123, 0, "secret") 
@testutils.patch_object(drbd.DRBD8, "GetProcInfo") def testConstructionWith80Data(self, mock_create_from_file): mock_create_from_file.return_value = self.proc80_info inst = drbd.DRBD8Dev(self.test_unique_id, [], 123, {}) self.assertEqual(inst._show_info_cls, drbd_info.DRBD83ShowInfo) self.assertTrue(isinstance(inst._cmd_gen, drbd_cmdgen.DRBD83CmdGenerator)) @testutils.patch_object(drbd.DRBD8, "GetProcInfo") def testConstructionWith83Data(self, mock_create_from_file): mock_create_from_file.return_value = self.proc83_info inst = drbd.DRBD8Dev(self.test_unique_id, [], 123, {}) self.assertEqual(inst._show_info_cls, drbd_info.DRBD83ShowInfo) self.assertTrue(isinstance(inst._cmd_gen, drbd_cmdgen.DRBD83CmdGenerator)) @testutils.patch_object(drbd.DRBD8, "GetProcInfo") def testConstructionWith84Data(self, mock_create_from_file): mock_create_from_file.return_value = self.proc84_info inst = drbd.DRBD8Dev(self.test_unique_id, [], 123, {}) self.assertEqual(inst._show_info_cls, drbd_info.DRBD84ShowInfo) self.assertTrue(isinstance(inst._cmd_gen, drbd_cmdgen.DRBD84CmdGenerator)) if __name__ == "__main__": testutils.GanetiTestProgram()
gpl-2.0
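A minimal sketch (not the actual ganeti implementation) of the version-line parsing that TestDRBD8.testGetVersion above exercises: turning a /proc/drbd "version:" line into the dict shape the test expects. The regex and function name here are illustrative only.

import re

_VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.(\d+))?"
                         r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")

def parse_drbd_version_line(line):
    """Parse e.g. 'version: 8.3.7.49 (api:188/proto:13-191)'."""
    match = _VERSION_RE.match(line)
    if match is None:
        raise ValueError("unparsable DRBD version line: %r" % line)
    major, minor, point, fix, api, proto, proto2 = match.groups()
    version = {
        "k_major": int(major),
        "k_minor": int(minor),
        "k_point": int(point),
        "api": int(api),
        "proto": int(proto),
    }
    if fix is not None:
        version["k_fix"] = fix        # kept as a string, as in the test data
    if proto2 is not None:
        version["proto2"] = proto2    # upper end of the protocol range
    return version

# parse_drbd_version_line("version: 8.0.12 (api:76/proto:86-91)")
# -> {'k_major': 8, 'k_minor': 0, 'k_point': 12, 'api': 76,
#     'proto': 86, 'proto2': '91'}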
wkschwartz/django
tests/utils_tests/test_archive.py
1
3026
import os import stat import sys import tempfile import unittest from django.core.exceptions import SuspiciousOperation from django.test import SimpleTestCase from django.utils import archive class TestArchive(unittest.TestCase): def setUp(self): self.testdir = os.path.join(os.path.dirname(__file__), 'archives') self.old_cwd = os.getcwd() os.chdir(self.testdir) def tearDown(self): os.chdir(self.old_cwd) def test_extract_function(self): for entry in os.scandir(self.testdir): with self.subTest(entry.name), tempfile.TemporaryDirectory() as tmpdir: archive.extract(entry.path, tmpdir) self.assertTrue(os.path.isfile(os.path.join(tmpdir, '1'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, '2'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', '1'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', '2'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', 'bar', '1'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', 'bar', '2'))) @unittest.skipIf(sys.platform == 'win32', 'Python on Windows has a limited os.chmod().') def test_extract_file_permissions(self): """archive.extract() preserves file permissions.""" mask = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO umask = os.umask(0) os.umask(umask) # Restore the original umask. for entry in os.scandir(self.testdir): if entry.name.startswith('leadpath_'): continue with self.subTest(entry.name), tempfile.TemporaryDirectory() as tmpdir: archive.extract(entry.path, tmpdir) # An executable file in the archive has executable permissions. filepath = os.path.join(tmpdir, 'executable') self.assertEqual(os.stat(filepath).st_mode & mask, 0o775) # A file is readable even if permission data is missing. filepath = os.path.join(tmpdir, 'no_permissions') self.assertEqual(os.stat(filepath).st_mode & mask, 0o666 & ~umask) class TestArchiveInvalid(SimpleTestCase): def test_extract_function_traversal(self): archives_dir = os.path.join(os.path.dirname(__file__), 'traversal_archives') tests = [ ('traversal.tar', '..'), ('traversal_absolute.tar', '/tmp/evil.py'), ] if sys.platform == 'win32': tests += [ ('traversal_disk_win.tar', 'd:evil.py'), ('traversal_disk_win.zip', 'd:evil.py'), ] msg = "Archive contains invalid path: '%s'" for entry, invalid_path in tests: with self.subTest(entry), tempfile.TemporaryDirectory() as tmpdir: with self.assertRaisesMessage(SuspiciousOperation, msg % invalid_path): archive.extract(os.path.join(archives_dir, entry), tmpdir)
bsd-3-clause
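A hedged sketch of the kind of member-path validation that the traversal tests above exercise; this is not Django's actual extractor code, and the helper name is invented. It rejects absolute paths, ".." escapes and Windows drive-relative names before anything is written to disk.

import os

from django.core.exceptions import SuspiciousOperation

def safe_target_path(base_dir, member_name):
    # splitdrive() catches Windows drive-relative names such as "d:evil.py";
    # the realpath comparison catches "../" escapes and absolute paths such
    # as "/tmp/evil.py".
    drive, tail = os.path.splitdrive(member_name)
    base_dir = os.path.realpath(base_dir)
    full_path = os.path.realpath(os.path.join(base_dir, tail))
    if drive or not full_path.startswith(base_dir + os.sep):
        raise SuspiciousOperation(
            "Archive contains invalid path: %r" % member_name)
    return full_path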
StevenSLXie/tinyble
build/lib/tinyble/query.py
2
14812
""" Contains the querying interface. Starting with :class:`~tinydb.queries.Query` you can construct complex queries: >>> ((where('f1') == 5) & (where('f2') != 2)) | where('s').matches(r'^\w+$') (('f1' == 5) and ('f2' != 2)) or ('s' ~= ^\w+$ ) Queries are executed by using the ``__call__``: >>> q = where('val') == 5 >>> q({'val': 5}) True >>> q({'val': 1}) False """ import re import sys from tinydb.utils import catch_warning __all__ = ('Query',) def is_sequence(obj): return hasattr(obj, '__iter__') class AndOrMixin(object): """ A mixin providing methods calls ``&`` and ``|``. All queries can be combined with ``&`` and ``|``. Thus, we provide a mixin here to prevent repeating this code all the time. """ def __or__(self, other): """ Combines this query and another with logical or. Example: >>> (where('f1') == 5) | (where('f2') != 2) ('f1' == 5) or ('f2' != 2) :rtype: QueryOr """ return QueryOr(self, other) def __and__(self, other): """ Combines this query and another with logical and. Example: >>> (where('f1') == 5) & (where('f2') != 2) ('f1' == 5) and ('f2' != 2) :rtype: QueryAnd """ return QueryAnd(self, other) class Query(AndOrMixin): """ Provides methods to do tests on dict fields. Any type of comparison will be called in this class. In addition, it is aliased to :data:`where` to provide a more intuitive syntax. When not using any comparison operation, this simply tests for existence of the given key. """ def __init__(self, key): self._key = key self._cmp = None self._repr = 'has \'{0}\''.format(key) def matches(self, regex): """ Run a regex test against a dict value (whole string has to match). >>> where('f1').matches(r'^\w+$') 'f1' ~= ^\w+$ :param regex: The regular expression to pass to ``re.match`` :rtype: QueryRegex """ return QueryRegex(self._key, regex, re_method='match') def contains(self, regex): """ Run a regex test against a dict value (only substring has to match). >>> where('f1').contains(r'\d+') 'f1' ~= \d+ :param regex: The regular expression to pass to ``re.search`` :rtype: QueryRegex """ return QueryRegex(self._key, regex, re_method='search') def test(self, func): """ Run a user-defined test function against a dict value. >>> def test_func(val): ... return val == 42 ... >>> where('f1').test(test_func) 'f1'.test(<function test_func at 0xXXXXXXXX>) :param func: The function to run. Has to accept one parameter and return a boolean. :rtype: QueryCustom """ return QueryCustom(self._key, func) def has(self, key): """ Run test on a nested dict. >>> where('x').has('y') == 2 has 'x' => ('y' == 2) Matches:: {'x': {'y': 2}} :param key: the key to search for in the nested dict :rtype: QueryHas """ return QueryHas(self._key, key) def any(self, cond): """ Checks if a condition is met by any element in a list, where a condition can also be a sequence (e.g. list). >>> where('f1').any(where('f2') == 1) 'f1' has any 'f2' == 1 Matches:: {'f1': [{'f2': 1}, {'f2': 0}]} >>> where('f1').any([1, 2, 3]) 'f1' has any [1, 2, 3] Matches:: {'f1': [1, 2]} {'f1': [3, 4, 5]} :param cond: The condition to check :rtype: tinydb.queries.Query """ # Check for condition type if callable(cond): def _cmp(value): return is_sequence(value) and any(cond(e) for e in value) else: def _cmp(value): return is_sequence(value) and any(e in cond for e in value) self._cmp = _cmp self._repr = '\'{0}\' has any {1}'.format(self._key, cond) return self def all(self, cond): """ Checks if a condition is met by any element in a list, where a condition can also be a sequence (e.g. list). 
>>> where('f1').all(where('f2') == 1) 'f1' all have 'f2' == 1 Matches:: {'f1': [{'f2': 1}, {'f2': 1}]} >>> where('f1').all([{'f2': 1}, {'f3': 2}]) 'f1' all have [{'f2': 1}, {'f3': 2}] Matches:: {'f1': [{'f2': 1}, {'f3': 2}]} {'f1': [{'f2': 1}, {'f3': 2}, {'f4': 3}]} :param cond: The condition to check :rtype: tinydb.queries.Query """ # Check for condition type if callable(cond): def _cmp(value): return is_sequence(value) and all(cond(e) for e in value) else: def _cmp(value): return is_sequence(value) and all(e in value for e in cond) self._cmp = _cmp self._repr = '\'{0}\' all have {1}'.format(self._key, cond) return self def __eq__(self, other): """ Test a dict value for equality. >>> where('f1') == 42 'f1' == 42 """ if isinstance(other, Query): return self._repr == other._repr if sys.version_info <= (3, 0): # pragma: no cover # Special UTF-8 handling on Python 2 def _cmp(value): with catch_warning(UnicodeWarning): try: return value == other except UnicodeWarning: # Dealing with a case, where 'value' or 'other' # is unicode and the other is a byte string. if isinstance(value, str): return value.decode('utf-8') == other elif isinstance(other, str): return value == other.decode('utf-8') self._cmp = _cmp else: # pragma: no cover self._cmp = lambda value: value == other self._update_repr('==', other) return self def __ne__(self, other): """ Test a dict value for inequality. >>> where('f1') != 42 'f1' != 42 """ self._cmp = lambda value: value != other self._update_repr('!=', other) return self def __lt__(self, other): """ Test a dict value for being lower than another value. >>> where('f1') < 42 'f1' < 42 """ self._cmp = lambda value: value < other self._update_repr('<', other) return self def __le__(self, other): """ Test a dict value for being lower than or equal to another value. >>> where('f1') <= 42 'f1' <= 42 """ self._cmp = lambda value: value <= other self._update_repr('<=', other) return self def __gt__(self, other): """ Test a dict value for being greater than another value. >>> where('f1') > 42 'f1' > 42 """ self._cmp = lambda value: value > other self._update_repr('>', other) return self def __ge__(self, other): """ Test a dict value for being greater than or equal to another value. >>> where('f1') >= 42 'f1' >= 42 """ self._cmp = lambda value: value >= other self._update_repr('>=', other) return self def __invert__(self): """ Negates a query. >>> ~(where('f1') >= 42) not ('f1' >= 42) :rtype: tinydb.queries.QueryNot """ return QueryNot(self) def __and__(self, other): """ Combines this query and another with logical and. Example: >>> (where('f1') == 5) & (where('f2') != 2) ('f1' == 5) and ('f2' != 2) :rtype: QueryAnd """ return super(Query, self).__and__(other) def __or__(self, other): """ Combines this query and another with logical or. Example: >>> (where('f1') == 5) | (where('f2') != 2) ('f1' == 5) or ('f2' != 2) :rtype: QueryOr """ return super(Query, self).__or__(other) def __call__(self, element): """ Run the test on the element. :param element: The dict that we will run our tests against. :type element: dict """ # Check for key existence if self._key not in element: return False # Check, if a comparator has been set if self._cmp: return self._cmp(element[self._key]) else: return True # Key exists def _update_repr(self, operator, value): """ Update the current test's ``repr``. 
""" self._repr = '{0!r} {1} {2!r}'.format(self._key, operator, value) def __repr__(self): return self._repr def __hash__(self): # Queries have to be hashable because the query cache is implemented # as an dict where the keys are Query objects. The hash should be # the same for objects with the same, so we can use the repr for this. return hash(repr(self)) where = Query class QueryNot(AndOrMixin): """ Negates a query. >>> ~(where('f1') >= 42) not ('f1' >= 42) """ def __init__(self, cond): self._cond = cond def __call__(self, element): """ Run the test on the element. :param element: The dict that we will run our tests against. :type element: dict """ return not self._cond(element) def __repr__(self): return 'not ({0})'.format(self._cond) class QueryOr(AndOrMixin): """ Combines this query and another with logical or. See :meth:`.AndOrMixin.__or__`. """ def __init__(self, where1, where2): self._cond_1 = where1 self._cond_2 = where2 def __call__(self, element): """ See :meth:`.Query.__call__`. """ return self._cond_1(element) or self._cond_2(element) def __repr__(self): return '({0}) or ({1})'.format(self._cond_1, self._cond_2) class QueryAnd(AndOrMixin): """ Combines this query and another with logical and. See :meth:`.AndOrMixin.__and__`. """ def __init__(self, where1, where2): self._cond_1 = where1 self._cond_2 = where2 def __call__(self, element): """ See :meth:`.Query.__call__`. """ return self._cond_1(element) and self._cond_2(element) def __repr__(self): return '({0}) and ({1})'.format(self._cond_1, self._cond_2) class QueryRegex(AndOrMixin): """ Run a regex test against a dict value. See :meth:`.Query.matches`. """ def __init__(self, key, regex, re_method): self.regex = regex self._key = key self.re_method = re_method def __call__(self, element): """ See :meth:`.Query.__call__`. """ if self._key not in element: return False if self.re_method == 'match': return re.match(self.regex, element[self._key]) if self.re_method == 'search': return re.search(self.regex, element[self._key]) def __repr__(self): return '\'{0}\' {1} /{2}/'.format(self._key, self.re_method, self.regex) def __hash__(self): return hash(repr(self)) class QueryCustom(AndOrMixin): """ Run a user-defined test function against a dict value. See :meth:`.Query.test`. """ def __init__(self, key, test): self.test = test self._key = key def __call__(self, element): """ See :meth:`.Query.__call__`. """ if self._key not in element: return False return self.test(element[self._key]) def __repr__(self): return '\'{0}\'.test({1})'.format(self._key, self.test) class QueryHas(Query): """ Run a query on a nested dict. See :meth:`.Query.has` """ def __init__(self, root, key): super(QueryHas, self).__init__(key) self._special = None self._path = [root] # Store the path to the element to check def matches(self, regex): """ See :meth:`.Query.matches`. """ self._special = QueryRegex(self._key, regex, re_method='match') return self def contains(self, regex): """ See :meth:`.Query.search`. """ self._special = QueryRegex(self._key, regex, re_method='search') return self def test(self, func): """ See :meth:`.Query.test`. """ self._special = QueryCustom(self._key, func) return self def has(self, key): """ See :meth:`.Query.has`. """ # Nested has: Append old key to path and use given key from now on self._path.append(self._key) self._key = key return self def __call__(self, element): """ See :meth:`.Query.__call__`. 
""" # Retrieve value from given path for key in self._path: try: # Check, if requested key exists if key not in element: return False except (KeyError, TypeError): # We can't continue searching because either ... # - the element contains a value instead of a dict (TypeError) # - or doesn't contain the key (KeyError) return False # Follow the path and continue searching element = element[key] # Verify the element is a dict where we can run the test # Fixes searching for 'x' => 'y' in {'x': {'y': 2}} if not isinstance(element, dict): return False if self._special: # Process special test return self._special(element) else: # Process like a normal query return super(QueryHas, self).__call__(element) def __repr__(self): path = self._path[:] if not self._special and not self._cmp: path += [self._key] repr_str = 'has ' # 'key1' => 'key2' => ... repr_str += '\'' + '\' => \''.join(path) + '\'' if self._special: repr_str += ' => ({})'.format(self._special) elif self._cmp: repr_str += ' => ({})'.format(super(QueryHas, self).__repr__()) return repr_str
mit
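A short usage sketch for the query interface defined above, assuming the module is importable as tinyble.query (the path shown in this record); the example dicts are made up.

from tinyble.query import where  # assumed import path

is_adult = where('age') >= 18
named_bob = where('name') == 'Bob'
query = is_adult & named_bob          # QueryAnd

print(query({'name': 'Bob', 'age': 42}))                    # True
print(query({'name': 'Bob', 'age': 12}))                    # False
print((~is_adult)({'age': 12}))                             # True (negation)
print(where('nested').has('key')({'nested': {'key': 1}}))   # True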
wwf5067/statsmodels
statsmodels/graphics/dotplots.py
31
18190
import numpy as np from statsmodels.compat import range from . import utils def dot_plot(points, intervals=None, lines=None, sections=None, styles=None, marker_props=None, line_props=None, split_names=None, section_order=None, line_order=None, stacked=False, styles_order=None, striped=False, horizontal=True, show_names="both", fmt_left_name=None, fmt_right_name=None, show_section_titles=None, ax=None): """ Produce a dotplot similar in style to those in Cleveland's "Visualizing Data" book. These are also known as "forest plots". Parameters ---------- points : array_like The quantitative values to be plotted as markers. intervals : array_like The intervals to be plotted around the points. The elements of `intervals` are either scalars or sequences of length 2. A scalar indicates the half width of a symmetric interval. A sequence of length 2 contains the left and right half-widths (respectively) of a nonsymmetric interval. If None, no intervals are drawn. lines : array_like A grouping variable indicating which points/intervals are drawn on a common line. If None, each point/interval appears on its own line. sections : array_like A grouping variable indicating which lines are grouped into sections. If None, everything is drawn in a single section. styles : array_like A grouping label defining the plotting style of the markers and intervals. marker_props : dict A dictionary mapping style codes (the values in `styles`) to dictionaries defining key/value pairs to be passed as keyword arguments to `plot` when plotting markers. Useful keyword arguments are "color", "marker", and "ms" (marker size). line_props : dict A dictionary mapping style codes (the values in `styles`) to dictionaries defining key/value pairs to be passed as keyword arguments to `plot` when plotting interval lines. Useful keyword arguments are "color", "linestyle", "solid_capstyle", and "linewidth". split_names : string If not None, this is used to split the values of `lines` into substrings that are drawn in the left and right margins, respectively. If None, the values of `lines` are drawn in the left margin. section_order : array_like The section labels in the order in which they appear in the dotplot. line_order : array_like The line labels in the order in which they appear in the dotplot. stacked : boolean If True, when multiple points or intervals are drawn on the same line, they are offset from each other. styles_order : array_like If stacked=True, this is the order in which the point styles on a given line are drawn from top to bottom (if horizontal is True) or from left to right (if horiontal is False). If None (default), the order is lexical. striped : boolean If True, every other line is enclosed in a shaded box. horizontal : boolean If True (default), the lines are drawn horizontally, otherwise they are drawn vertically. show_names : string Determines whether labels (names) are shown in the left and/or right margins (top/bottom margins if `horizontal` is True). If `both`, labels are drawn in both margins, if 'left', labels are drawn in the left or top margin. If `right`, labels are drawn in the right or bottom margin. fmt_left_name : function The left/top margin names are passed through this function before drawing on the plot. fmt_right_name : function The right/bottom marginnames are passed through this function before drawing on the plot. show_section_titles : bool or None If None, section titles are drawn only if there is more than one section. If False/True, section titles are never/always drawn, respectively. 
ax : matplotlib.axes The axes on which the dotplot is drawn. If None, a new axes is created. Returns ------- fig : Figure The figure given by `ax.figure` or a new instance. Notes ----- `points`, `intervals`, `lines`, `sections`, `styles` must all have the same length whenever present. Examples -------- This is a simple dotplot with one point per line: >>> dot_plot(points=point_values) This dotplot has labels on the lines (if elements in `label_values` are repeated, the corresponding points appear on the same line): >>> dot_plot(points=point_values, lines=label_values) References ---------- * Cleveland, William S. (1993). "Visualizing Data". Hobart Press. * Jacoby, William G. (2006) "The Dot Plot: A Graphical Display for Labeled Quantitative Values." The Political Methodologist 14(1): 6-14. """ import matplotlib.transforms as transforms fig, ax = utils.create_mpl_ax(ax) # Convert to numpy arrays if that is not what we are given. points = np.asarray(points) asarray_or_none = lambda x : None if x is None else np.asarray(x) intervals = asarray_or_none(intervals) lines = asarray_or_none(lines) sections = asarray_or_none(sections) styles = asarray_or_none(styles) # Total number of points npoint = len(points) # Set default line values if needed if lines is None: lines = np.arange(npoint) # Set default section values if needed if sections is None: sections = np.zeros(npoint) # Set default style values if needed if styles is None: styles = np.zeros(npoint) # The vertical space (in inches) for a section title section_title_space = 0.5 # The number of sections nsect = len(set(sections)) if section_order is not None: nsect = len(set(section_order)) # The number of section titles if show_section_titles == False: draw_section_titles = False nsect_title = 0 elif show_section_titles == True: draw_section_titles = True nsect_title = nsect else: draw_section_titles = nsect > 1 nsect_title = nsect if nsect > 1 else 0 # The total vertical space devoted to section titles. section_space_total = section_title_space * nsect_title # Add a bit of room so that points that fall at the axis limits # are not cut in half. ax.set_xmargin(0.02) ax.set_ymargin(0.02) if section_order is None: lines0 = list(set(sections)) lines0.sort() else: lines0 = section_order if line_order is None: lines1 = list(set(lines)) lines1.sort() else: lines1 = line_order # A map from (section,line) codes to index positions. lines_map = {} for i in range(npoint): if section_order is not None and sections[i] not in section_order: continue if line_order is not None and lines[i] not in line_order: continue ky = (sections[i], lines[i]) if ky not in lines_map: lines_map[ky] = [] lines_map[ky].append(i) # Get the size of the axes on the parent figure in inches bbox = ax.get_window_extent().transformed( fig.dpi_scale_trans.inverted()) awidth, aheight = bbox.width, bbox.height # The number of lines in the plot. nrows = len(lines_map) # The positions of the lowest and highest guideline in axes # coordinates (for horizontal dotplots), or the leftmost and # rightmost guidelines (for vertical dotplots). 
bottom, top = 0, 1 if horizontal: # x coordinate is data, y coordinate is axes trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) else: # x coordinate is axes, y coordinate is data trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) # Space used for a section title, in axes coordinates title_space_axes = section_title_space / aheight # Space between lines if horizontal: dpos = (top - bottom - nsect_title*title_space_axes) /\ float(nrows) else: dpos = (top - bottom) / float(nrows) # Determine the spacing for stacked points if styles_order is not None: style_codes = styles_order else: style_codes = list(set(styles)) style_codes.sort() # Order is top to bottom for horizontal plots, so need to # flip. if horizontal: style_codes = style_codes[::-1] # nval is the maximum number of points on one line. nval = len(style_codes) if nval > 1: stackd = dpos / (2.5*(float(nval)-1)) else: stackd = 0. # Map from style code to its integer position #style_codes_map = {x: style_codes.index(x) for x in style_codes} # python 2.6 compat version: style_codes_map = dict((x, style_codes.index(x)) for x in style_codes) # Setup default marker styles colors = ["r", "g", "b", "y", "k", "purple", "orange"] if marker_props is None: #marker_props = {x: {} for x in style_codes} # python 2.6 compat version: marker_props = dict((x, {}) for x in style_codes) for j in range(nval): sc = style_codes[j] if "color" not in marker_props[sc]: marker_props[sc]["color"] = colors[j % len(colors)] if "marker" not in marker_props[sc]: marker_props[sc]["marker"] = "o" if "ms" not in marker_props[sc]: marker_props[sc]["ms"] = 10 if stackd == 0 else 6 # Setup default line styles if line_props is None: #line_props = {x: {} for x in style_codes} # python 2.6 compat version: line_props = dict((x, {}) for x in style_codes) for j in range(nval): sc = style_codes[j] if "color" not in line_props[sc]: line_props[sc]["color"] = "grey" if "linewidth" not in line_props[sc]: line_props[sc]["linewidth"] = 2 if stackd > 0 else 8 if horizontal: # The vertical position of the first line. pos = top - dpos/2 if nsect == 1 else top else: # The horizontal position of the first line. 
pos = bottom + dpos/2 # Points that have already been labeled labeled = set() # Positions of the y axis grid lines ticks = [] # Loop through the sections for k0 in lines0: # Draw a section title if draw_section_titles: if horizontal: y0 = pos + dpos/2 if k0 == lines0[0] else pos ax.fill_between((0, 1), (y0,y0), (pos-0.7*title_space_axes, pos-0.7*title_space_axes), color='darkgrey', transform=ax.transAxes, zorder=1) txt = ax.text(0.5, pos - 0.35*title_space_axes, k0, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) txt.set_fontweight("bold") pos -= title_space_axes else: m = len([k for k in lines_map if k[0] == k0]) ax.fill_between((pos-dpos/2+0.01, pos+(m-1)*dpos+dpos/2-0.01), (1.01,1.01), (1.06,1.06), color='darkgrey', transform=ax.transAxes, zorder=1, clip_on=False) txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0, horizontalalignment='center', verticalalignment='bottom', transform=ax.transAxes) txt.set_fontweight("bold") jrow = 0 for k1 in lines1: # No data to plot if (k0, k1) not in lines_map: continue # Draw the guideline if horizontal: ax.axhline(pos, color='grey') else: ax.axvline(pos, color='grey') # Set up the labels if split_names is not None: us = k1.split(split_names) if len(us) >= 2: left_label, right_label = us[0], us[1] else: left_label, right_label = k1, None else: left_label, right_label = k1, None if fmt_left_name is not None: left_label = fmt_left_name(left_label) if fmt_right_name is not None: right_label = fmt_right_name(right_label) # Draw the stripe if striped and jrow % 2 == 0: if horizontal: ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2), (pos+dpos/2, pos+dpos/2), color='lightgrey', transform=ax.transAxes, zorder=0) else: ax.fill_between((pos-dpos/2, pos+dpos/2), (0, 0), (1, 1), color='lightgrey', transform=ax.transAxes, zorder=0) jrow += 1 # Draw the left margin label if show_names.lower() in ("left", "both"): if horizontal: ax.text(-0.1/awidth, pos, left_label, horizontalalignment="right", verticalalignment='center', transform=ax.transAxes, family='monospace') else: ax.text(pos, -0.1/aheight, left_label, horizontalalignment="center", verticalalignment='top', transform=ax.transAxes, family='monospace') # Draw the right margin label if show_names.lower() in ("right", "both"): if right_label is not None: if horizontal: ax.text(1 + 0.1/awidth, pos, right_label, horizontalalignment="left", verticalalignment='center', transform=ax.transAxes, family='monospace') else: ax.text(pos, 1 + 0.1/aheight, right_label, horizontalalignment="center", verticalalignment='bottom', transform=ax.transAxes, family='monospace') # Save the vertical position so that we can place the # tick marks ticks.append(pos) # Loop over the points in one line for ji,jp in enumerate(lines_map[(k0,k1)]): # Calculate the vertical offset yo = 0 if stacked: yo = -dpos/5 + style_codes_map[styles[jp]]*stackd pt = points[jp] # Plot the interval if intervals is not None: # Symmetric interval if np.isscalar(intervals[jp]): lcb, ucb = pt - intervals[jp],\ pt + intervals[jp] # Nonsymmetric interval else: lcb, ucb = pt - intervals[jp][0],\ pt + intervals[jp][1] # Draw the interval if horizontal: ax.plot([lcb, ucb], [pos+yo, pos+yo], '-', transform=trans, **line_props[styles[jp]]) else: ax.plot([pos+yo, pos+yo], [lcb, ucb], '-', transform=trans, **line_props[styles[jp]]) # Plot the point sl = styles[jp] sll = sl if sl not in labeled else None labeled.add(sl) if horizontal: ax.plot([pt,], [pos+yo,], ls='None', transform=trans, label=sll, **marker_props[sl]) else: ax.plot([pos+yo,], 
[pt,], ls='None', transform=trans, label=sll, **marker_props[sl]) if horizontal: pos -= dpos else: pos += dpos # Set up the axis if horizontal: ax.xaxis.set_ticks_position("bottom") ax.yaxis.set_ticks_position("none") ax.set_yticklabels([]) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['bottom'].set_position(('axes', -0.1/aheight)) ax.set_ylim(0, 1) ax.yaxis.set_ticks(ticks) ax.autoscale_view(scaley=False, tight=True) else: ax.yaxis.set_ticks_position("left") ax.xaxis.set_ticks_position("none") ax.set_xticklabels([]) ax.spines['bottom'].set_color('none') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['left'].set_position(('axes', -0.1/awidth)) ax.set_xlim(0, 1) ax.xaxis.set_ticks(ticks) ax.autoscale_view(scalex=False, tight=True) return fig
bsd-3-clause
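A minimal usage sketch for the dot_plot function above; the point values, interval half-widths and labels are invented for illustration.

import numpy as np
import matplotlib.pyplot as plt

from statsmodels.graphics.dotplots import dot_plot

points = np.array([1.2, 3.4, 2.2, 4.1])
intervals = np.array([0.3, 0.5, 0.2, 0.4])    # symmetric half-widths
labels = ["alpha", "beta", "gamma", "delta"]  # one guideline per label

fig = dot_plot(points=points, intervals=intervals, lines=labels,
               striped=True, horizontal=True)
plt.show()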
Mirantis/tempest
tempest/services/volume/json/extensions_client.py
1
1222
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from tempest.common import rest_client from tempest import config CONF = config.CONF class BaseExtensionsClientJSON(rest_client.RestClient): def __init__(self, auth_provider): super(BaseExtensionsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type def list_extensions(self): url = 'extensions' resp, body = self.get(url) body = json.loads(body) return resp, body['extensions'] class ExtensionsClientJSON(BaseExtensionsClientJSON): """ Volume V1 extensions client. """
apache-2.0
SnappleCap/oh-mainline
vendor/packages/Django/django/contrib/formtools/tests/wizard/forms.py
90
7721
from __future__ import unicode_literals from django import forms, http from django.conf import settings from django.db import models from django.test import TestCase from django.template.response import TemplateResponse from django.utils.importlib import import_module from django.contrib.auth.models import User from django.contrib.formtools.wizard.views import (WizardView, SessionWizardView, CookieWizardView) class DummyRequest(http.HttpRequest): def __init__(self, POST=None): super(DummyRequest, self).__init__() self.method = POST and "POST" or "GET" if POST is not None: self.POST.update(POST) self.session = {} self._dont_enforce_csrf_checks = True def get_request(*args, **kwargs): request = DummyRequest(*args, **kwargs) engine = import_module(settings.SESSION_ENGINE) request.session = engine.SessionStore(None) return request class Step1(forms.Form): name = forms.CharField() class Step2(forms.Form): name = forms.CharField() class Step3(forms.Form): data = forms.CharField() class CustomKwargsStep1(Step1): def __init__(self, test=None, *args, **kwargs): self.test = test return super(CustomKwargsStep1, self).__init__(*args, **kwargs) class TestModel(models.Model): name = models.CharField(max_length=100) class Meta: app_label = 'formtools' class TestModelForm(forms.ModelForm): class Meta: model = TestModel TestModelFormSet = forms.models.modelformset_factory(TestModel, form=TestModelForm, extra=2) class TestWizard(WizardView): storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage' def dispatch(self, request, *args, **kwargs): response = super(TestWizard, self).dispatch(request, *args, **kwargs) return response, self def get_form_kwargs(self, step, *args, **kwargs): kwargs = super(TestWizard, self).get_form_kwargs(step, *args, **kwargs) if step == 'kwargs_test': kwargs['test'] = True return kwargs class FormTests(TestCase): def test_form_init(self): testform = TestWizard.get_initkwargs([Step1, Step2]) self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2}) testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)]) self.assertEqual( testform['form_list'], {'start': Step1, 'step2': Step2}) testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)]) self.assertEqual( testform['form_list'], {'0': Step1, '1': Step2, 'finish': Step3}) def test_first_step(self): request = get_request() testform = TestWizard.as_view([Step1, Step2]) response, instance = testform(request) self.assertEqual(instance.steps.current, '0') testform = TestWizard.as_view([('start', Step1), ('step2', Step2)]) response, instance = testform(request) self.assertEqual(instance.steps.current, 'start') def test_persistence(self): testform = TestWizard.as_view([('start', Step1), ('step2', Step2)]) request = get_request({'test_wizard-current_step': 'start', 'name': 'data1'}) response, instance = testform(request) self.assertEqual(instance.steps.current, 'start') instance.storage.current_step = 'step2' testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)]) request.POST = {'test_wizard-current_step': 'step2'} response, instance = testform2(request) self.assertEqual(instance.steps.current, 'step2') def test_form_condition(self): request = get_request() testform = TestWizard.as_view( [('start', Step1), ('step2', Step2), ('step3', Step3)], condition_dict={'step2': True}) response, instance = testform(request) self.assertEqual(instance.get_next_step(), 'step2') testform = TestWizard.as_view( [('start', Step1), ('step2', Step2), ('step3', Step3)], 
condition_dict={'step2': False}) response, instance = testform(request) self.assertEqual(instance.get_next_step(), 'step3') def test_form_kwargs(self): request = get_request() testform = TestWizard.as_view([('start', Step1), ('kwargs_test', CustomKwargsStep1)]) response, instance = testform(request) self.assertEqual(instance.get_form_kwargs('start'), {}) self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True}) self.assertEqual(instance.get_form('kwargs_test').test, True) def test_form_prefix(self): request = get_request() testform = TestWizard.as_view([('start', Step1), ('step2', Step2)]) response, instance = testform(request) self.assertEqual(instance.get_form_prefix(), 'start') self.assertEqual(instance.get_form_prefix('another'), 'another') def test_form_initial(self): request = get_request() testform = TestWizard.as_view([('start', Step1), ('step2', Step2)], initial_dict={'start': {'name': 'value1'}}) response, instance = testform(request) self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'}) self.assertEqual(instance.get_form_initial('step2'), {}) def test_form_instance(self): request = get_request() the_instance = TestModel() testform = TestWizard.as_view([('start', TestModelForm), ('step2', Step2)], instance_dict={'start': the_instance}) response, instance = testform(request) self.assertEqual( instance.get_form_instance('start'), the_instance) self.assertEqual( instance.get_form_instance('non_exist_instance'), None) def test_formset_instance(self): request = get_request() the_instance1, created = TestModel.objects.get_or_create( name='test object 1') the_instance2, created = TestModel.objects.get_or_create( name='test object 2') testform = TestWizard.as_view([('start', TestModelFormSet), ('step2', Step2)], instance_dict={'start': TestModel.objects.filter(name='test object 1')}) response, instance = testform(request) self.assertEqual(list(instance.get_form_instance('start')), [the_instance1]) self.assertEqual(instance.get_form_instance('non_exist_instance'), None) self.assertEqual(instance.get_form().initial_form_count(), 1) def test_done(self): request = get_request() testform = TestWizard.as_view([('start', Step1), ('step2', Step2)]) response, instance = testform(request) self.assertRaises(NotImplementedError, instance.done, None) def test_revalidation(self): request = get_request() testform = TestWizard.as_view([('start', Step1), ('step2', Step2)]) response, instance = testform(request) instance.render_done(None) self.assertEqual(instance.storage.current_step, 'start') class SessionFormTests(TestCase): def test_init(self): request = get_request() testform = SessionWizardView.as_view([('start', Step1)]) self.assertTrue(isinstance(testform(request), TemplateResponse)) class CookieFormTests(TestCase): def test_init(self): request = get_request() testform = CookieWizardView.as_view([('start', Step1)]) self.assertTrue(isinstance(testform(request), TemplateResponse))
agpl-3.0
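A hedged sketch of how the wizard classes exercised above are typically wired up in a project; the form classes, template name and redirect URL are hypothetical, not part of the test module.

from django import forms
from django.http import HttpResponseRedirect
from django.contrib.formtools.wizard.views import SessionWizardView

class ContactForm1(forms.Form):
    subject = forms.CharField(max_length=100)

class ContactForm2(forms.Form):
    message = forms.CharField(widget=forms.Textarea)

class ContactWizard(SessionWizardView):
    template_name = 'contact_wizard.html'  # hypothetical template

    def done(self, form_list, **kwargs):
        # Subclasses must implement done(); the base class raises
        # NotImplementedError, which is what test_done above checks.
        for form in form_list:
            print(form.cleaned_data)
        return HttpResponseRedirect('/done/')

# urls.py (hypothetical):
# url(r'^contact/$', ContactWizard.as_view([ContactForm1, ContactForm2]))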
sujeet4github/MyLangUtils
LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/pip/commands/check.py
336
1382
import logging from pip.basecommand import Command from pip.operations.check import check_requirements from pip.utils import get_installed_distributions logger = logging.getLogger(__name__) class CheckCommand(Command): """Verify installed packages have compatible dependencies.""" name = 'check' usage = """ %prog [options]""" summary = 'Verify installed packages have compatible dependencies.' def run(self, options, args): dists = get_installed_distributions(local_only=False, skip=()) missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists) for dist in dists: key = '%s==%s' % (dist.project_name, dist.version) for requirement in missing_reqs_dict.get(key, []): logger.info( "%s %s requires %s, which is not installed.", dist.project_name, dist.version, requirement.project_name) for requirement, actual in incompatible_reqs_dict.get(key, []): logger.info( "%s %s has requirement %s, but you have %s %s.", dist.project_name, dist.version, requirement, actual.project_name, actual.version) if missing_reqs_dict or incompatible_reqs_dict: return 1 else: logger.info("No broken requirements found.")
gpl-3.0
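A small sketch of driving the same dependency check outside the pip CLI, using the internal helpers imported by the command above; these are private pip APIs, not a stable public interface.

from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions

dists = get_installed_distributions(local_only=False, skip=())
missing, incompatible = check_requirements(dists)

for dist in dists:
    key = '%s==%s' % (dist.project_name, dist.version)
    for requirement in missing.get(key, []):
        print('%s requires %s, which is not installed.'
              % (key, requirement.project_name))
    for requirement, actual in incompatible.get(key, []):
        print('%s has requirement %s, but you have %s %s.'
              % (key, requirement, actual.project_name, actual.version))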
windyuuy/opera
chromium/src/tools/gyp/test/win/gyptest-cl-debug-format.py
344
1270
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Make sure debug format settings are extracted properly. """ import TestGyp import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['ninja']) CHDIR = 'compiler-flags' test.run_gyp('debug-format.gyp', chdir=CHDIR) # While there are ways to verify this via .pdb contents, the .pdb doesn't include # which style the debug information was created from, so we resort to just # verifying the flags are correct on the command line. ninja_file = test.built_file_path('obj/test-debug-format-off.ninja', chdir=CHDIR) test.must_not_contain(ninja_file, '/Z7') test.must_not_contain(ninja_file, '/Zi') test.must_not_contain(ninja_file, '/ZI') ninja_file = test.built_file_path('obj/test-debug-format-oldstyle.ninja', chdir=CHDIR) test.must_contain(ninja_file, '/Z7') ninja_file = test.built_file_path('obj/test-debug-format-pdb.ninja', chdir=CHDIR) test.must_contain(ninja_file, '/Zi') ninja_file = test.built_file_path('obj/test-debug-format-editcontinue.ninja', chdir=CHDIR) test.must_contain(ninja_file, '/ZI') test.pass_test()
bsd-3-clause
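The test above only inspects the generated ninja files, so for context here is a hedged sketch of what targets in compiler-flags/debug-format.gyp might look like; the target contents are assumptions based solely on the flags the test checks (DebugInformationFormat values 1, 3 and 4 map to /Z7, /Zi and /ZI respectively).

{
  'targets': [
    {
      'target_name': 'test-debug-format-off',
      'type': 'executable',
      'sources': ['hello.cc'],  # hypothetical source file
      'msvs_settings': {
        'VCCLCompilerTool': {'DebugInformationFormat': '0'},  # no debug info
      },
    },
    {
      'target_name': 'test-debug-format-oldstyle',
      'type': 'executable',
      'sources': ['hello.cc'],
      'msvs_settings': {
        'VCCLCompilerTool': {'DebugInformationFormat': '1'},  # /Z7
      },
    },
    {
      'target_name': 'test-debug-format-pdb',
      'type': 'executable',
      'sources': ['hello.cc'],
      'msvs_settings': {
        'VCCLCompilerTool': {'DebugInformationFormat': '3'},  # /Zi
      },
    },
    {
      'target_name': 'test-debug-format-editcontinue',
      'type': 'executable',
      'sources': ['hello.cc'],
      'msvs_settings': {
        'VCCLCompilerTool': {'DebugInformationFormat': '4'},  # /ZI
      },
    },
  ],
}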
pratikmallya/heat
heat/engine/resources/openstack/nova/nova_floatingip.py
2
5929
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils import six from heat.common.i18n import _ from heat.common.i18n import _LE from heat.engine import attributes from heat.engine import constraints from heat.engine import properties from heat.engine import resource from heat.engine import support LOG = logging.getLogger(__name__) class NovaFloatingIp(resource.Resource): support_status = support.SupportStatus(version='2014.1') PROPERTIES = (POOL,) = ('pool',) ATTRIBUTES = ( POOL_ATTR, IP, ) = ( 'pool', 'ip', ) properties_schema = { POOL: properties.Schema( properties.Schema.STRING, description=_('Allocate a floating IP from a given ' 'floating IP pool.') ), } attributes_schema = { POOL_ATTR: attributes.Schema( _('Pool from which floating IP is allocated.'), type=attributes.Schema.STRING ), IP: attributes.Schema( _('Allocated floating IP address.'), type=attributes.Schema.STRING ), } default_client_name = 'nova' entity = 'floating_ips' def __init__(self, name, json_snippet, stack): super(NovaFloatingIp, self).__init__(name, json_snippet, stack) self._floating_ip = None def _get_resource(self): if self._floating_ip is None and self.resource_id is not None: self._floating_ip = self.client().floating_ips.get( self.resource_id) return self._floating_ip def handle_create(self): try: pool = self.properties[self.POOL] floating_ip = self.client().floating_ips.create(pool=pool) except Exception as e: with excutils.save_and_reraise_exception(): if self.client_plugin().is_not_found(e): if pool is None: LOG.error(_LE('Could not allocate floating IP. 
' 'Probably there is no default floating' ' IP pool is configured.')) self.resource_id_set(floating_ip.id) self._floating_ip = floating_ip def _resolve_attribute(self, key): floating_ip = self._get_resource() attributes = { self.POOL_ATTR: getattr(floating_ip, self.POOL_ATTR, None), self.IP: floating_ip.ip } return six.text_type(attributes[key]) class NovaFloatingIpAssociation(resource.Resource): support_status = support.SupportStatus(version='2014.1') PROPERTIES = ( SERVER, FLOATING_IP ) = ( 'server_id', 'floating_ip' ) properties_schema = { SERVER: properties.Schema( properties.Schema.STRING, _('Server to assign floating IP to.'), required=True, update_allowed=True, constraints=[ constraints.CustomConstraint('nova.server') ] ), FLOATING_IP: properties.Schema( properties.Schema.STRING, _('ID of the floating IP to assign to the server.'), required=True, update_allowed=True ), } default_client_name = 'nova' def get_reference_id(self): return self.physical_resource_name_or_FnGetRefId() def handle_create(self): server = self.client().servers.get(self.properties[self.SERVER]) fl_ip = self.client().floating_ips.get( self.properties[self.FLOATING_IP]) self.client().servers.add_floating_ip(server, fl_ip.ip) self.resource_id_set(self.id) def handle_delete(self): if self.resource_id is None: return try: server = self.client().servers.get(self.properties[self.SERVER]) if server: fl_ip = self.client().floating_ips.get( self.properties[self.FLOATING_IP]) self.client().servers.remove_floating_ip(server, fl_ip.ip) except Exception as e: self.client_plugin().ignore_conflict_and_not_found(e) def handle_update(self, json_snippet, tmpl_diff, prop_diff): if prop_diff: # If floating_ip in prop_diff, we need to remove the old floating # ip from the old server, and then to add the new floating ip # to the old/new(if the server_id is changed) server. # If prop_diff only has the server_id, no need to remove the # floating ip from the old server, nova does this automatically # when calling add_floating_ip(). if self.FLOATING_IP in prop_diff: self.handle_delete() server_id = (prop_diff.get(self.SERVER) or self.properties[self.SERVER]) fl_ip_id = (prop_diff.get(self.FLOATING_IP) or self.properties[self.FLOATING_IP]) server = self.client().servers.get(server_id) fl_ip = self.client().floating_ips.get(fl_ip_id) self.client().servers.add_floating_ip(server, fl_ip.ip) self.resource_id_set(self.id) def resource_mapping(): return { 'OS::Nova::FloatingIP': NovaFloatingIp, 'OS::Nova::FloatingIPAssociation': NovaFloatingIpAssociation, }
apache-2.0
JeremyRand/bitcoin
qa/rpc-tests/walletbackup.py
132
7263
#!/usr/bin/env python2 # Copyright (c) 2014 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Exercise the wallet backup code. Ported from walletbackup.sh. Test case is: 4 nodes. 1 2 and 3 send transactions between each other, fourth node is a miner. 1 2 3 each mine a block to start, then Miner creates 100 blocks so 1 2 3 each have 50 mature coins to spend. Then 5 iterations of 1/2/3 sending coins amongst themselves to get transactions in the wallets, and the miner mining one block. Wallets are backed up using dumpwallet/backupwallet. Then 5 more iterations of transactions and mining a block. Miner then generates 101 more blocks, so any transaction fees paid mature. Sanity check: Sum(1,2,3,4 balances) == 114*50 1/2/3 are shutdown, and their wallets erased. Then restore using wallet.dat backup. And confirm 1/2/3/4 balances are same as before. Shutdown again, restore using importwallet, and confirm again balances are correct. """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from random import randint import logging logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) class WalletBackupTest(BitcoinTestFramework): def setup_chain(self): logging.info("Initializing test directory "+self.options.tmpdir) initialize_chain_clean(self.options.tmpdir, 4) # This mirrors how the network was setup in the bash test def setup_network(self, split=False): # nodes 1, 2,3 are spenders, let's give them a keypool=100 extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []] self.nodes = start_nodes(4, self.options.tmpdir, extra_args) connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[1], 3) connect_nodes(self.nodes[2], 3) connect_nodes(self.nodes[2], 0) self.is_network_split=False self.sync_all() def one_send(self, from_node, to_address): if (randint(1,2) == 1): amount = Decimal(randint(1,10)) / Decimal(10) self.nodes[from_node].sendtoaddress(to_address, amount) def do_one_round(self): a0 = self.nodes[0].getnewaddress() a1 = self.nodes[1].getnewaddress() a2 = self.nodes[2].getnewaddress() self.one_send(0, a1) self.one_send(0, a2) self.one_send(1, a0) self.one_send(1, a2) self.one_send(2, a0) self.one_send(2, a1) # Have the miner (node3) mine a block. # Must sync mempools before mining. sync_mempools(self.nodes) self.nodes[3].generate(1) # As above, this mirrors the original bash test. 
def start_three(self): self.nodes[0] = start_node(0, self.options.tmpdir) self.nodes[1] = start_node(1, self.options.tmpdir) self.nodes[2] = start_node(2, self.options.tmpdir) connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[1], 3) connect_nodes(self.nodes[2], 3) connect_nodes(self.nodes[2], 0) def stop_three(self): stop_node(self.nodes[0], 0) stop_node(self.nodes[1], 1) stop_node(self.nodes[2], 2) def erase_three(self): os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat") os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat") os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat") def run_test(self): logging.info("Generating initial blockchain") self.nodes[0].generate(1) sync_blocks(self.nodes) self.nodes[1].generate(1) sync_blocks(self.nodes) self.nodes[2].generate(1) sync_blocks(self.nodes) self.nodes[3].generate(100) sync_blocks(self.nodes) assert_equal(self.nodes[0].getbalance(), 50) assert_equal(self.nodes[1].getbalance(), 50) assert_equal(self.nodes[2].getbalance(), 50) assert_equal(self.nodes[3].getbalance(), 0) logging.info("Creating transactions") # Five rounds of sending each other transactions. for i in range(5): self.do_one_round() logging.info("Backing up") tmpdir = self.options.tmpdir self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak") self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump") self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak") self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump") self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak") self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump") logging.info("More transactions") for i in range(5): self.do_one_round() # Generate 101 more blocks, so any fees paid mature self.nodes[3].generate(101) self.sync_all() balance0 = self.nodes[0].getbalance() balance1 = self.nodes[1].getbalance() balance2 = self.nodes[2].getbalance() balance3 = self.nodes[3].getbalance() total = balance0 + balance1 + balance2 + balance3 # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.) # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700. 
assert_equal(total, 5700) ## # Test restoring spender wallets from backups ## logging.info("Restoring using wallet.dat") self.stop_three() self.erase_three() # Start node2 with no chain shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") # Restore wallets from backup shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat") shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat") shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat") logging.info("Re-starting nodes") self.start_three() sync_blocks(self.nodes) assert_equal(self.nodes[0].getbalance(), balance0) assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) logging.info("Restoring using dumped wallet") self.stop_three() self.erase_three() #start node2 with no chain shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") self.start_three() assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[1].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 0) self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump") self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump") self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump") sync_blocks(self.nodes) assert_equal(self.nodes[0].getbalance(), balance0) assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) if __name__ == '__main__': WalletBackupTest().main()
mit
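The walletbackup test above ends with assert_equal(total, 5700), and its inline comment sketches the block arithmetic behind that number. As a cross-check, here is the same arithmetic as a standalone sketch (no Bitcoin node needed); the 50-coin subsidy and 100-block coinbase maturity are regtest assumptions stated here, not taken from the test file itself.

# Block arithmetic behind the test's final sanity check (regtest assumptions:
# 50-coin subsidy, coinbase outputs spendable after 100 confirmations).
SUBSIDY = 50
COINBASE_MATURITY = 100

setup_blocks = 3 + 100        # nodes 0-2 mine one block each, the miner mines 100
round_blocks = 5 + 5          # one block per transaction round, 10 rounds in total
final_blocks = 101            # mined at the end so any fees paid mature

total_blocks = setup_blocks + round_blocks + final_blocks    # 214
mature_blocks = total_blocks - COINBASE_MATURITY             # 114

assert total_blocks == 214 and mature_blocks == 114
print(mature_blocks * SUBSIDY)    # 5700, matching assert_equal(total, 5700)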
forge33/CouchPotatoServer
libs/requests/packages/urllib3/connection.py
371
8967
import datetime import sys import socket from socket import timeout as SocketTimeout import warnings from .packages import six try: # Python 3 from http.client import HTTPConnection as _HTTPConnection, HTTPException except ImportError: from httplib import HTTPConnection as _HTTPConnection, HTTPException class DummyConnection(object): "Used to detect a failed ConnectionCls import." pass try: # Compiled with SSL? HTTPSConnection = DummyConnection import ssl BaseSSLError = ssl.SSLError except (ImportError, AttributeError): # Platform-specific: No SSL. ssl = None class BaseSSLError(BaseException): pass try: # Python 3: # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError except NameError: # Python 2: class ConnectionError(Exception): pass from .exceptions import ( ConnectTimeoutError, SystemTimeWarning, SecurityWarning, ) from .packages.ssl_match_hostname import match_hostname from .util.ssl_ import ( resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, assert_fingerprint, ) from .util import connection port_by_scheme = { 'http': 80, 'https': 443, } RECENT_DATE = datetime.date(2014, 1, 1) class HTTPConnection(_HTTPConnection, object): """ Based on httplib.HTTPConnection but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - ``source_address``: Set the source address for the current connection. .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass:: HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port = port_by_scheme['http'] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] #: Whether this connection verifies the host's certificate. is_verified = False def __init__(self, *args, **kw): if six.PY3: # Python 3 kw.pop('strict', None) # Pre-set source_address in case we have an older Python like 2.6. self.source_address = kw.get('source_address') if sys.version_info < (2, 7): # Python 2.6 # _HTTPConnection on Python 2.6 will balk at this keyword arg, but # not newer versions. We can still use it when creating a # connection though, so we pop it *after* we have saved it as # self.source_address. kw.pop('source_address', None) #: The socket options provided by the user. If no options are #: provided, we use the default options. self.socket_options = kw.pop('socket_options', self.default_socket_options) # Superclass also sets self.source_address in Python 2.7+. _HTTPConnection.__init__(self, *args, **kw) def _new_conn(self): """ Establish a socket connection and set nodelay settings on it. :return: New socket connection. 
""" extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection( (self.host, self.port), self.timeout, **extra_kw) except SocketTimeout: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout)) return conn def _prepare_conn(self, conn): self.sock = conn # the _tunnel_host attribute was added in python 2.6.3 (via # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do # not have them. if getattr(self, '_tunnel_host', None): # TODO: Fix tunnel so it doesn't depend on self.sock state. self._tunnel() # Mark this connection as not reusable self.auto_open = 0 def connect(self): conn = self._new_conn() self._prepare_conn(conn) class HTTPSConnection(HTTPConnection): default_port = port_by_scheme['https'] def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw): HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw) self.key_file = key_file self.cert_file = cert_file # Required property for Google AppEngine 1.9.0 which otherwise causes # HTTPS requests to go out as HTTP. (See Issue #356) self._protocol = 'https' def connect(self): conn = self._new_conn() self._prepare_conn(conn) self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file) class VerifiedHTTPSConnection(HTTPSConnection): """ Based on httplib.HTTPSConnection but wraps the socket with SSL certification. """ cert_reqs = None ca_certs = None ssl_version = None assert_fingerprint = None def set_cert(self, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, assert_hostname=None, assert_fingerprint=None): self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.ca_certs = ca_certs self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def connect(self): # Add certificate verification conn = self._new_conn() resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_ssl_version = resolve_ssl_version(self.ssl_version) hostname = self.host if getattr(self, '_tunnel_host', None): # _tunnel_host was added in Python 2.6.3 # (See: http://hg.python.org/cpython/rev/0f57b30a152f) self.sock = conn # Calls self._set_hostport(), so self.host is # self._tunnel_host below. self._tunnel() # Mark this connection as not reusable self.auto_open = 0 # Override the host with the one we're requesting data from. hostname = self._tunnel_host is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn(( 'System time is way off (before {0}). This will probably ' 'lead to SSL verification errors').format(RECENT_DATE), SystemTimeWarning ) # Wrap socket using verification with the root certs in # trusted_root_certs self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file, cert_reqs=resolved_cert_reqs, ca_certs=self.ca_certs, server_hostname=hostname, ssl_version=resolved_ssl_version) if self.assert_fingerprint: assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint) elif resolved_cert_reqs != ssl.CERT_NONE \ and self.assert_hostname is not False: cert = self.sock.getpeercert() if not cert.get('subjectAltName', ()): warnings.warn(( 'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. ' 'This feature is being removed by major browsers and deprecated by RFC 2818. 
' '(See https://github.com/shazow/urllib3/issues/497 for details.)'), SecurityWarning ) match_hostname(cert, self.assert_hostname or hostname) self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or self.assert_fingerprint is not None) if ssl: # Make a copy for testing. UnverifiedHTTPSConnection = HTTPSConnection HTTPSConnection = VerifiedHTTPSConnection
gpl-3.0
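The HTTPConnection docstring in the urllib3 module above describes the socket_options keyword and uses TCP keep-alive as its motivating example. The following is a minimal usage sketch, not part of the original file: it assumes a urllib3 with this connection module is importable and that example.com is reachable, and it relies on HTTPConnectionPool forwarding extra keyword arguments down to the connection class.

import socket
import urllib3
from urllib3.connection import HTTPConnection

# Default options (disable Nagle's algorithm) plus SO_KEEPALIVE, as suggested
# in the docstring above.
options = HTTPConnection.default_socket_options + [
    (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]

# Extra keyword arguments such as socket_options are applied to every
# connection the pool creates.
pool = urllib3.HTTPConnectionPool('example.com', port=80, socket_options=options)
response = pool.request('GET', '/')
print(response.status)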
xlk521/cloudguantou
django/contrib/admin/validation.py
153
19088
from django.core.exceptions import ImproperlyConfigured from django.db import models from django.db.models.fields import FieldDoesNotExist from django.forms.models import (BaseModelForm, BaseModelFormSet, fields_for_model, _get_foreign_key) from django.contrib.admin.util import get_fields_from_path, NotRelationField from django.contrib.admin.options import (flatten_fieldsets, BaseModelAdmin, HORIZONTAL, VERTICAL) __all__ = ['validate'] def validate(cls, model): """ Does basic ModelAdmin option validation. Calls custom validation classmethod in the end if it is provided in cls. The signature of the custom validation classmethod should be: def validate(cls, model). """ # Before we can introspect models, they need to be fully loaded so that # inter-relations are set up correctly. We force that here. models.get_apps() opts = model._meta validate_base(cls, model) # list_display if hasattr(cls, 'list_display'): check_isseq(cls, 'list_display', cls.list_display) for idx, field in enumerate(cls.list_display): if not callable(field): if not hasattr(cls, field): if not hasattr(model, field): try: opts.get_field(field) except models.FieldDoesNotExist: raise ImproperlyConfigured("%s.list_display[%d], %r is not a callable or an attribute of %r or found in the model %r." % (cls.__name__, idx, field, cls.__name__, model._meta.object_name)) else: # getattr(model, field) could be an X_RelatedObjectsDescriptor f = fetch_attr(cls, model, opts, "list_display[%d]" % idx, field) if isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.list_display[%d]', '%s' is a ManyToManyField which is not supported." % (cls.__name__, idx, field)) # list_display_links if hasattr(cls, 'list_display_links'): check_isseq(cls, 'list_display_links', cls.list_display_links) for idx, field in enumerate(cls.list_display_links): if field not in cls.list_display: raise ImproperlyConfigured("'%s.list_display_links[%d]' " "refers to '%s' which is not defined in 'list_display'." % (cls.__name__, idx, field)) # list_filter if hasattr(cls, 'list_filter'): check_isseq(cls, 'list_filter', cls.list_filter) for idx, fpath in enumerate(cls.list_filter): try: get_fields_from_path(model, fpath) except (NotRelationField, FieldDoesNotExist), e: raise ImproperlyConfigured( "'%s.list_filter[%d]' refers to '%s' which does not refer to a Field." % ( cls.__name__, idx, fpath ) ) # list_per_page = 100 if hasattr(cls, 'list_per_page') and not isinstance(cls.list_per_page, int): raise ImproperlyConfigured("'%s.list_per_page' should be a integer." % cls.__name__) # list_editable if hasattr(cls, 'list_editable') and cls.list_editable: check_isseq(cls, 'list_editable', cls.list_editable) for idx, field_name in enumerate(cls.list_editable): try: field = opts.get_field_by_name(field_name)[0] except models.FieldDoesNotExist: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a " "field, '%s', not defined on %s." % (cls.__name__, idx, field_name, model.__name__)) if field_name not in cls.list_display: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to " "'%s' which is not defined in 'list_display'." 
% (cls.__name__, idx, field_name)) if field_name in cls.list_display_links: raise ImproperlyConfigured("'%s' cannot be in both '%s.list_editable'" " and '%s.list_display_links'" % (field_name, cls.__name__, cls.__name__)) if not cls.list_display_links and cls.list_display[0] in cls.list_editable: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to" " the first field in list_display, '%s', which can't be" " used unless list_display_links is set." % (cls.__name__, idx, cls.list_display[0])) if not field.editable: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a " "field, '%s', which isn't editable through the admin." % (cls.__name__, idx, field_name)) # search_fields = () if hasattr(cls, 'search_fields'): check_isseq(cls, 'search_fields', cls.search_fields) # date_hierarchy = None if cls.date_hierarchy: f = get_field(cls, model, opts, 'date_hierarchy', cls.date_hierarchy) if not isinstance(f, (models.DateField, models.DateTimeField)): raise ImproperlyConfigured("'%s.date_hierarchy is " "neither an instance of DateField nor DateTimeField." % cls.__name__) # ordering = None if cls.ordering: check_isseq(cls, 'ordering', cls.ordering) for idx, field in enumerate(cls.ordering): if field == '?' and len(cls.ordering) != 1: raise ImproperlyConfigured("'%s.ordering' has the random " "ordering marker '?', but contains other fields as " "well. Please either remove '?' or the other fields." % cls.__name__) if field == '?': continue if field.startswith('-'): field = field[1:] # Skip ordering in the format field1__field2 (FIXME: checking # this format would be nice, but it's a little fiddly). if '__' in field: continue get_field(cls, model, opts, 'ordering[%d]' % idx, field) if hasattr(cls, "readonly_fields"): check_readonly_fields(cls, model, opts) # list_select_related = False # save_as = False # save_on_top = False for attr in ('list_select_related', 'save_as', 'save_on_top'): if not isinstance(getattr(cls, attr), bool): raise ImproperlyConfigured("'%s.%s' should be a boolean." % (cls.__name__, attr)) # inlines = [] if hasattr(cls, 'inlines'): check_isseq(cls, 'inlines', cls.inlines) for idx, inline in enumerate(cls.inlines): if not issubclass(inline, BaseModelAdmin): raise ImproperlyConfigured("'%s.inlines[%d]' does not inherit " "from BaseModelAdmin." % (cls.__name__, idx)) if not inline.model: raise ImproperlyConfigured("'model' is a required attribute " "of '%s.inlines[%d]'." % (cls.__name__, idx)) if not issubclass(inline.model, models.Model): raise ImproperlyConfigured("'%s.inlines[%d].model' does not " "inherit from models.Model." % (cls.__name__, idx)) validate_base(inline, inline.model) validate_inline(inline, cls, model) def validate_inline(cls, parent, parent_model): # model is already verified to exist and be a Model if cls.fk_name: # default value is None f = get_field(cls, cls.model, cls.model._meta, 'fk_name', cls.fk_name) if not isinstance(f, models.ForeignKey): raise ImproperlyConfigured("'%s.fk_name is not an instance of " "models.ForeignKey." % cls.__name__) fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name, can_fail=True) # extra = 3 if not isinstance(cls.extra, int): raise ImproperlyConfigured("'%s.extra' should be a integer." % cls.__name__) # max_num = None max_num = getattr(cls, 'max_num', None) if max_num is not None and not isinstance(max_num, int): raise ImproperlyConfigured("'%s.max_num' should be an integer or None (default)." 
% cls.__name__) # formset if hasattr(cls, 'formset') and not issubclass(cls.formset, BaseModelFormSet): raise ImproperlyConfigured("'%s.formset' does not inherit from " "BaseModelFormSet." % cls.__name__) # exclude if hasattr(cls, 'exclude') and cls.exclude: if fk and fk.name in cls.exclude: raise ImproperlyConfigured("%s cannot exclude the field " "'%s' - this is the foreign key to the parent model " "%s." % (cls.__name__, fk.name, parent_model.__name__)) if hasattr(cls, "readonly_fields"): check_readonly_fields(cls, cls.model, cls.model._meta) def validate_base(cls, model): opts = model._meta # raw_id_fields if hasattr(cls, 'raw_id_fields'): check_isseq(cls, 'raw_id_fields', cls.raw_id_fields) for idx, field in enumerate(cls.raw_id_fields): f = get_field(cls, model, opts, 'raw_id_fields', field) if not isinstance(f, (models.ForeignKey, models.ManyToManyField)): raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must " "be either a ForeignKey or ManyToManyField." % (cls.__name__, idx, field)) # fields if cls.fields: # default value is None check_isseq(cls, 'fields', cls.fields) for field in cls.fields: if field in cls.readonly_fields: # Stuff can be put in fields that isn't actually a model field # if it's in readonly_fields, readonly_fields will handle the # validation of such things. continue check_formfield(cls, model, opts, 'fields', field) try: f = opts.get_field(field) except models.FieldDoesNotExist: # If we can't find a field on the model that matches, # it could be an extra field on the form. continue if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created: raise ImproperlyConfigured("'%s.fields' can't include the ManyToManyField " "field '%s' because '%s' manually specifies " "a 'through' model." % (cls.__name__, field, field)) if cls.fieldsets: raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__) if len(cls.fields) > len(set(cls.fields)): raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__) # fieldsets if cls.fieldsets: # default value is None check_isseq(cls, 'fieldsets', cls.fieldsets) for idx, fieldset in enumerate(cls.fieldsets): check_isseq(cls, 'fieldsets[%d]' % idx, fieldset) if len(fieldset) != 2: raise ImproperlyConfigured("'%s.fieldsets[%d]' does not " "have exactly two elements." % (cls.__name__, idx)) check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1]) if 'fields' not in fieldset[1]: raise ImproperlyConfigured("'fields' key is required in " "%s.fieldsets[%d][1] field options dict." % (cls.__name__, idx)) for fields in fieldset[1]['fields']: # The entry in fields might be a tuple. If it is a standalone # field, make it into a tuple to make processing easier. if type(fields) != tuple: fields = (fields,) for field in fields: if field in cls.readonly_fields: # Stuff can be put in fields that isn't actually a # model field if it's in readonly_fields, # readonly_fields will handle the validation of such # things. continue check_formfield(cls, model, opts, "fieldsets[%d][1]['fields']" % idx, field) try: f = opts.get_field(field) if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created: raise ImproperlyConfigured("'%s.fieldsets[%d][1]['fields']' " "can't include the ManyToManyField field '%s' because " "'%s' manually specifies a 'through' model." % ( cls.__name__, idx, field, field)) except models.FieldDoesNotExist: # If we can't find a field on the model that matches, # it could be an extra field on the form. 
pass flattened_fieldsets = flatten_fieldsets(cls.fieldsets) if len(flattened_fieldsets) > len(set(flattened_fieldsets)): raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__) # exclude if cls.exclude: # default value is None check_isseq(cls, 'exclude', cls.exclude) for field in cls.exclude: check_formfield(cls, model, opts, 'exclude', field) try: f = opts.get_field(field) except models.FieldDoesNotExist: # If we can't find a field on the model that matches, # it could be an extra field on the form. continue if len(cls.exclude) > len(set(cls.exclude)): raise ImproperlyConfigured('There are duplicate field(s) in %s.exclude' % cls.__name__) # form if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm): raise ImproperlyConfigured("%s.form does not inherit from " "BaseModelForm." % cls.__name__) # filter_vertical if hasattr(cls, 'filter_vertical'): check_isseq(cls, 'filter_vertical', cls.filter_vertical) for idx, field in enumerate(cls.filter_vertical): f = get_field(cls, model, opts, 'filter_vertical', field) if not isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be " "a ManyToManyField." % (cls.__name__, idx)) # filter_horizontal if hasattr(cls, 'filter_horizontal'): check_isseq(cls, 'filter_horizontal', cls.filter_horizontal) for idx, field in enumerate(cls.filter_horizontal): f = get_field(cls, model, opts, 'filter_horizontal', field) if not isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be " "a ManyToManyField." % (cls.__name__, idx)) # radio_fields if hasattr(cls, 'radio_fields'): check_isdict(cls, 'radio_fields', cls.radio_fields) for field, val in cls.radio_fields.items(): f = get_field(cls, model, opts, 'radio_fields', field) if not (isinstance(f, models.ForeignKey) or f.choices): raise ImproperlyConfigured("'%s.radio_fields['%s']' " "is neither an instance of ForeignKey nor does " "have choices set." % (cls.__name__, field)) if not val in (HORIZONTAL, VERTICAL): raise ImproperlyConfigured("'%s.radio_fields['%s']' " "is neither admin.HORIZONTAL nor admin.VERTICAL." % (cls.__name__, field)) # prepopulated_fields if hasattr(cls, 'prepopulated_fields'): check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields) for field, val in cls.prepopulated_fields.items(): f = get_field(cls, model, opts, 'prepopulated_fields', field) if isinstance(f, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)): raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' " "is either a DateTimeField, ForeignKey or " "ManyToManyField. This isn't allowed." % (cls.__name__, field)) check_isseq(cls, "prepopulated_fields['%s']" % field, val) for idx, f in enumerate(val): get_field(cls, model, opts, "prepopulated_fields['%s'][%d]" % (field, idx), f) def check_isseq(cls, label, obj): if not isinstance(obj, (list, tuple)): raise ImproperlyConfigured("'%s.%s' must be a list or tuple." % (cls.__name__, label)) def check_isdict(cls, label, obj): if not isinstance(obj, dict): raise ImproperlyConfigured("'%s.%s' must be a dictionary." % (cls.__name__, label)) def get_field(cls, model, opts, label, field): try: return opts.get_field(field) except models.FieldDoesNotExist: raise ImproperlyConfigured("'%s.%s' refers to field '%s' that is missing from model '%s'." 
% (cls.__name__, label, field, model.__name__)) def check_formfield(cls, model, opts, label, field): if getattr(cls.form, 'base_fields', None): try: cls.form.base_fields[field] except KeyError: raise ImproperlyConfigured("'%s.%s' refers to field '%s' that " "is missing from the form." % (cls.__name__, label, field)) else: fields = fields_for_model(model) try: fields[field] except KeyError: raise ImproperlyConfigured("'%s.%s' refers to field '%s' that " "is missing from the form." % (cls.__name__, label, field)) def fetch_attr(cls, model, opts, label, field): try: return opts.get_field(field) except models.FieldDoesNotExist: pass try: return getattr(model, field) except AttributeError: raise ImproperlyConfigured("'%s.%s' refers to '%s' that is neither a field, method or property of model '%s'." % (cls.__name__, label, field, model.__name__)) def check_readonly_fields(cls, model, opts): check_isseq(cls, "readonly_fields", cls.readonly_fields) for idx, field in enumerate(cls.readonly_fields): if not callable(field): if not hasattr(cls, field): if not hasattr(model, field): try: opts.get_field(field) except models.FieldDoesNotExist: raise ImproperlyConfigured("%s.readonly_fields[%d], %r is not a callable or an attribute of %r or found in the model %r." % (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
bsd-3-clause
alextruberg/custom_django
django/conf/locale/sl/formats.py
200
2120
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'd. F Y' TIME_FORMAT = 'H:i:s' DATETIME_FORMAT = 'j. F Y. H:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'j. M. Y' SHORT_DATETIME_FORMAT = 'j.n.Y. H:i' FIRST_DAY_OF_WEEK = 0 # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06' '%d-%m-%Y', # '25-10-2006' '%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06' ) DATETIME_INPUT_FORMATS = ( '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y', # '25.10.06' '%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59' '%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200' '%d-%m-%Y %H:%M', # '25-10-2006 14:30' '%d-%m-%Y', # '25-10-2006' '%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59' '%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200' '%d. %m. %Y %H:%M', # '25. 10. 2006 14:30' '%d. %m. %Y', # '25. 10. 2006' '%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59' '%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200' '%d. %m. %y %H:%M', # '25. 10. 06 14:30' '%d. %m. %y', # '25. 10. 06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
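The Slovenian format module above is normally consumed through Django's localization layer, but its *_INPUT_FORMATS entries are ordinary strptime patterns and can be exercised directly. A small standard-library-only sketch, using the sample values from the module's own comments:

from datetime import datetime

# A few of the DATE_INPUT_FORMATS / DATETIME_INPUT_FORMATS declared above.
samples = [
    ('25.10.2006', '%d.%m.%Y'),
    ('25. 10. 06', '%d. %m. %y'),
    ('25.10.2006 14:30:59', '%d.%m.%Y %H:%M:%S'),
]
for value, pattern in samples:
    print(value, '->', datetime.strptime(value, pattern))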
tudyzhb/yichui
djangoappengine/main/__init__.py
53
2627
import os import sys # Add parent folder to sys.path, so we can import boot. # App Engine causes main.py to be reloaded if an exception gets raised # on the first request of a main.py instance, so don't add project_dir # multiple times. project_dir = os.path.abspath( os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) if project_dir not in sys.path or sys.path.index(project_dir) > 0: while project_dir in sys.path: sys.path.remove(project_dir) sys.path.insert(0, project_dir) for path in sys.path[:]: if path != project_dir and os.path.isdir(os.path.join(path, 'django')): sys.path.remove(path) break # Remove the standard version of Django. if 'django' in sys.modules and sys.modules['django'].VERSION < (1, 2): for k in [k for k in sys.modules if k.startswith('django.') or k == 'django']: del sys.modules[k] from djangoappengine.boot import setup_env setup_env() def validate_models(): """ Since BaseRunserverCommand is only run once, we need to call model validation here to ensure it is run every time the code changes. """ import logging from django.core.management.validation import get_validation_errors try: from cStringIO import StringIO except ImportError: from StringIO import StringIO logging.info("Validating models...") s = StringIO() num_errors = get_validation_errors(s, None) if num_errors: s.seek(0) error_text = s.read() logging.critical("One or more models did not validate:\n%s" % error_text) else: logging.info("All models validated.") from djangoappengine.utils import on_production_server if not on_production_server: validate_models() from django.core.handlers.wsgi import WSGIHandler from google.appengine.ext.webapp.util import run_wsgi_app from django.conf import settings def log_traceback(*args, **kwargs): import logging logging.exception("Exception in request:") from django.core import signals signals.got_request_exception.connect(log_traceback) # Create a Django application for WSGI. application = WSGIHandler() # Add the staticfiles handler if necessary. if settings.DEBUG and 'django.contrib.staticfiles' in settings.INSTALLED_APPS: from django.contrib.staticfiles.handlers import StaticFilesHandler application = StaticFilesHandler(application) if getattr(settings, 'ENABLE_APPSTATS', False): from google.appengine.ext.appstats.recording import \ appstats_wsgi_middleware application = appstats_wsgi_middleware(application)
bsd-3-clause
Shougo/python-client
pynvim/api/window.py
2
1983
"""API for working with Nvim windows.""" from pynvim.api.common import Remote __all__ = ('Window') class Window(Remote): """A remote Nvim window.""" _api_prefix = "nvim_win_" @property def buffer(self): """Get the `Buffer` currently being displayed by the window.""" return self.request('nvim_win_get_buf') @property def cursor(self): """Get the (row, col) tuple with the current cursor position.""" return self.request('nvim_win_get_cursor') @cursor.setter def cursor(self, pos): """Set the (row, col) tuple as the new cursor position.""" return self.request('nvim_win_set_cursor', pos) @property def height(self): """Get the window height in rows.""" return self.request('nvim_win_get_height') @height.setter def height(self, height): """Set the window height in rows.""" return self.request('nvim_win_set_height', height) @property def width(self): """Get the window width in rows.""" return self.request('nvim_win_get_width') @width.setter def width(self, width): """Set the window height in rows.""" return self.request('nvim_win_set_width', width) @property def row(self): """0-indexed, on-screen window position(row) in display cells.""" return self.request('nvim_win_get_position')[0] @property def col(self): """0-indexed, on-screen window position(col) in display cells.""" return self.request('nvim_win_get_position')[1] @property def tabpage(self): """Get the `Tabpage` that contains the window.""" return self.request('nvim_win_get_tabpage') @property def valid(self): """Return True if the window still exists.""" return self.request('nvim_win_is_valid') @property def number(self): """Get the window number.""" return self.request('nvim_win_get_number')
apache-2.0
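The Window class above is a thin property wrapper over nvim_win_* RPC requests. A minimal usage sketch follows; it is not part of the original file and assumes pynvim is installed and an nvim binary is on PATH.

import pynvim

# Start a throwaway embedded Nvim instance and grab its current window.
nvim = pynvim.attach('child', argv=['nvim', '--embed', '--headless'])
win = nvim.current.window

print(win.height, win.width)      # nvim_win_get_height / nvim_win_get_width
print(win.cursor)                 # (row, col); row is 1-based, col is 0-based

nvim.current.buffer[:] = ['first line', 'second line']
win.cursor = (2, 0)               # nvim_win_set_cursor
print(win.buffer[win.cursor[0] - 1])   # -> 'second line'

nvim.close()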
gkotton/neutron
neutron/tests/unit/test_extension_ext_net.py
10
7932
# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import itertools import mock import testtools from webob import exc from neutron import context from neutron.db import models_v2 from neutron.extensions import external_net as external_net from neutron import manager from neutron.openstack.common import log as logging from neutron.openstack.common import uuidutils from neutron.tests.unit import test_api_v2 from neutron.tests.unit import test_db_plugin LOG = logging.getLogger(__name__) _uuid = uuidutils.generate_uuid _get_path = test_api_v2._get_path class ExtNetTestExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] class ExtNetDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase): def _create_network(self, fmt, name, admin_state_up, **kwargs): """Override the routine for allowing the router:external attribute.""" # attributes containing a colon should be passed with # a double underscore new_args = dict(itertools.izip(map(lambda x: x.replace('__', ':'), kwargs), kwargs.values())) arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,) return super(ExtNetDBTestCase, self)._create_network( fmt, name, admin_state_up, arg_list=arg_list, **new_args) def setUp(self): plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin' ext_mgr = ExtNetTestExtensionManager() super(ExtNetDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _set_net_external(self, net_id): self._update('networks', net_id, {'network': {external_net.EXTERNAL: True}}) def test_list_nets_external(self): with self.network() as n1: self._set_net_external(n1['network']['id']) with self.network(): body = self._list('networks') self.assertEqual(len(body['networks']), 2) body = self._list('networks', query_params="%s=True" % external_net.EXTERNAL) self.assertEqual(len(body['networks']), 1) body = self._list('networks', query_params="%s=False" % external_net.EXTERNAL) self.assertEqual(len(body['networks']), 1) def test_list_nets_external_pagination(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with contextlib.nested(self.network(name='net1'), self.network(name='net3')) as (n1, n3): self._set_net_external(n1['network']['id']) self._set_net_external(n3['network']['id']) with self.network(name='net2') as n2: self._test_list_with_pagination( 'network', (n1, n3), ('name', 'asc'), 1, 3, query_params='router:external=True') self._test_list_with_pagination( 'network', (n2, ), ('name', 'asc'), 1, 2, query_params='router:external=False') def test_get_network_succeeds_without_filter(self): plugin = manager.NeutronManager.get_plugin() ctx = context.Context(None, None, is_admin=True) result = plugin.get_networks(ctx, filters=None) self.assertEqual(result, []) def test_update_network_set_external_non_admin_fails(self): # Assert that a non-admin user cannot update the # router:external attribute with 
self.network(tenant_id='noadmin') as network: data = {'network': {'router:external': True}} req = self.new_update_request('networks', data, network['network']['id']) req.environ['neutron.context'] = context.Context('', 'noadmin') res = req.get_response(self.api) self.assertEqual(exc.HTTPForbidden.code, res.status_int) def test_network_filter_hook_admin_context(self): plugin = manager.NeutronManager.get_plugin() ctx = context.Context(None, None, is_admin=True) model = models_v2.Network conditions = plugin._network_filter_hook(ctx, model, []) self.assertEqual(conditions, []) def test_network_filter_hook_nonadmin_context(self): plugin = manager.NeutronManager.get_plugin() ctx = context.Context('edinson', 'cavani') model = models_v2.Network txt = "externalnetworks.network_id IS NOT NULL" conditions = plugin._network_filter_hook(ctx, model, []) self.assertEqual(conditions.__str__(), txt) # Try to concatenate conditions conditions = plugin._network_filter_hook(ctx, model, conditions) self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt)) def test_create_port_external_network_non_admin_fails(self): with self.network(router__external=True) as ext_net: with self.subnet(network=ext_net) as ext_subnet: with testtools.ExpectedException( exc.HTTPClientError) as ctx_manager: with self.port(subnet=ext_subnet, set_context='True', tenant_id='noadmin'): pass self.assertEqual(ctx_manager.exception.code, 403) def test_create_port_external_network_admin_succeeds(self): with self.network(router__external=True) as ext_net: with self.subnet(network=ext_net) as ext_subnet: with self.port(subnet=ext_subnet) as port: self.assertEqual(port['port']['network_id'], ext_net['network']['id']) def test_create_external_network_non_admin_fails(self): with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager: with self.network(router__external=True, set_context='True', tenant_id='noadmin'): pass self.assertEqual(ctx_manager.exception.code, 403) def test_create_external_network_admin_succeeds(self): with self.network(router__external=True) as ext_net: self.assertEqual(ext_net['network'][external_net.EXTERNAL], True) def test_delete_network_check_disassociated_floatingips(self): with mock.patch.object(manager.NeutronManager, 'get_service_plugins') as srv_plugins: l3_mock = mock.Mock() srv_plugins.return_value = {'L3_ROUTER_NAT': l3_mock} with self.network() as net: req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, exc.HTTPNoContent.code) (l3_mock.delete_disassociated_floatingips .assert_called_once_with(mock.ANY, net['network']['id']))
apache-2.0
thresholdsoftware/asylum
openerp/addons/account/wizard/account_report_aged_partner_balance.py
48
4072
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from datetime import datetime from dateutil.relativedelta import relativedelta from openerp.osv import fields, osv from openerp.tools.translate import _ class account_aged_trial_balance(osv.osv_memory): _inherit = 'account.common.partner.report' _name = 'account.aged.trial.balance' _description = 'Account Aged Trial balance Report' _columns = { 'period_length':fields.integer('Period Length (days)', required=True), 'direction_selection': fields.selection([('past','Past'), ('future','Future')], 'Analysis Direction', required=True), 'journal_ids': fields.many2many('account.journal', 'account_aged_trial_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True), } _defaults = { 'period_length': 30, 'date_from': lambda *a: time.strftime('%Y-%m-%d'), 'direction_selection': 'past', } def _print_report(self, cr, uid, ids, data, context=None): res = {} if context is None: context = {} data = self.pre_print_report(cr, uid, ids, data, context=context) data['form'].update(self.read(cr, uid, ids, ['period_length', 'direction_selection'])[0]) period_length = data['form']['period_length'] if period_length<=0: raise osv.except_osv(_('User Error!'), _('You must set a period length greater than 0.')) if not data['form']['date_from']: raise osv.except_osv(_('User Error!'), _('You must set a start date.')) start = datetime.strptime(data['form']['date_from'], "%Y-%m-%d") if data['form']['direction_selection'] == 'past': for i in range(5)[::-1]: stop = start - relativedelta(days=period_length) res[str(i)] = { 'name': (i!=0 and (str((5-(i+1)) * period_length) + '-' + str((5-i) * period_length)) or ('+'+str(4 * period_length))), 'stop': start.strftime('%Y-%m-%d'), 'start': (i!=0 and stop.strftime('%Y-%m-%d') or False), } start = stop - relativedelta(days=1) else: for i in range(5): stop = start + relativedelta(days=period_length) res[str(5-(i+1))] = { 'name': (i!=4 and str((i) * period_length)+'-' + str((i+1) * period_length) or ('+'+str(4 * period_length))), 'start': start.strftime('%Y-%m-%d'), 'stop': (i!=4 and stop.strftime('%Y-%m-%d') or False), } start = stop + relativedelta(days=1) data['form'].update(res) if data.get('form',False): data['ids']=[data['form'].get('chart_account_id',False)] return { 'type': 'ir.actions.report.xml', 'report_name': 'account.aged_trial_balance', 'datas': data } account_aged_trial_balance() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
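In the wizard above, _print_report builds five ageing buckets of period_length days each, and the index arithmetic in the 'past' branch is easy to misread. Here is that bucket construction pulled out into a standalone sketch (dateutil assumed available, as the wizard itself imports it); the chosen date and period length are illustrative.

from datetime import datetime
from dateutil.relativedelta import relativedelta

period_length = 30
start = datetime.strptime('2024-01-31', '%Y-%m-%d')

# 'past' direction: bucket '4' is the most recent period, bucket '0' is "+120 days and older".
buckets = {}
for i in range(5)[::-1]:
    stop = start - relativedelta(days=period_length)
    buckets[str(i)] = {
        'name': (str((5 - (i + 1)) * period_length) + '-' + str((5 - i) * period_length))
                if i != 0 else '+' + str(4 * period_length),
        'stop': start.strftime('%Y-%m-%d'),
        'start': stop.strftime('%Y-%m-%d') if i != 0 else False,
    }
    start = stop - relativedelta(days=1)

for key in sorted(buckets):
    print(key, buckets[key])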
smallswan267/octoplus
selenium/webdriver/ie/service.py
16
3920
#!/usr/bin/python # # Copyright 2012 Webdriver_name committers # Copyright 2012 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess from subprocess import PIPE import time from selenium.common.exceptions import WebDriverException from selenium.webdriver.common import utils class Service(object): """ Object that manages the starting and stopping of the IEDriver """ def __init__(self, executable_path, port=0, host=None, log_level=None, log_file=None): """ Creates a new instance of the Service :Args: - executable_path : Path to the IEDriver - port : Port the service is running on - host : IP address the service port is bound - log_level : Level of logging of service, may be "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE". Default is "FATAL". - log_file : Target of logging of service, may be "stdout", "stderr" or file path. Default is "stdout".""" self.port = port self.path = executable_path if self.port == 0: self.port = utils.free_port() self.host = host self.log_level = log_level self.log_file = log_file def start(self): """ Starts the IEDriver Service. :Exceptions: - WebDriverException : Raised either when it can't start the service or when it can't connect to the service """ try: cmd = [self.path, "--port=%d" % self.port] if self.host is not None: cmd.append("--host=%s" % self.host) if self.log_level is not None: cmd.append("--log-level=%s" % self.log_level) if self.log_file is not None: cmd.append("--log-file=%s" % self.log_file) self.process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE) except TypeError: raise except: raise WebDriverException( "IEDriver executable needs to be available in the path. " "Please download from http://selenium-release.storage.googleapis.com/index.html " "and read up at http://code.google.com/p/selenium/wiki/InternetExplorerDriver") count = 0 while not utils.is_url_connectable(self.port): count += 1 time.sleep(1) if count == 30: raise WebDriverException("Can not connect to the IEDriver") def stop(self): """ Tells the IEDriver to stop and cleans up the process """ #If its dead dont worry if self.process is None: return #Tell the Server to die! try: from urllib import request as url_request except ImportError: import urllib2 as url_request url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port) count = 0 while utils.is_connectable(self.port): if count == 30: break count += 1 time.sleep(1) #Tell the Server to properly die in case try: if self.process: self.process.kill() self.process.wait() except WindowsError: # kill may not be available under windows environment pass
mit
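The Service class above is normally created for you by webdriver.Ie, but it can also be driven by hand, which helps when debugging the driver binary itself. A sketch under stated assumptions: a Windows host, and IEDriverServer.exe at an illustrative path that is not part of the source.

from selenium.webdriver.ie.service import Service

# Path, port and log level are placeholders; adjust to the local install.
service = Service(r'C:\tools\IEDriverServer.exe', port=5555, log_level='DEBUG')
service.start()        # spawns the executable and polls the port until it responds
try:
    print('IEDriverServer listening on port', service.port)
finally:
    service.stop()     # asks the server to shut down over HTTP, then kills the process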
mbox/django
tests/raw_query/models.py
150
1034
from django.db import models class Author(models.Model): first_name = models.CharField(max_length=255) last_name = models.CharField(max_length=255) dob = models.DateField() def __init__(self, *args, **kwargs): super(Author, self).__init__(*args, **kwargs) # Protect against annotations being passed to __init__ -- # this'll make the test suite get angry if annotations aren't # treated differently than fields. for k in kwargs: assert k in [f.attname for f in self._meta.fields], \ "Author.__init__ got an unexpected parameter: %s" % k class Book(models.Model): title = models.CharField(max_length=255) author = models.ForeignKey(Author) paperback = models.BooleanField(default=False) opening_line = models.TextField() class Coffee(models.Model): brand = models.CharField(max_length=255, db_column="name") class Reviewer(models.Model): reviewed = models.ManyToManyField(Book) class FriendlyAuthor(Author): pass
bsd-3-clause
rbalda/neural_ocr
env/lib/python2.7/site-packages/django/core/handlers/base.py
77
13346
from __future__ import unicode_literals import logging import sys import types import warnings from django import http from django.conf import settings from django.core import signals, urlresolvers from django.core.exceptions import ( MiddlewareNotUsed, PermissionDenied, SuspiciousOperation, ) from django.db import connections, transaction from django.http.multipartparser import MultiPartParserError from django.utils import six from django.utils.deprecation import RemovedInDjango20Warning from django.utils.encoding import force_text from django.utils.module_loading import import_string from django.views import debug logger = logging.getLogger('django.request') class BaseHandler(object): # Changes that are always applied to a response (in this order). response_fixes = [ http.conditional_content_removal, ] def __init__(self): self._request_middleware = None self._view_middleware = None self._template_response_middleware = None self._response_middleware = None self._exception_middleware = None def load_middleware(self): """ Populate middleware lists from settings.MIDDLEWARE_CLASSES. Must be called after the environment is fixed (see __call__ in subclasses). """ self._view_middleware = [] self._template_response_middleware = [] self._response_middleware = [] self._exception_middleware = [] request_middleware = [] for middleware_path in settings.MIDDLEWARE_CLASSES: mw_class = import_string(middleware_path) try: mw_instance = mw_class() except MiddlewareNotUsed as exc: if settings.DEBUG: if six.text_type(exc): logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc) else: logger.debug('MiddlewareNotUsed: %r', middleware_path) continue if hasattr(mw_instance, 'process_request'): request_middleware.append(mw_instance.process_request) if hasattr(mw_instance, 'process_view'): self._view_middleware.append(mw_instance.process_view) if hasattr(mw_instance, 'process_template_response'): self._template_response_middleware.insert(0, mw_instance.process_template_response) if hasattr(mw_instance, 'process_response'): self._response_middleware.insert(0, mw_instance.process_response) if hasattr(mw_instance, 'process_exception'): self._exception_middleware.insert(0, mw_instance.process_exception) # We only assign to this when initialization is complete as it is used # as a flag for initialization being complete. self._request_middleware = request_middleware def make_view_atomic(self, view): non_atomic_requests = getattr(view, '_non_atomic_requests', set()) for db in connections.all(): if (db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests): view = transaction.atomic(using=db.alias)(view) return view def get_exception_response(self, request, resolver, status_code, exception): try: callback, param_dict = resolver.resolve_error_handler(status_code) # Unfortunately, inspect.getargspec result is not trustable enough # depending on the callback wrapping in decorators (frequent for handlers). # Falling back on try/except: try: response = callback(request, **dict(param_dict, exception=exception)) except TypeError: warnings.warn( "Error handlers should accept an exception parameter. 
Update " "your code as this parameter will be required in Django 2.0", RemovedInDjango20Warning, stacklevel=2 ) response = callback(request, **param_dict) except: signals.got_request_exception.send(sender=self.__class__, request=request) response = self.handle_uncaught_exception(request, resolver, sys.exc_info()) return response def get_response(self, request): "Returns an HttpResponse object for the given HttpRequest" # Setup default url resolver for this thread, this code is outside # the try/except so we don't get a spurious "unbound local # variable" exception in the event an exception is raised before # resolver is set urlconf = settings.ROOT_URLCONF urlresolvers.set_urlconf(urlconf) resolver = urlresolvers.get_resolver(urlconf) # Use a flag to check if the response was rendered to prevent # multiple renderings or to force rendering if necessary. response_is_rendered = False try: response = None # Apply request middleware for middleware_method in self._request_middleware: response = middleware_method(request) if response: break if response is None: if hasattr(request, 'urlconf'): # Reset url resolver with a custom URLconf. urlconf = request.urlconf urlresolvers.set_urlconf(urlconf) resolver = urlresolvers.get_resolver(urlconf) resolver_match = resolver.resolve(request.path_info) callback, callback_args, callback_kwargs = resolver_match request.resolver_match = resolver_match # Apply view middleware for middleware_method in self._view_middleware: response = middleware_method(request, callback, callback_args, callback_kwargs) if response: break if response is None: wrapped_callback = self.make_view_atomic(callback) try: response = wrapped_callback(request, *callback_args, **callback_kwargs) except Exception as e: response = self.process_exception_by_middleware(e, request) # Complain if the view returned None (a common error). if response is None: if isinstance(callback, types.FunctionType): # FBV view_name = callback.__name__ else: # CBV view_name = callback.__class__.__name__ + '.__call__' raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead." % (callback.__module__, view_name)) # If the response supports deferred rendering, apply template # response middleware and then render the response if hasattr(response, 'render') and callable(response.render): for middleware_method in self._template_response_middleware: response = middleware_method(request, response) # Complain if the template response middleware returned None (a common error). if response is None: raise ValueError( "%s.process_template_response didn't return an " "HttpResponse object. It returned None instead." 
% (middleware_method.__self__.__class__.__name__)) try: response = response.render() except Exception as e: response = self.process_exception_by_middleware(e, request) response_is_rendered = True except http.Http404 as exc: logger.warning('Not Found: %s', request.path, extra={ 'status_code': 404, 'request': request }) if settings.DEBUG: response = debug.technical_404_response(request, exc) else: response = self.get_exception_response(request, resolver, 404, exc) except PermissionDenied as exc: logger.warning( 'Forbidden (Permission denied): %s', request.path, extra={ 'status_code': 403, 'request': request }) response = self.get_exception_response(request, resolver, 403, exc) except MultiPartParserError as exc: logger.warning( 'Bad request (Unable to parse request body): %s', request.path, extra={ 'status_code': 400, 'request': request }) response = self.get_exception_response(request, resolver, 400, exc) except SuspiciousOperation as exc: # The request logger receives events for any problematic request # The security logger receives events for all SuspiciousOperations security_logger = logging.getLogger('django.security.%s' % exc.__class__.__name__) security_logger.error( force_text(exc), extra={ 'status_code': 400, 'request': request }) if settings.DEBUG: return debug.technical_500_response(request, *sys.exc_info(), status_code=400) response = self.get_exception_response(request, resolver, 400, exc) except SystemExit: # Allow sys.exit() to actually exit. See tickets #1023 and #4701 raise except: # Handle everything else. # Get the exception info now, in case another exception is thrown later. signals.got_request_exception.send(sender=self.__class__, request=request) response = self.handle_uncaught_exception(request, resolver, sys.exc_info()) try: # Apply response middleware, regardless of the response for middleware_method in self._response_middleware: response = middleware_method(request, response) # Complain if the response middleware returned None (a common error). if response is None: raise ValueError( "%s.process_response didn't return an " "HttpResponse object. It returned None instead." % (middleware_method.__self__.__class__.__name__)) response = self.apply_response_fixes(request, response) except: # Any exception should be gathered and handled signals.got_request_exception.send(sender=self.__class__, request=request) response = self.handle_uncaught_exception(request, resolver, sys.exc_info()) response._closable_objects.append(request) # If the exception handler returns a TemplateResponse that has not # been rendered, force it to be rendered. if not response_is_rendered and callable(getattr(response, 'render', None)): response = response.render() return response def process_exception_by_middleware(self, exception, request): """ Pass the exception to the exception middleware. If no middleware return a response for this exception, raise it. """ for middleware_method in self._exception_middleware: response = middleware_method(request, exception) if response: return response raise def handle_uncaught_exception(self, request, resolver, exc_info): """ Processing for any otherwise uncaught exceptions (those that will generate HTTP 500 responses). Can be overridden by subclasses who want customised 500 handling. Be *very* careful when overriding this because the error could be caused by anything, so assuming something like the database is always available would be an error. 
""" if settings.DEBUG_PROPAGATE_EXCEPTIONS: raise logger.error('Internal Server Error: %s', request.path, exc_info=exc_info, extra={ 'status_code': 500, 'request': request } ) if settings.DEBUG: return debug.technical_500_response(request, *exc_info) # If Http500 handler is not installed, re-raise last exception if resolver.urlconf_module is None: six.reraise(*exc_info) # Return an HttpResponse that displays a friendly error message. callback, param_dict = resolver.resolve_error_handler(500) return callback(request, **param_dict) def apply_response_fixes(self, request, response): """ Applies each of the functions in self.response_fixes to the request and response, modifying the response in the process. Returns the new response. """ for func in self.response_fixes: response = func(request, response) return response
mit
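load_middleware() in the handler above wires middleware in purely by attribute lookup: process_request, process_view, process_template_response, process_response and process_exception. Below is a minimal old-style middleware sketch showing two of those hooks; the class, header name and timing logic are illustrative, and on this Django version it would be listed in settings.MIDDLEWARE_CLASSES.

import time


class RequestTimingMiddleware(object):
    """Old-style (MIDDLEWARE_CLASSES) middleware; BaseHandler.load_middleware
    simply checks which of these methods exist and adds them to its lists."""

    def process_request(self, request):
        # Runs before URL resolution; returning None lets processing continue.
        request._timing_started = time.time()
        return None

    def process_response(self, request, response):
        # Runs on the way out, in reverse registration order.
        started = getattr(request, '_timing_started', None)
        if started is not None:
            response['X-Request-Duration'] = '%.3fs' % (time.time() - started)
        return response

# settings.py (illustrative):
# MIDDLEWARE_CLASSES = [
#     'myproject.middleware.RequestTimingMiddleware',
#     ...
# ]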
romanz/trezor-mcu
gen/bitmaps/generate.py
3
1293
#!/usr/bin/env python from __future__ import print_function import glob import os from PIL import Image hdrs = [] data = [] imgs = [] def encode_pixels(img): r = '' img = [ (x[0] + x[1] + x[2] > 384 and '1' or '0') for x in img] for i in range(len(img) // 8): c = ''.join(img[i * 8 : i * 8 + 8]) r += '0x%02x, ' % int(c, 2) return r cnt = 0 for fn in sorted(glob.glob('*.png')): print('Processing:', fn) im = Image.open(fn) name = os.path.splitext(fn)[0] w, h = im.size if w % 8 != 0: raise Exception('Width must be divisible by 8! (%s is %dx%d)' % (fn, w, h)) img = list(im.getdata()) hdrs.append('extern const BITMAP bmp_%s;\n' % name) imgs.append('const BITMAP bmp_%s = {%d, %d, bmp_%s_data};\n' % (name, w, h, name)) data.append('const uint8_t bmp_%s_data[] = { %s};\n' % (name, encode_pixels(img))) cnt += 1 with open('../bitmaps.c', 'wt') as f: f.write('#include "bitmaps.h"\n\n') for i in range(cnt): f.write(data[i]) f.write('\n') for i in range(cnt): f.write(imgs[i]) f.close() with open('../bitmaps.h', 'wt') as f: f.write('''#ifndef __BITMAPS_H__ #define __BITMAPS_H__ #include <stdint.h> typedef struct { uint8_t width, height; const uint8_t *data; } BITMAP; ''') for i in range(cnt): f.write(hdrs[i]) f.write('\n#endif\n') f.close()
lgpl-3.0
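encode_pixels() in the generator above thresholds each RGB pixel to one bit (on when R+G+B > 384) and packs eight pixels per byte, leftmost pixel in the most significant bit. A tiny sketch of the same packing on synthetic data, so the scheme can be checked without PIL or any PNG files:

# Same packing rule as encode_pixels(), applied to one 8-pixel row.
row = [(255, 255, 255)] * 4 + [(0, 0, 0)] * 4    # 4 white pixels, then 4 black

bits = ['1' if (r + g + b) > 384 else '0' for (r, g, b) in row]
packed = int(''.join(bits), 2)

print(''.join(bits))         # 11110000
print('0x%02x, ' % packed)   # '0xf0, ' -- the byte the generator would emit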
elazarg/pythia
type_analysis/test_type_safety.py
1
3619
import type_analysis def all_tests(): basic_tests() def basic_tests(): # basic addition with variables assert type_analysis.analyze_type_safety("x = 1; y = 1; z = x + y") == True assert type_analysis.analyze_type_safety("x = 'r'; y = 'r'; z = x + y") == True assert type_analysis.analyze_type_safety("x = 1; y = 'r'; z = x + y") == False assert type_analysis.analyze_type_safety("x = 'r'; y = 1; z = x + y") == False # basic addition with literal assert type_analysis.analyze_type_safety("x = 1; y = x + 1") == True assert type_analysis.analyze_type_safety("x = 1; y = 1 + x") == True assert type_analysis.analyze_type_safety("x = 1; y = x + 'r'") == False assert type_analysis.analyze_type_safety("x = 1; y = 'r' + x") == False assert type_analysis.analyze_type_safety("x = 1; y = 1 + 1") == True assert type_analysis.analyze_type_safety("x = 1; y = 'r' + 'r'") == True # branches assert type_analysis.analyze_type_safety(""" if True: x = 1 else: x = 'bla' z = 'hi' + x """) == False assert type_analysis.analyze_type_safety(""" if True: x = 'bla' else: x = 'bla2' z = 'hi' + x """) == True assert type_analysis.analyze_type_safety(""" if True: x = 1 else: x = 2 z = 2 + x """) == True assert type_analysis.analyze_type_safety(""" if True: x = 'hi' else: x = 'hi' z = 2 + x """) == False assert type_analysis.analyze_type_safety(""" if True: x = 'hi' else: x = 1 z = 2 + x """) == False assert type_analysis.analyze_type_safety(""" if True: x = 1 y = 1 else: x = 'bla' y = 'bla2' z = x + y""") == True # while assert type_analysis.analyze_type_safety(""" while True: x = 'bla' z = 'hi' + x """) == False # because possibly the while is not executed and x is uninitialized assert type_analysis.analyze_type_safety(""" x = 'buya' while True: x = 'bla' z = 'hi' + x """) == True # This is a bad test: the compiler recognizes that the loop is infinite, but not that it must not be entered # assert type_analysis.analyze_type_safety(""" # y = 1 # while True: # y = 'hello' # z = 1 + y""") == False # type exclusion # binary operations assert type_analysis.analyze_type_safety(""" x = 0 y = x - 1 """) == True assert type_analysis.analyze_type_safety(""" x = 0 y = x - "bla" """) == False assert type_analysis.analyze_type_safety(""" x = "hi" y = x - "bla" """) == False # booleans assert type_analysis.analyze_type_safety(""" x = True y = x & True """) == True assert type_analysis.analyze_type_safety(""" x = True y = x & 1 """) == False # not supporting implicit conversion, TODO: assert type_analysis.analyze_type_safety(""" x = True y = x & 'hello' """) == False assert type_analysis.analyze_type_safety(""" x = True if x: y = 1 """) == True assert type_analysis.analyze_type_safety(""" if True: y = 1 """) == True assert type_analysis.analyze_type_safety(""" x = 'bla' if x: y = 1 """) == False assert type_analysis.analyze_type_safety(""" if 'bla': y = 1 """) == True # if with a boolean condition assert type_analysis.analyze_type_safety(""" x = True if x: pass """) == True assert type_analysis.analyze_type_safety(""" x = False if x: y = 1 """) == True assert type_analysis.analyze_type_safety(""" x = True y = x if x: pass """) == True assert type_analysis.analyze_type_safety(""" x = "bla" y = x if x: pass """) == False # NOTE: no implicit cast to boolean if __name__ == "__main__": all_tests()
mit
PXke/invenio
invenio/legacy/bibdocfile/icon_migration_kit.py
4
6992
## This file is part of Invenio. ## Copyright (C) 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. from __future__ import print_function """ This script updates the filesystem and database structure WRT icons. In particular it will move all the icons information out of bibdoc_bibdoc tables and into the normal bibdoc + subformat infrastructure. """ import sys from datetime import datetime from invenio.utils.text import wrap_text_in_a_box, wait_for_user from invenio.legacy.bibsched.bibtask import check_running_process_user from invenio.legacy.dbquery import run_sql, OperationalError from invenio.legacy.bibdocfile.api import BibDoc from invenio.config import CFG_LOGDIR, CFG_SITE_SUPPORT_EMAIL from invenio.legacy.bibdocfile.cli import cli_fix_marc from invenio.ext.logging import register_exception from intbitset import intbitset from invenio.legacy.search_engine import record_exists def retrieve_bibdoc_bibdoc(): return run_sql('SELECT id_bibdoc1, id_bibdoc2 from bibdoc_bibdoc') def get_recid_from_docid(docid): return run_sql('SELECT id_bibrec FROM bibrec_bibdoc WHERE id_bibdoc=%s', (docid, )) def backup_tables(drop=False): """This function create a backup of bibrec_bibdoc, bibdoc and bibdoc_bibdoc tables. Returns False in case dropping of previous table is needed.""" if drop: run_sql('DROP TABLE bibdoc_bibdoc_backup_for_icon') try: run_sql("""CREATE TABLE bibdoc_bibdoc_backup_for_icon (KEY id_bibdoc1(id_bibdoc1), KEY id_bibdoc2(id_bibdoc2)) SELECT * FROM bibdoc_bibdoc""") except OperationalError as e: if not drop: return False raise e return True def fix_bibdoc_bibdoc(id_bibdoc1, id_bibdoc2, logfile): """ Migrate an icon. """ try: the_bibdoc = BibDoc.create_instance(id_bibdoc1) except Exception as err: msg = "WARNING: when opening docid %s: %s" % (id_bibdoc1, err) print(msg, file=logfile) print(msg) return True try: msg = "Fixing icon for the document %s" % (id_bibdoc1, ) print(msg, end=' ') print(msg, end=' ', file=logfile) the_icon = BibDoc.create_instance(id_bibdoc2) for a_file in the_icon.list_latest_files(): the_bibdoc.add_icon(a_file.get_full_path(), format=a_file.get_format()) the_icon.delete() run_sql("DELETE FROM bibdoc_bibdoc WHERE id_bibdoc1=%s AND id_bibdoc2=%s", (id_bibdoc1, id_bibdoc2)) print("OK") print("OK", file=logfile) return True except Exception as err: print("ERROR: %s" % err) print("ERROR: %s" % err, file=logfile) register_exception() return False def main(): """Core loop.""" check_running_process_user() logfilename = '%s/fulltext_files_migration_kit-%s.log' % (CFG_LOGDIR, datetime.today().strftime('%Y%m%d%H%M%S')) try: logfile = open(logfilename, 'w') except IOError as e: print(wrap_text_in_a_box('NOTE: it\'s impossible to create the log:\n\n %s\n\nbecause of:\n\n %s\n\nPlease run this migration kit as the same user who runs Invenio (e.g. 
Apache)' % (logfilename, e), style='conclusion', break_long=False)) sys.exit(1) bibdoc_bibdoc = retrieve_bibdoc_bibdoc() print(wrap_text_in_a_box ("""This script migrate the filesystem structure used to store icons files to the new stricter structure. This script must not be run during normal Invenio operations. It is safe to run this script. No file will be deleted. Anyway it is recommended to run a backup of the filesystem structure just in case. A backup of the database tables involved will be automatically performed.""", style='important')) if not bibdoc_bibdoc: print(wrap_text_in_a_box("No need for migration", style='conclusion')) return print("%s icons will be migrated/fixed." % len(bibdoc_bibdoc)) wait_for_user() print("Backing up database tables") try: if not backup_tables(): print(wrap_text_in_a_box("""It appears that is not the first time that you run this script. Backup tables have been already created by a previous run. In order for the script to go further they need to be removed.""", style='important')) wait_for_user() print("Backing up database tables (after dropping previous backup)", end=' ') backup_tables(drop=True) print("-> OK") else: print("-> OK") except Exception as e: print(wrap_text_in_a_box("Unexpected error while backing up tables. Please, do your checks: %s" % e, style='conclusion')) sys.exit(1) to_fix_marc = intbitset() print("Created a complete log file into %s" % logfilename) try: try: for id_bibdoc1, id_bibdoc2 in bibdoc_bibdoc: try: record_does_exist = True recids = get_recid_from_docid(id_bibdoc1) if not recids: print("Skipping %s" % id_bibdoc1) continue for recid in recids: if record_exists(recid[0]) > 0: to_fix_marc.add(recid[0]) else: record_does_exist = False if not fix_bibdoc_bibdoc(id_bibdoc1, id_bibdoc2, logfile): if record_does_exist: raise StandardError("Error when correcting document ID %s" % id_bibdoc1) except Exception as err: print("ERROR: %s" % err, file=logfile) print(wrap_text_in_a_box("DONE", style='conclusion')) except: logfile.close() register_exception() print(wrap_text_in_a_box( title = "INTERRUPTED BECAUSE OF ERROR!", body = """Please see the log file %s for what was the status prior to the error. Contact %s in case of problems, attaching the log.""" % (logfilename, CFG_SITE_SUPPORT_EMAIL), style = 'conclusion')) sys.exit(1) finally: print("Scheduling FIX-MARC to synchronize MARCXML for updated records.") cli_fix_marc(options={}, explicit_recid_set=to_fix_marc) if __name__ == '__main__': main()
gpl-2.0
jesramirez/odoo
addons/web_analytics/__openerp__.py
305
1432
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Google Analytics',
    'version': '1.0',
    'category': 'Tools',
    'complexity': "easy",
    'description': """
Google Analytics.
=================

Collects web application usage with Google Analytics.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/website-builder',
    'depends': ['web'],
    'data': [
        'views/web_analytics.xml',
    ],
    'installable': True,
    'active': False,
}
agpl-3.0
openfun/edx-platform
common/djangoapps/student/migrations/0009_auto__del_courseregistration__add_courseenrollment.py
188
10452
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting model 'CourseRegistration' db.delete_table('student_courseregistration') # Adding model 'CourseEnrollment' db.create_table('student_courseenrollment', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)), ('course_id', self.gf('django.db.models.fields.IntegerField')()), )) db.send_create_signal('student', ['CourseEnrollment']) def backwards(self, orm): # Adding model 'CourseRegistration' db.create_table('student_courseregistration', ( ('course_id', self.gf('django.db.models.fields.IntegerField')()), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)), )) db.send_create_signal('student', ['CourseRegistration']) # Deleting model 'CourseEnrollment' db.delete_table('student_courseenrollment') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}), 'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 
'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}), 'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'student.courseenrollment': { 'Meta': {'object_name': 'CourseEnrollment'}, 'course_id': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.pendingemailchange': { 'Meta': {'object_name': 'PendingEmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.pendingnamechange': { 'Meta': {'object_name': 'PendingNameChange'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 
'blank': 'True'}), 'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.registration': { 'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.userprofile': { 'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"}, 'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}) }, 'student.usertestgroup': { 'Meta': {'object_name': 'UserTestGroup'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'}) } } complete_apps = ['student']
agpl-3.0
pydoit/doit
doc/tutorial/tuto_1_1.py
2
1314
import pathlib

import pygraphviz


def task_imports():
    """find imports from a python module"""
    return {
        'file_dep': ['projects/requests/requests/models.py'],
        'targets': ['requests.models.deps'],
        'actions': ['python -m import_deps %(dependencies)s > %(targets)s'],
        'clean': True,
    }


def module_to_dot(dependencies, targets):
    graph = pygraphviz.AGraph(strict=False, directed=True)
    graph.node_attr['color'] = 'lightblue2'
    graph.node_attr['style'] = 'filled'
    for dep in dependencies:
        filepath = pathlib.Path(dep)
        source = filepath.stem
        with filepath.open() as fh:
            for line in fh:
                sink = line.strip()
                if sink:
                    graph.add_edge(source, sink)
    graph.write(targets[0])


def task_dot():
    """generate a graphviz's dot graph from module imports"""
    return {
        'file_dep': ['requests.models.deps'],
        'targets': ['requests.models.dot'],
        'actions': [module_to_dot],
        'clean': True,
    }


def task_draw():
    """generate image from a dot file"""
    return {
        'file_dep': ['requests.models.dot'],
        'targets': ['requests.models.png'],
        'actions': ['dot -Tpng %(dependencies)s -o %(targets)s'],
        'clean': True,
    }
mit
mgit-at/ansible
lib/ansible/modules/identity/keycloak/keycloak_clienttemplate.py
5
15142
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost <ei@kefro.st> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: keycloak_clienttemplate short_description: Allows administration of Keycloak client templates via Keycloak API version_added: "2.5" description: - This module allows the administration of Keycloak client templates via the Keycloak REST API. It requires access to the REST API via OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored to your needs and a user having the expected roles. - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation at U(http://www.keycloak.org/docs-api/3.3/rest-api/) - The Keycloak API does not always enforce for only sensible settings to be used -- you can set SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. If you do not specify a setting, usually a sensible default is chosen. options: state: description: - State of the client template - On C(present), the client template will be created (or updated if it exists already). - On C(absent), the client template will be removed if it exists choices: ['present', 'absent'] default: 'present' id: description: - Id of client template to be worked on. This is usually a UUID. realm: description: - Realm this client template is found in. name: description: - Name of the client template description: description: - Description of the client template in Keycloak protocol: description: - Type of client template (either C(openid-connect) or C(saml). choices: ['openid-connect', 'saml'] full_scope_allowed: description: - Is the "Full Scope Allowed" feature set for this client template or not. This is 'fullScopeAllowed' in the Keycloak REST API. type: bool protocol_mappers: description: - a list of dicts defining protocol mappers for this client template. This is 'protocolMappers' in the Keycloak REST API. suboptions: consentRequired: description: - Specifies whether a user needs to provide consent to a client for this mapper to be active. consentText: description: - The human-readable name of the consent the user is presented to accept. id: description: - Usually a UUID specifying the internal ID of this protocol mapper instance. name: description: - The name of this protocol mapper. protocol: description: - is either 'openid-connect' or 'saml', this specifies for which protocol this protocol mapper is active. choices: ['openid-connect', 'saml'] protocolMapper: description: - The Keycloak-internal name of the type of this protocol-mapper. 
While an exhaustive list is impossible to provide since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least - C(docker-v2-allow-all-mapper) - C(oidc-address-mapper) - C(oidc-full-name-mapper) - C(oidc-group-membership-mapper) - C(oidc-hardcoded-claim-mapper) - C(oidc-hardcoded-role-mapper) - C(oidc-role-name-mapper) - C(oidc-script-based-protocol-mapper) - C(oidc-sha256-pairwise-sub-mapper) - C(oidc-usermodel-attribute-mapper) - C(oidc-usermodel-client-role-mapper) - C(oidc-usermodel-property-mapper) - C(oidc-usermodel-realm-role-mapper) - C(oidc-usersessionmodel-note-mapper) - C(saml-group-membership-mapper) - C(saml-hardcode-attribute-mapper) - C(saml-hardcode-role-mapper) - C(saml-role-list-mapper) - C(saml-role-name-mapper) - C(saml-user-attribute-mapper) - C(saml-user-property-mapper) - C(saml-user-session-note-mapper) - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to Server Info -> Providers and looking under 'protocol-mapper'. config: description: - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value of I(protocolMapper) and are not documented other than by the source of the mappers and its parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing protocol mapper configuration through check-mode in the "existing" field. attributes: description: - A dict of further attributes for this client template. This can contain various configuration settings, though in the default installation of Keycloak as of 3.4, none are documented or known, so this is usually empty. notes: - The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled), I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such, they are not available through this module. 
extends_documentation_fragment: - keycloak author: - Eike Frost (@eikef) ''' EXAMPLES = ''' - name: Create or update Keycloak client template (minimal) local_action: module: keycloak_clienttemplate auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master auth_username: USERNAME auth_password: PASSWORD realm: master name: this_is_a_test - name: delete Keycloak client template local_action: module: keycloak_clienttemplate auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master auth_username: USERNAME auth_password: PASSWORD realm: master state: absent name: test01 - name: Create or update Keycloak client template (with a protocol mapper) local_action: module: keycloak_clienttemplate auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master auth_username: USERNAME auth_password: PASSWORD realm: master name: this_is_a_test protocol_mappers: - config: access.token.claim: True claim.name: "family_name" id.token.claim: True jsonType.label: String user.attribute: lastName userinfo.token.claim: True consentRequired: True consentText: "${familyName}" name: family name protocol: openid-connect protocolMapper: oidc-usermodel-property-mapper full_scope_allowed: false id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f ''' RETURN = ''' msg: description: Message as to what action was taken returned: always type: string sample: "Client template testclient has been updated" proposed: description: client template representation of proposed changes to client template returned: always type: dict sample: { name: "test01" } existing: description: client template representation of existing client template (sample is truncated) returned: always type: dict sample: { "description": "test01", "fullScopeAllowed": false, "id": "9c3712ab-decd-481e-954f-76da7b006e5f", "name": "test01", "protocol": "saml" } end_state: description: client template representation of client template after module execution (sample is truncated) returned: always type: dict sample: { "description": "test01", "fullScopeAllowed": false, "id": "9c3712ab-decd-481e-954f-76da7b006e5f", "name": "test01", "protocol": "saml" } ''' from ansible.module_utils.keycloak import KeycloakAPI, camel, keycloak_argument_spec from ansible.module_utils.basic import AnsibleModule def main(): """ Module execution :return: """ argument_spec = keycloak_argument_spec() protmapper_spec = dict( consentRequired=dict(type='bool'), consentText=dict(type='str'), id=dict(type='str'), name=dict(type='str'), protocol=dict(type='str', choices=['openid-connect', 'saml']), protocolMapper=dict(type='str'), config=dict(type='dict'), ) meta_args = dict( realm=dict(type='str', default='master'), state=dict(default='present', choices=['present', 'absent']), id=dict(type='str'), name=dict(type='str'), description=dict(type='str'), protocol=dict(type='str', choices=['openid-connect', 'saml']), attributes=dict(type='dict'), full_scope_allowed=dict(type='bool'), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec), ) argument_spec.update(meta_args) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name']])) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API kc = KeycloakAPI(module) realm = module.params.get('realm') state = module.params.get('state') cid = module.params.get('id') # convert module parameters to client representation parameters 
(if they belong in there) clientt_params = [x for x in module.params if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm', 'auth_client_secret', 'auth_username', 'auth_password', 'validate_certs', 'realm'] and module.params.get(x) is not None] # See whether the client template already exists in Keycloak if cid is None: before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm) if before_clientt is not None: cid = before_clientt['id'] else: before_clientt = kc.get_client_template_by_id(cid, realm=realm) if before_clientt is None: before_clientt = dict() result['existing'] = before_clientt # Build a proposed changeset from parameters given to this module changeset = dict() for clientt_param in clientt_params: # lists in the Keycloak API are sorted new_param_value = module.params.get(clientt_param) if isinstance(new_param_value, list): try: new_param_value = sorted(new_param_value) except TypeError: pass changeset[camel(clientt_param)] = new_param_value # Whether creating or updating a client, take the before-state and merge the changeset into it updated_clientt = before_clientt.copy() updated_clientt.update(changeset) result['proposed'] = changeset # If the client template does not exist yet, before_client is still empty if before_clientt == dict(): if state == 'absent': # do nothing and exit if module._diff: result['diff'] = dict(before='', after='') result['msg'] = 'Client template does not exist, doing nothing.' module.exit_json(**result) # create new client template result['changed'] = True if 'name' not in updated_clientt: module.fail_json(msg='name needs to be specified when creating a new client') if module._diff: result['diff'] = dict(before='', after=updated_clientt) if module.check_mode: module.exit_json(**result) kc.create_client_template(updated_clientt, realm=realm) after_clientt = kc.get_client_template_by_name(updated_clientt['name'], realm=realm) result['end_state'] = after_clientt result['msg'] = 'Client template %s has been created.' % updated_clientt['name'] module.exit_json(**result) else: if state == 'present': # update existing client template result['changed'] = True if module.check_mode: # We can only compare the current client template with the proposed updates we have if module._diff: result['diff'] = dict(before=before_clientt, after=updated_clientt) module.exit_json(**result) kc.update_client_template(cid, updated_clientt, realm=realm) after_clientt = kc.get_client_template_by_id(cid, realm=realm) if before_clientt == after_clientt: result['changed'] = False if module._diff: result['diff'] = dict(before=before_clientt, after=after_clientt) result['end_state'] = after_clientt result['msg'] = 'Client template %s has been updated.' % updated_clientt['name'] module.exit_json(**result) else: # Delete existing client result['changed'] = True if module._diff: result['diff']['before'] = before_clientt result['diff']['after'] = '' if module.check_mode: module.exit_json(**result) kc.delete_client_template(cid, realm=realm) result['proposed'] = dict() result['end_state'] = dict() result['msg'] = 'Client template %s has been deleted.' % before_clientt['name'] module.exit_json(**result) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
rubenvb/skia
infra/bots/recipe_modules/flavor/android.py
1
21248
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from recipe_engine import recipe_api from . import default import subprocess # TODO(borenet): No! Remove this. """Android flavor, used for running code on Android.""" class AndroidFlavor(default.DefaultFlavor): def __init__(self, m): super(AndroidFlavor, self).__init__(m) self._ever_ran_adb = False self.ADB_BINARY = '/usr/bin/adb.1.0.35' self.ADB_PUB_KEY = '/home/chrome-bot/.android/adbkey' if 'skia' not in self.m.vars.swarming_bot_id: self.ADB_BINARY = '/opt/infra-android/tools/adb' self.ADB_PUB_KEY = ('/home/chrome-bot/.android/' 'chrome_infrastructure_adbkey') # Data should go in android_data_dir, which may be preserved across runs. android_data_dir = '/sdcard/revenge_of_the_skiabot/' self.device_dirs = default.DeviceDirs( bin_dir = '/data/local/tmp/', dm_dir = android_data_dir + 'dm_out', perf_data_dir = android_data_dir + 'perf', resource_dir = android_data_dir + 'resources', images_dir = android_data_dir + 'images', lotties_dir = android_data_dir + 'lotties', skp_dir = android_data_dir + 'skps', svg_dir = android_data_dir + 'svgs', tmp_dir = android_data_dir) # A list of devices we can't root. If rooting fails and a device is not # on the list, we fail the task to avoid perf inconsistencies. self.rootable_blacklist = ['GalaxyS6', 'GalaxyS7_G930FD', 'GalaxyS9', 'MotoG4', 'NVIDIA_Shield'] # Maps device type -> CPU ids that should be scaled for nanobench. # Many devices have two (or more) different CPUs (e.g. big.LITTLE # on Nexus5x). The CPUs listed are the biggest cpus on the device. # The CPUs are grouped together, so we only need to scale one of them # (the one listed) in order to scale them all. # E.g. Nexus5x has cpu0-3 as one chip and cpu4-5 as the other. Thus, # if one wants to run a single-threaded application (e.g. nanobench), one # can disable cpu0-3 and scale cpu 4 to have only cpu4 and 5 at the same # frequency. See also disable_for_nanobench. self.cpus_to_scale = { 'Nexus5x': [4], 'NexusPlayer': [0, 2], # has 2 identical chips, so scale them both. 'Pixel': [2], 'Pixel2XL': [4] } # Maps device type -> CPU ids that should be turned off when running # single-threaded applications like nanobench. The devices listed have # multiple, differnt CPUs. We notice a lot of noise that seems to be # caused by nanobench running on the slow CPU, then the big CPU. By # disabling this, we see less of that noise by forcing the same CPU # to be used for the performance testing every time. self.disable_for_nanobench = { 'Nexus5x': range(0, 4), 'Pixel': range(0, 2), 'Pixel2XL': range(0, 4) } self.gpu_scaling = { "Nexus5": 450000000, "Nexus5x": 600000000, } def _run(self, title, *cmd, **kwargs): with self.m.context(cwd=self.m.path['start_dir'].join('skia')): return self.m.run(self.m.step, title, cmd=list(cmd), **kwargs) def _adb(self, title, *cmd, **kwargs): # The only non-infra adb steps (dm / nanobench) happen to not use _adb(). if 'infra_step' not in kwargs: kwargs['infra_step'] = True self._ever_ran_adb = True # ADB seems to be occasionally flaky on every device, so always retry. 
attempts = 3 def wait_for_device(attempt): self.m.run(self.m.step, 'kill adb server after failure of \'%s\' (attempt %d)' % ( title, attempt), cmd=[self.ADB_BINARY, 'kill-server'], infra_step=True, timeout=30, abort_on_failure=False, fail_build_on_failure=False) self.m.run(self.m.step, 'wait for device after failure of \'%s\' (attempt %d)' % ( title, attempt), cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True, timeout=180, abort_on_failure=False, fail_build_on_failure=False) with self.m.context(cwd=self.m.path['start_dir'].join('skia')): with self.m.env({'ADB_VENDOR_KEYS': self.ADB_PUB_KEY}): return self.m.run.with_retry(self.m.step, title, attempts, cmd=[self.ADB_BINARY]+list(cmd), between_attempts_fn=wait_for_device, **kwargs) def _scale_for_dm(self): device = self.m.vars.builder_cfg.get('model') if (device in self.rootable_blacklist or self.m.vars.internal_hardware_label): return # This is paranoia... any CPUs we disabled while running nanobench # ought to be back online now that we've restarted the device. for i in self.disable_for_nanobench.get(device, []): self._set_cpu_online(i, 1) # enable scale_up = self.cpus_to_scale.get(device, [0]) # For big.LITTLE devices, make sure we scale the LITTLE cores up; # there is a chance they are still in powersave mode from when # swarming slows things down for cooling down and charging. if 0 not in scale_up: scale_up.append(0) for i in scale_up: # AndroidOne doesn't support ondemand governor. hotplug is similar. if device == 'AndroidOne': self._set_governor(i, 'hotplug') else: self._set_governor(i, 'ondemand') def _scale_for_nanobench(self): device = self.m.vars.builder_cfg.get('model') if (device in self.rootable_blacklist or self.m.vars.internal_hardware_label): return for i in self.cpus_to_scale.get(device, [0]): self._set_governor(i, 'userspace') self._scale_cpu(i, 0.6) for i in self.disable_for_nanobench.get(device, []): self._set_cpu_online(i, 0) # disable if device in self.gpu_scaling: #https://developer.qualcomm.com/qfile/28823/lm80-p0436-11_adb_commands.pdf # Section 3.2.1 Commands to put the GPU in performance mode # Nexus 5 is 320000000 by default # Nexus 5x is 180000000 by default gpu_freq = self.gpu_scaling[device] self.m.run.with_retry(self.m.python.inline, "Lock GPU to %d (and other perf tweaks)" % gpu_freq, 3, # attempts program=""" import os import subprocess import sys import time ADB = sys.argv[1] freq = sys.argv[2] idle_timer = "10000" log = subprocess.check_output([ADB, 'root']) # check for message like 'adbd cannot run as root in production builds' print log if 'cannot' in log: raise Exception('adb root failed') subprocess.check_output([ADB, 'shell', 'stop', 'thermald']) subprocess.check_output([ADB, 'shell', 'echo "%s" > ' '/sys/class/kgsl/kgsl-3d0/gpuclk' % freq]) actual_freq = subprocess.check_output([ADB, 'shell', 'cat ' '/sys/class/kgsl/kgsl-3d0/gpuclk']).strip() if actual_freq != freq: raise Exception('Frequency (actual, expected) (%s, %s)' % (actual_freq, freq)) subprocess.check_output([ADB, 'shell', 'echo "%s" > ' '/sys/class/kgsl/kgsl-3d0/idle_timer' % idle_timer]) actual_timer = subprocess.check_output([ADB, 'shell', 'cat ' '/sys/class/kgsl/kgsl-3d0/idle_timer']).strip() if actual_timer != idle_timer: raise Exception('idle_timer (actual, expected) (%s, %s)' % (actual_timer, idle_timer)) for s in ['force_bus_on', 'force_rail_on', 'force_clk_on']: subprocess.check_output([ADB, 'shell', 'echo "1" > ' '/sys/class/kgsl/kgsl-3d0/%s' % s]) actual_set = subprocess.check_output([ADB, 'shell', 'cat ' 
'/sys/class/kgsl/kgsl-3d0/%s' % s]).strip() if actual_set != "1": raise Exception('%s (actual, expected) (%s, 1)' % (s, actual_set)) """, args = [self.ADB_BINARY, gpu_freq], infra_step=True, timeout=30) def _set_governor(self, cpu, gov): self._ever_ran_adb = True self.m.run.with_retry(self.m.python.inline, "Set CPU %d's governor to %s" % (cpu, gov), 3, # attempts program=""" import os import subprocess import sys import time ADB = sys.argv[1] cpu = int(sys.argv[2]) gov = sys.argv[3] log = subprocess.check_output([ADB, 'root']) # check for message like 'adbd cannot run as root in production builds' print log if 'cannot' in log: raise Exception('adb root failed') subprocess.check_output([ADB, 'shell', 'echo "%s" > ' '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % (gov, cpu)]) actual_gov = subprocess.check_output([ADB, 'shell', 'cat ' '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % cpu]).strip() if actual_gov != gov: raise Exception('(actual, expected) (%s, %s)' % (actual_gov, gov)) """, args = [self.ADB_BINARY, cpu, gov], infra_step=True, timeout=30) def _set_cpu_online(self, cpu, value): """Set /sys/devices/system/cpu/cpu{N}/online to value (0 or 1).""" self._ever_ran_adb = True msg = 'Disabling' if value: msg = 'Enabling' self.m.run.with_retry(self.m.python.inline, '%s CPU %d' % (msg, cpu), 3, # attempts program=""" import os import subprocess import sys import time ADB = sys.argv[1] cpu = int(sys.argv[2]) value = int(sys.argv[3]) log = subprocess.check_output([ADB, 'root']) # check for message like 'adbd cannot run as root in production builds' print log if 'cannot' in log: raise Exception('adb root failed') # If we try to echo 1 to an already online cpu, adb returns exit code 1. # So, check the value before trying to write it. prior_status = subprocess.check_output([ADB, 'shell', 'cat ' '/sys/devices/system/cpu/cpu%d/online' % cpu]).strip() if prior_status == str(value): print 'CPU %d online already %d' % (cpu, value) sys.exit() subprocess.check_output([ADB, 'shell', 'echo %s > ' '/sys/devices/system/cpu/cpu%d/online' % (value, cpu)]) actual_status = subprocess.check_output([ADB, 'shell', 'cat ' '/sys/devices/system/cpu/cpu%d/online' % cpu]).strip() if actual_status != str(value): raise Exception('(actual, expected) (%s, %d)' % (actual_status, value)) """, args = [self.ADB_BINARY, cpu, value], infra_step=True, timeout=30) def _scale_cpu(self, cpu, target_percent): self._ever_ran_adb = True self.m.run.with_retry(self.m.python.inline, 'Scale CPU %d to %f' % (cpu, target_percent), 3, # attempts program=""" import os import subprocess import sys import time ADB = sys.argv[1] target_percent = float(sys.argv[2]) cpu = int(sys.argv[3]) log = subprocess.check_output([ADB, 'root']) # check for message like 'adbd cannot run as root in production builds' print log if 'cannot' in log: raise Exception('adb root failed') root = '/sys/devices/system/cpu/cpu%d/cpufreq' %cpu # All devices we test on give a list of their available frequencies. 
available_freqs = subprocess.check_output([ADB, 'shell', 'cat %s/scaling_available_frequencies' % root]) # Check for message like '/system/bin/sh: file not found' if available_freqs and '/system/bin/sh' not in available_freqs: available_freqs = sorted( int(i) for i in available_freqs.strip().split()) else: raise Exception('Could not get list of available frequencies: %s' % available_freqs) maxfreq = available_freqs[-1] target = int(round(maxfreq * target_percent)) freq = maxfreq for f in reversed(available_freqs): if f <= target: freq = f break print 'Setting frequency to %d' % freq # If scaling_max_freq is lower than our attempted setting, it won't take. # We must set min first, because if we try to set max to be less than min # (which sometimes happens after certain devices reboot) it returns a # perplexing permissions error. subprocess.check_output([ADB, 'shell', 'echo 0 > ' '%s/scaling_min_freq' % root]) subprocess.check_output([ADB, 'shell', 'echo %d > ' '%s/scaling_max_freq' % (freq, root)]) subprocess.check_output([ADB, 'shell', 'echo %d > ' '%s/scaling_setspeed' % (freq, root)]) time.sleep(5) actual_freq = subprocess.check_output([ADB, 'shell', 'cat ' '%s/scaling_cur_freq' % root]).strip() if actual_freq != str(freq): raise Exception('(actual, expected) (%s, %d)' % (actual_freq, freq)) """, args = [self.ADB_BINARY, str(target_percent), cpu], infra_step=True, timeout=30) def install(self): self._adb('mkdir ' + self.device_dirs.resource_dir, 'shell', 'mkdir', '-p', self.device_dirs.resource_dir) if 'ASAN' in self.m.vars.extra_tokens: self._ever_ran_adb = True asan_setup = self.m.vars.slave_dir.join( 'android_ndk_linux', 'toolchains', 'llvm', 'prebuilt', 'linux-x86_64', 'lib64', 'clang', '8.0.2', 'bin', 'asan_device_setup') self.m.run(self.m.python.inline, 'Setting up device to run ASAN', program=""" import os import subprocess import sys import time ADB = sys.argv[1] ASAN_SETUP = sys.argv[2] def wait_for_device(): while True: time.sleep(5) print 'Waiting for device' subprocess.check_output([ADB, 'wait-for-device']) bit1 = subprocess.check_output([ADB, 'shell', 'getprop', 'dev.bootcomplete']) bit2 = subprocess.check_output([ADB, 'shell', 'getprop', 'sys.boot_completed']) if '1' in bit1 and '1' in bit2: print 'Device detected' break log = subprocess.check_output([ADB, 'root']) # check for message like 'adbd cannot run as root in production builds' print log if 'cannot' in log: raise Exception('adb root failed') output = subprocess.check_output([ADB, 'disable-verity']) print output if 'already disabled' not in output: print 'Rebooting device' subprocess.check_output([ADB, 'reboot']) wait_for_device() def installASAN(revert=False): # ASAN setup script is idempotent, either it installs it or # says it's installed. Returns True on success, false otherwise. 
out = subprocess.check_output([ADB, 'wait-for-device']) print out cmd = [ASAN_SETUP] if revert: cmd = [ASAN_SETUP, '--revert'] process = subprocess.Popen(cmd, env={'ADB': ADB}, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # this also blocks until command finishes (stdout, stderr) = process.communicate() print stdout print 'Stderr: %s' % stderr return process.returncode == 0 if not installASAN(): print 'Trying to revert the ASAN install and then re-install' # ASAN script sometimes has issues if it was interrupted or partially applied # Try reverting it, then re-enabling it if not installASAN(revert=True): raise Exception('reverting ASAN install failed') # Sleep because device does not reboot instantly time.sleep(10) if not installASAN(): raise Exception('Tried twice to setup ASAN and failed.') # Sleep because device does not reboot instantly time.sleep(10) wait_for_device() # Sleep again to hopefully avoid error "secure_mkdirs failed: No such file or # directory" when pushing resources to the device. time.sleep(60) """, args = [self.ADB_BINARY, asan_setup], infra_step=True, timeout=300, abort_on_failure=True) def cleanup_steps(self): if 'ASAN' in self.m.vars.extra_tokens: self._ever_ran_adb = True # Remove ASAN. asan_setup = self.m.vars.slave_dir.join( 'android_ndk_linux', 'toolchains', 'llvm', 'prebuilt', 'linux-x86_64', 'lib64', 'clang', '8.0.2', 'bin', 'asan_device_setup') self.m.run(self.m.step, 'wait for device before uninstalling ASAN', cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True, timeout=180, abort_on_failure=False, fail_build_on_failure=False) self.m.run(self.m.step, 'uninstall ASAN', cmd=[asan_setup, '--revert'], infra_step=True, timeout=300, abort_on_failure=False, fail_build_on_failure=False) if self._ever_ran_adb: self.m.run(self.m.python.inline, 'dump log', program=""" import os import subprocess import sys out = sys.argv[1] log = subprocess.check_output(['%s', 'logcat', '-d']) for line in log.split('\\n'): tokens = line.split() if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc': addr, path = tokens[-2:] local = os.path.join(out, os.path.basename(path)) if os.path.exists(local): sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr]) line = line.replace(addr, addr + ' ' + sym.strip()) print line """ % self.ADB_BINARY, args=[self.host_dirs.bin_dir], infra_step=True, timeout=300, abort_on_failure=False) # Only quarantine the bot if the first failed step # is an infra step. If, instead, we did this for any infra failures, we # would do this too much. For example, if a Nexus 10 died during dm # and the following pull step would also fail "device not found" - causing # us to run the shutdown command when the device was probably not in a # broken state; it was just rebooting. if (self.m.run.failed_steps and isinstance(self.m.run.failed_steps[0], recipe_api.InfraFailure)): bot_id = self.m.vars.swarming_bot_id self.m.file.write_text('Quarantining Bot', '/home/chrome-bot/%s.force_quarantine' % bot_id, ' ') if self._ever_ran_adb: self._adb('kill adb server', 'kill-server') def step(self, name, cmd, **kwargs): if not kwargs.get('skip_binary_push', False): if (cmd[0] == 'nanobench'): self._scale_for_nanobench() else: self._scale_for_dm() app = self.host_dirs.bin_dir.join(cmd[0]) self._adb('push %s' % cmd[0], 'push', app, self.device_dirs.bin_dir) sh = '%s.sh' % cmd[0] self.m.run.writefile(self.m.vars.tmp_dir.join(sh), 'set -x; %s%s; echo $? 
>%src' % ( self.device_dirs.bin_dir, subprocess.list2cmdline(map(str, cmd)), self.device_dirs.bin_dir)) self._adb('push %s' % sh, 'push', self.m.vars.tmp_dir.join(sh), self.device_dirs.bin_dir) self._adb('clear log', 'logcat', '-c') self.m.python.inline('%s' % cmd[0], """ import subprocess import sys bin_dir = sys.argv[1] sh = sys.argv[2] subprocess.check_call(['%s', 'shell', 'sh', bin_dir + sh]) try: sys.exit(int(subprocess.check_output(['%s', 'shell', 'cat', bin_dir + 'rc']))) except ValueError: print "Couldn't read the return code. Probably killed for OOM." sys.exit(1) """ % (self.ADB_BINARY, self.ADB_BINARY), args=[self.device_dirs.bin_dir, sh]) def copy_file_to_device(self, host, device): self._adb('push %s %s' % (host, device), 'push', host, device) def copy_directory_contents_to_device(self, host, device): # Copy the tree, avoiding hidden directories and resolving symlinks. self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device), program=""" import os import subprocess import sys host = sys.argv[1] device = sys.argv[2] for d, _, fs in os.walk(host): p = os.path.relpath(d, host) if p != '.' and p.startswith('.'): continue for f in fs: print os.path.join(p,f) subprocess.check_call(['%s', 'push', os.path.realpath(os.path.join(host, p, f)), os.path.join(device, p, f)]) """ % self.ADB_BINARY, args=[host, device], infra_step=True) def copy_directory_contents_to_host(self, device, host): # TODO(borenet): When all of our devices are on Android 6.0 and up, we can # switch to using tar to zip up the results before pulling. with self.m.step.nest('adb pull'): with self.m.tempfile.temp_dir('adb_pull') as tmp: self._adb('pull %s' % device, 'pull', device, tmp) paths = self.m.file.glob_paths( 'list pulled files', tmp, self.m.path.basename(device) + self.m.path.sep + '*', test_data=['%d.png' % i for i in (1, 2)]) for p in paths: self.m.file.copy('copy %s' % self.m.path.basename(p), p, host) def read_file_on_device(self, path, **kwargs): rv = self._adb('read %s' % path, 'shell', 'cat', path, stdout=self.m.raw_io.output(), **kwargs) return rv.stdout.rstrip() if rv and rv.stdout else None def remove_file_on_device(self, path): self._adb('rm %s' % path, 'shell', 'rm', '-f', path) def create_clean_device_dir(self, path): self._adb('rm %s' % path, 'shell', 'rm', '-rf', path) self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path)
bsd-3-clause
jtolds/pants-lang
src/ir/types.py
1
13255
#!/usr/bin/env python # # Copyright (c) 2012, JT Olds <hello@jtolds.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # """ Pants http://www.pants-lang.org/ IR types """ __author__ = "JT Olds" __author_email__ = "hello@jtolds.com" __all__ = ["Identifier", "Expression", "Assignment", "ObjectMutation", "ReturnValue", "Value", "Field", "Variable", "Integer", "String", "Float", "Function", "OutArgument", "PositionalOutArgument", "NamedOutArgument", "SplatOutArgument", "KeywordOutArgument", "InArgument", "RequiredInArgument", "DefaultInArgument", "SplatInArgument", "KeywordInArgument", "Program", "null_val"] import functools from ast import types as ast class Expression(object): pass class Program(object): __slots__ = ["expressions", "lastval", "line", "col"] def __init__(self, expressions, lastval, line, col): for exp in expressions: assert isinstance(exp, Expression) assert is_value(lastval) self.expressions = expressions self.lastval = lastval self.line = line self.col = col def format(self, indent=""): return ";\n".join((exp.format(indent) for exp in self.expressions)) def __repr__(self): return "Program(%r, %r, %d, %d)" % (self.expressions, self.lastval, self.line, self.col) @functools.total_ordering class Identifier(object): SAFE_C_CHARS = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789") __slots__ = ["name", "user_provided", "line", "col"] def __init__(self, name, user_provided, line, col): assert isinstance(name, str) assert isinstance(user_provided, bool) self.name = name self.user_provided = user_provided self.line = line self.col = col def format(self, indent=""): return self.c_name(False) def c_name(self, escape=True): if not self.user_provided: assert self.name[-1] != "_" for char in self.name: assert char == "_" or char in Identifier.SAFE_C_CHARS return "%s_" % self.name if escape: name = [] for char in self.name: if char in Identifier.SAFE_C_CHARS: name.append(char) elif char == "_": name.append("__") else: name.append("_") name.append("%x" % ord(char)) return "".join(name) else: if self.name[-1] == "_": return "%s_" % self.name return self.name def references(self, identifier): return self == identifier def __eq__(self, other): return (self.name, self.user_provided) == (other.name, other.user_provided) def __lt__(self, other): return (self.name, self.user_provided) < (other.name, other.user_provided) def __repr__(self): return "Identifier(%r, %r, %d, %d)" % (self.name, self.user_provided, self.line, self.col) class Assignment(Expression): __slots__ = ["assignee", 
"value", "local", "line", "col"] def __init__(self, assignee, value, local, line, col): assert isinstance(assignee, Identifier) assert is_value(value) self.assignee = assignee self.value = value self.local = local self.line = line self.col = col def format(self, indent): return "%s %s %s" % (self.assignee.format(indent), self.local and "=" or ":=", self.value.format(indent)) def __repr__(self): return "Assignment(%r, %r, %r, %d, %d)" % (self.assignee, self.value, self.local, self.line, self.col) class ObjectMutation(Expression): __slots__ = ["object", "field", "value", "line", "col"] def __init__(self, object_, field, value, line, col): assert is_value(object_) assert isinstance(field, Identifier) assert is_value(value) self.object = object_ self.field = field self.value = value self.line = line self.col = col def format(self, indent): return "%s.%s = %s" % (self.object.format(indent), self.field.format(indent), self.value.format(indent)) def __repr__(self): return "ObjectMutation(%r, %r, %r, %d, %d)" % (self.object, self.field, self.value, self.line, self.col) class ReturnValue(Expression): __slots__ = ["assignee", "call", "left_args", "right_args", "line", "col"] def __init__(self, assignee, call, left_args, right_args, line, col): assert isinstance(assignee, Identifier) assert is_value(call) for arg in left_args + right_args: assert isinstance(arg, OutArgument) self.assignee = assignee self.call = call self.left_args = left_args self.right_args = right_args self.line = line self.col = col def format(self, indent): call = ["%s = %s(" % (self.assignee.format(indent), self.call.format(indent))] call.append(", ".join((arg.format(indent) for arg in self.left_args))) if self.left_args: if self.right_args: call.append("; ") else: call.append(";") if self.right_args: call.append(", ".join((arg.format(indent) for arg in self.right_args))) call.append(")") return "".join(call) def __repr__(self): return "ReturnValue(%r, %r, %r, %r, %d, %d)" % (self.assignee, self.call, self.left_args, self.right_args, self.line, self.col) class Value(object): pass class Field(Value): __slots__ = ["object", "field", "line", "col"] def __init__(self, object_, field, line, col): assert is_value(object_) assert isinstance(field, Identifier) self.object = object_ self.field = field self.line = line self.col = col def format(self, indent): return "%s.%s" % (self.object.format(indent), self.field.format(indent)) def references(self, identifier): return self.object.references(identifier) def __repr__(self): return "Field(%r, %r, %d, %d)" % (self.object, self.field, self.line, self.col) class Variable(Value): __slots__ = ["identifier", "line", "col"] def __init__(self, identifier, line, col): assert isinstance(identifier, Identifier) self.identifier = identifier self.line = line self.col = col def format(self, indent): return self.identifier.format(indent) def references(self, identifier): return self.identifier.references(identifier) def __repr__(self): return "Variable(%r, %d, %d)" % (self.identifier, self.line, self.col) Integer = ast.Integer Float = ast.Float String = ast.String def is_value(value): return isinstance(value, (Value, Integer, Float, String)) class Function(Value): __slots__ = ["expressions", "lastval", "left_args", "right_args", "line", "col"] def __init__(self, expressions, lastval, left_args, right_args, line, col): for exp in expressions: assert isinstance(exp, Expression) assert is_value(lastval) for arg in left_args + right_args: assert isinstance(arg, InArgument) self.expressions = expressions 
self.lastval = lastval self.left_args = left_args self.right_args = right_args self.line = line self.col = col def format(self, indent): string = ["{"] if self.left_args or self.right_args: string.append("|") if self.left_args: string.append(", ".join((arg.format(indent + " ") for arg in self.left_args))) string.append(";") if self.left_args and self.right_args: string.append(" ") if self.right_args: string.append(", ".join((arg.format(indent + " ") for arg in self.right_args))) if self.left_args or self.right_args: string.append("|") for exp in self.expressions: string.append("\n%s %s;" % (indent, exp.format(indent + " "))) if self.expressions: string.append("\n%s %s;\n%s}" % (indent, self.lastval.format(indent + " "), indent)) else: string.append("%s}" % self.lastval.format(indent)) return "".join(string) def __repr__(self): return "Function(%r, %r, %r, %r, %d, %d)" % (self.expressions, self.lastval, self.left_args, self.right_args, self.line, self.col) class OutArgument(object): pass class PositionalOutArgument(OutArgument): __slots__ = ["value", "line", "col"] def __init__(self, value, line, col): assert is_value(value) self.value = value self.line = line self.col = col def references(self, identifier): return self.value.references(identifier) def format(self, indent): return self.value.format(indent) def __repr__(self): return "PositionalOutArgument(%r, %d, %d)" % (self.value, self.line, self.col) class NamedOutArgument(OutArgument): __slots__ = ["name", "value", "line", "col"] def __init__(self, name, value, line, col): assert isinstance(name, Identifier) assert is_value(value) self.name = name self.value = value self.line = line self.col = col def references(self, identifier): return self.value.references(identifier) def format(self, indent): return "%s:%s" % (self.name.format(indent), self.value.format(indent)) def __repr__(self): return "NamedOutArgument(%r, %r, %d, %d)" % (self.name, self.value, self.line, self.col) class SplatOutArgument(OutArgument): __slots__ = ["value", "line", "col"] def __init__(self, value, line, col): assert is_value(value) self.value = value self.line = line self.col = col def references(self, identifier): return self.value.references(identifier) def format(self, indent): return ":(%s)" % self.value.format(indent) def __repr__(self): return "SplatOutArgument(%r, %d, %d)" % (self.value, self.line, self.col) class KeywordOutArgument(OutArgument): __slots__ = ["value", "line", "col"] def __init__(self, value, line, col): assert is_value(value) self.value = value self.line = line self.col = col def references(self, identifier): return self.value.references(identifier) def format(self, indent): return "::(%s)" % self.value.format(indent) def __repr__(self): return "KeywordOutArgument(%r, %d, %d)" % (self.value, self.line, self.col) class InArgument(object): pass class RequiredInArgument(InArgument): __slots__ = ["name", "line", "col"] def __init__(self, name, line, col): assert isinstance(name, Identifier) self.name = name self.line = line self.col = col def references(self, identifier): return False def binds(self, identifier): return self.name.references(identifier) def format(self, indent): return self.name.format(indent) def __repr__(self): return "RequiredInArgument(%r, %d, %d)" % (self.name, self.line, self.col) class DefaultInArgument(InArgument): __slots__ = ["name", "value", "line", "col"] def __init__(self, name, value, line, col): assert isinstance(name, Identifier) assert is_value(value) self.name = name self.value = value self.line = line 
self.col = col def references(self, identifier): return self.value.references(identifier) def binds(self, identifier): return self.name.references(identifier) def format(self, indent): return "%s:%s" % (self.name.format(indent), self.value.format(indent)) def __repr__(self): return "DefaultInArgument(%r, %r, %d, %d)" % (self.name, self.value, self.line, self.col) class SplatInArgument(InArgument): __slots__ = ["name", "line", "col"] def __init__(self, name, line, col): assert isinstance(name, Identifier) self.name = name self.line = line self.col = col def references(self, identifier): return False def binds(self, identifier): return self.name.references(identifier) def format(self, indent): return ":(%s)" % self.name.format(indent) def __repr__(self): return "SplatInArgument(%r, %d, %d)" % (self.name, self.line, self.col) class KeywordInArgument(InArgument): __slots__ = ["name", "line", "col"] def __init__(self, name, line, col): assert isinstance(name, Identifier) self.name = name self.line = line self.col = col def references(self, identifier): return False def binds(self, identifier): return self.name.references(identifier) def format(self, indent): return "::(%s)" % self.name.format(indent) def __repr__(self): return "KeywordInArgument(%r, %d, %d)" % (self.name, self.line, self.col) def null_val(line, col): return Variable(Identifier("null", False, line, col), line, col)
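The node classes above pair strict constructors (type asserts on every child) with format() methods that re-serialize the tree. A minimal, hedged sketch of how they compose, assuming Identifier is defined elsewhere in the same module with the (name, flag, line, col) signature and name-echoing format() that the null_val helper suggests:

    # Illustrative only: Identifier's constructor and format() behaviour are
    # inferred from the null_val helper above, not confirmed here.
    target = Identifier("x", False, 1, 0)
    node = Assignment(target, null_val(1, 4), True, 1, 0)   # local assignment
    print(node.format(""))   # expected: "x = null" ("=" because local is True)
    print(repr(node))        # Assignment(..., True, 1, 0)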
mit
zzcclp/spark
python/pyspark/pandas/plot/core.py
11
41921
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import importlib import pandas as pd import numpy as np from pyspark.ml.feature import Bucketizer from pyspark.mllib.stat import KernelDensity # type: ignore from pyspark.sql import functions as F from pandas.core.base import PandasObject from pandas.core.dtypes.inference import is_integer from pyspark.pandas.missing import unsupported_function from pyspark.pandas.config import get_option from pyspark.pandas.spark import functions as SF from pyspark.pandas.utils import name_like_string class TopNPlotBase: def get_top_n(self, data): from pyspark.pandas import DataFrame, Series max_rows = get_option("plotting.max_rows") # Simply use the first 1k elements and make it into a pandas dataframe # For categorical variables, it is likely called from df.x.value_counts().plot.xxx(). if isinstance(data, (Series, DataFrame)): data = data.head(max_rows + 1).to_pandas() else: raise TypeError("Only DataFrame and Series are supported for plotting.") self.partial = False if len(data) > max_rows: self.partial = True data = data.iloc[:max_rows] return data def set_result_text(self, ax): max_rows = get_option("plotting.max_rows") assert hasattr(self, "partial") if self.partial: ax.text( 1, 1, "showing top {} elements only".format(max_rows), size=6, ha="right", va="bottom", transform=ax.transAxes, ) class SampledPlotBase: def get_sampled(self, data): from pyspark.pandas import DataFrame, Series fraction = get_option("plotting.sample_ratio") if fraction is None: fraction = 1 / (len(data) / get_option("plotting.max_rows")) fraction = min(1.0, fraction) self.fraction = fraction if isinstance(data, (DataFrame, Series)): if isinstance(data, Series): data = data.to_frame() sampled = data._internal.resolved_copy.spark_frame.sample(fraction=self.fraction) return DataFrame(data._internal.with_new_sdf(sampled)).to_pandas() else: raise TypeError("Only DataFrame and Series are supported for plotting.") def set_result_text(self, ax): assert hasattr(self, "fraction") if self.fraction < 1: ax.text( 1, 1, "showing the sampled result by fraction %s" % self.fraction, size=6, ha="right", va="bottom", transform=ax.transAxes, ) class HistogramPlotBase: @staticmethod def prepare_hist_data(data, bins): # TODO: this logic is similar with KdePlotBase. Might have to deduplicate it. 
from pyspark.pandas.series import Series if isinstance(data, Series): data = data.to_frame() numeric_data = data.select_dtypes( include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64] ) # no empty frames or series allowed if len(numeric_data.columns) == 0: raise TypeError( "Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__) ) if is_integer(bins): # computes boundaries for the column bins = HistogramPlotBase.get_bins(data.to_spark(), bins) return numeric_data, bins @staticmethod def get_bins(sdf, bins): # 'data' is a Spark DataFrame that selects all columns. if len(sdf.columns) > 1: min_col = F.least(*map(F.min, sdf)) max_col = F.greatest(*map(F.max, sdf)) else: min_col = F.min(sdf.columns[-1]) max_col = F.max(sdf.columns[-1]) boundaries = sdf.select(min_col, max_col).first() # divides the boundaries into bins if boundaries[0] == boundaries[1]: boundaries = (boundaries[0] - 0.5, boundaries[1] + 0.5) return np.linspace(boundaries[0], boundaries[1], bins + 1) @staticmethod def compute_hist(psdf, bins): # 'data' is a Spark DataFrame that selects one column. assert isinstance(bins, (np.ndarray, np.generic)) sdf = psdf._internal.spark_frame scols = [] input_column_names = [] for label in psdf._internal.column_labels: input_column_name = name_like_string(label) input_column_names.append(input_column_name) scols.append(psdf._internal.spark_column_for(label).alias(input_column_name)) sdf = sdf.select(*scols) # 1. Make the bucket output flat to: # +----------+-------+ # |__group_id|buckets| # +----------+-------+ # |0 |0.0 | # |0 |0.0 | # |0 |1.0 | # |0 |2.0 | # |0 |3.0 | # |0 |3.0 | # |1 |0.0 | # |1 |1.0 | # |1 |1.0 | # |1 |2.0 | # |1 |1.0 | # |1 |0.0 | # +----------+-------+ colnames = sdf.columns bucket_names = ["__{}_bucket".format(colname) for colname in colnames] output_df = None for group_id, (colname, bucket_name) in enumerate(zip(colnames, bucket_names)): # creates a Bucketizer to get corresponding bin of each value bucketizer = Bucketizer( splits=bins, inputCol=colname, outputCol=bucket_name, handleInvalid="skip" ) bucket_df = bucketizer.transform(sdf) if output_df is None: output_df = bucket_df.select( SF.lit(group_id).alias("__group_id"), F.col(bucket_name).alias("__bucket") ) else: output_df = output_df.union( bucket_df.select( SF.lit(group_id).alias("__group_id"), F.col(bucket_name).alias("__bucket") ) ) # 2. Calculate the count based on each group and bucket. # +----------+-------+------+ # |__group_id|buckets| count| # +----------+-------+------+ # |0 |0.0 |2 | # |0 |1.0 |1 | # |0 |2.0 |1 | # |0 |3.0 |2 | # |1 |0.0 |2 | # |1 |1.0 |3 | # |1 |2.0 |1 | # +----------+-------+------+ result = ( output_df.groupby("__group_id", "__bucket") .agg(F.count("*").alias("count")) .toPandas() .sort_values(by=["__group_id", "__bucket"]) ) # 3. Fill empty bins and calculate based on each group id. 
From: # +----------+--------+------+ # |__group_id|__bucket| count| # +----------+--------+------+ # |0 |0.0 |2 | # |0 |1.0 |1 | # |0 |2.0 |1 | # |0 |3.0 |2 | # +----------+--------+------+ # +----------+--------+------+ # |__group_id|__bucket| count| # +----------+--------+------+ # |1 |0.0 |2 | # |1 |1.0 |3 | # |1 |2.0 |1 | # +----------+--------+------+ # # to: # +-----------------+ # |__values1__bucket| # +-----------------+ # |2 | # |1 | # |1 | # |2 | # |0 | # +-----------------+ # +-----------------+ # |__values2__bucket| # +-----------------+ # |2 | # |3 | # |1 | # |0 | # |0 | # +-----------------+ output_series = [] for i, (input_column_name, bucket_name) in enumerate(zip(input_column_names, bucket_names)): current_bucket_result = result[result["__group_id"] == i] # generates a pandas DF with one row for each bin # we need this as some of the bins may be empty indexes = pd.DataFrame({"__bucket": np.arange(0, len(bins) - 1)}) # merges the bins with counts on it and fills remaining ones with zeros pdf = indexes.merge(current_bucket_result, how="left", on=["__bucket"]).fillna(0)[ ["count"] ] pdf.columns = [input_column_name] output_series.append(pdf[input_column_name]) return output_series class BoxPlotBase: @staticmethod def compute_stats(data, colname, whis, precision): # Computes mean, median, Q1 and Q3 with approx_percentile and precision pdf = data._psdf._internal.resolved_copy.spark_frame.agg( *[ F.expr( "approx_percentile(`{}`, {}, {})".format(colname, q, int(1.0 / precision)) ).alias("{}_{}%".format(colname, int(q * 100))) for q in [0.25, 0.50, 0.75] ], F.mean("`%s`" % colname).alias("{}_mean".format(colname)), ).toPandas() # Computes IQR and Tukey's fences iqr = "{}_iqr".format(colname) p75 = "{}_75%".format(colname) p25 = "{}_25%".format(colname) pdf.loc[:, iqr] = pdf.loc[:, p75] - pdf.loc[:, p25] pdf.loc[:, "{}_lfence".format(colname)] = pdf.loc[:, p25] - whis * pdf.loc[:, iqr] pdf.loc[:, "{}_ufence".format(colname)] = pdf.loc[:, p75] + whis * pdf.loc[:, iqr] qnames = ["25%", "50%", "75%", "mean", "lfence", "ufence"] col_summ = pdf[["{}_{}".format(colname, q) for q in qnames]] col_summ.columns = qnames lfence, ufence = col_summ["lfence"], col_summ["ufence"] stats = { "mean": col_summ["mean"].values[0], "med": col_summ["50%"].values[0], "q1": col_summ["25%"].values[0], "q3": col_summ["75%"].values[0], } return stats, (lfence.values[0], ufence.values[0]) @staticmethod def outliers(data, colname, lfence, ufence): # Builds expression to identify outliers expression = F.col("`%s`" % colname).between(lfence, ufence) # Creates a column to flag rows as outliers or not return data._psdf._internal.resolved_copy.spark_frame.withColumn( "__{}_outlier".format(colname), ~expression ) @staticmethod def calc_whiskers(colname, outliers): # Computes min and max values of non-outliers - the whiskers minmax = ( outliers.filter("not `__{}_outlier`".format(colname)) .agg(F.min("`%s`" % colname).alias("min"), F.max(colname).alias("max")) .toPandas() ) return minmax.iloc[0][["min", "max"]].values @staticmethod def get_fliers(colname, outliers, min_val): # Filters only the outliers, should "showfliers" be True fliers_df = outliers.filter("`__{}_outlier`".format(colname)) # If shows fliers, takes the top 1k with highest absolute values # Here we normalize the values by subtracting the minimum value from # each, and use absolute values. 
order_col = F.abs(F.col("`{}`".format(colname)) - min_val.item()) fliers = ( fliers_df.select(F.col("`{}`".format(colname))) .orderBy(order_col) .limit(1001) .toPandas()[colname] .values ) return fliers class KdePlotBase: @staticmethod def prepare_kde_data(data): # TODO: this logic is similar with HistogramPlotBase. Might have to deduplicate it. from pyspark.pandas.series import Series if isinstance(data, Series): data = data.to_frame() numeric_data = data.select_dtypes( include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64] ) # no empty frames or series allowed if len(numeric_data.columns) == 0: raise TypeError( "Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__) ) return numeric_data @staticmethod def get_ind(sdf, ind): def calc_min_max(): if len(sdf.columns) > 1: min_col = F.least(*map(F.min, sdf)) max_col = F.greatest(*map(F.max, sdf)) else: min_col = F.min(sdf.columns[-1]) max_col = F.max(sdf.columns[-1]) return sdf.select(min_col, max_col).first() if ind is None: min_val, max_val = calc_min_max() sample_range = max_val - min_val ind = np.linspace( min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, 1000, ) elif is_integer(ind): min_val, max_val = calc_min_max() sample_range = max_val - min_val ind = np.linspace( min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, ind, ) return ind @staticmethod def compute_kde(sdf, bw_method=None, ind=None): # 'sdf' is a Spark DataFrame that selects one column. # Using RDD is slow so we might have to change it to Dataset based implementation # once Spark has that implementation. sample = sdf.rdd.map(lambda x: float(x[0])) kd = KernelDensity() kd.setSample(sample) assert isinstance(bw_method, (int, float)), "'bw_method' must be set as a scalar number." if bw_method is not None: # Match the bandwidth with Spark. kd.setBandwidth(float(bw_method)) return kd.estimate(list(map(float, ind))) class PandasOnSparkPlotAccessor(PandasObject): """ Series/Frames plotting accessor and method. Uses the backend specified by the option ``plotting.backend``. By default, plotly is used. Plotting methods can also be accessed by calling the accessor as a method with the ``kind`` argument: ``s.plot(kind='hist')`` is equivalent to ``s.plot.hist()`` """ pandas_plot_data_map = { "pie": TopNPlotBase().get_top_n, "bar": TopNPlotBase().get_top_n, "barh": TopNPlotBase().get_top_n, "scatter": TopNPlotBase().get_top_n, "area": SampledPlotBase().get_sampled, "line": SampledPlotBase().get_sampled, } _backends = {} # type: ignore def __init__(self, data): self.data = data @staticmethod def _find_backend(backend): """ Find a pandas-on-Spark plotting backend """ try: return PandasOnSparkPlotAccessor._backends[backend] except KeyError: try: module = importlib.import_module(backend) except ImportError: # We re-raise later on. pass else: if hasattr(module, "plot") or hasattr(module, "plot_pandas_on_spark"): # Validate that the interface is implemented when the option # is set, rather than at plot time. PandasOnSparkPlotAccessor._backends[backend] = module return module raise ValueError( "Could not find plotting backend '{backend}'. 
Ensure that you've installed " "the package providing the '{backend}' entrypoint, or that the package has a " "top-level `.plot` method.".format(backend=backend) ) @staticmethod def _get_plot_backend(backend=None): backend = backend or get_option("plotting.backend") # Shortcut if backend in PandasOnSparkPlotAccessor._backends: return PandasOnSparkPlotAccessor._backends[backend] if backend == "matplotlib": # Because matplotlib is an optional dependency, # we need to attempt an import here to raise an ImportError if needed. try: # test if matplotlib can be imported import matplotlib # noqa: F401 from pyspark.pandas.plot import matplotlib as module except ImportError: raise ImportError( "matplotlib is required for plotting when the " "default backend 'matplotlib' is selected." ) from None PandasOnSparkPlotAccessor._backends["matplotlib"] = module elif backend == "plotly": try: # test if plotly can be imported import plotly # noqa: F401 from pyspark.pandas.plot import plotly as module except ImportError: raise ImportError( "plotly is required for plotting when the " "default backend 'plotly' is selected." ) from None PandasOnSparkPlotAccessor._backends["plotly"] = module else: module = PandasOnSparkPlotAccessor._find_backend(backend) PandasOnSparkPlotAccessor._backends[backend] = module return module def __call__(self, kind="line", backend=None, **kwargs): plot_backend = PandasOnSparkPlotAccessor._get_plot_backend(backend) plot_data = self.data kind = {"density": "kde"}.get(kind, kind) if hasattr(plot_backend, "plot_pandas_on_spark"): # use if there's pandas-on-Spark specific method. return plot_backend.plot_pandas_on_spark(plot_data, kind=kind, **kwargs) else: # fallback to use pandas' if not PandasOnSparkPlotAccessor.pandas_plot_data_map[kind]: raise NotImplementedError( "'%s' plot is not supported with '%s' plot " "backend yet." % (kind, plot_backend.__name__) ) plot_data = PandasOnSparkPlotAccessor.pandas_plot_data_map[kind](plot_data) return plot_backend.plot(plot_data, kind=kind, **kwargs) def line(self, x=None, y=None, **kwargs): """ Plot DataFrame/Series as lines. This function is useful to plot lines using Series's values as coordinates. Parameters ---------- x : int or str, optional Columns to use for the horizontal axis. Either the location or the label of the columns to be used. By default, it will use the DataFrame indices. y : int, str, or list of them, optional The values to be plotted. Either the location or the label of the columns to be used. By default, it will use the remaining DataFrame numeric columns. **kwds Keyword arguments to pass on to :meth:`Series.plot` or :meth:`DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). See Also -------- plotly.express.line : Plot y versus x as lines and/or markers (plotly). matplotlib.pyplot.plot : Plot y versus x as lines and/or markers (matplotlib). Examples -------- Basic plot. For Series: .. plotly:: >>> s = ps.Series([1, 3, 2]) >>> s.plot.line() # doctest: +SKIP For DataFrame: .. plotly:: The following example shows the populations for some animals over the years. >>> df = ps.DataFrame({'pig': [20, 18, 489, 675, 1776], ... 'horse': [4, 25, 281, 600, 1900]}, ... index=[1990, 1997, 2003, 2009, 2014]) >>> df.plot.line() # doctest: +SKIP .. plotly:: The following example shows the relationship between both populations. >>> df = ps.DataFrame({'pig': [20, 18, 489, 675, 1776], ... 
'horse': [4, 25, 281, 600, 1900]}, ... index=[1990, 1997, 2003, 2009, 2014]) >>> df.plot.line(x='pig', y='horse') # doctest: +SKIP """ return self(kind="line", x=x, y=y, **kwargs) def bar(self, x=None, y=None, **kwds): """ Vertical bar plot. Parameters ---------- x : label or position, optional Allows plotting of one column versus another. If not specified, the index of the DataFrame is used. y : label or position, optional Allows plotting of one column versus another. If not specified, all numerical columns are used. **kwds : optional Additional keyword arguments are documented in :meth:`pyspark.pandas.Series.plot` or :meth:`pyspark.pandas.DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- Basic plot. For Series: .. plotly:: >>> s = ps.Series([1, 3, 2]) >>> s.plot.bar() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) >>> df.plot.bar(x='lab', y='val') # doctest: +SKIP Plot a whole dataframe to a bar plot. Each column is stacked with a distinct color along the horizontal axis. .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.bar() # doctest: +SKIP Instead of stacking, the figure can be split by column with plotly APIs. .. plotly:: >>> from plotly.subplots import make_subplots >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> fig = (make_subplots(rows=2, cols=1) ... .add_trace(df.plot.bar(y='speed').data[0], row=1, col=1) ... .add_trace(df.plot.bar(y='speed').data[0], row=1, col=1) ... .add_trace(df.plot.bar(y='lifespan').data[0], row=2, col=1)) >>> fig # doctest: +SKIP Plot a single column. .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.bar(y='speed') # doctest: +SKIP Plot only selected categories for the DataFrame. .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.bar(x='lifespan') # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="bar", **kwds) elif isinstance(self.data, DataFrame): return self(kind="bar", x=x, y=y, **kwds) def barh(self, x=None, y=None, **kwargs): """ Make a horizontal bar plot. A horizontal bar plot is a plot that presents quantitative data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value. Parameters ---------- x : label or position, default DataFrame.index Column to be used for categories. 
y : label or position, default All numeric columns in dataframe Columns to be plotted from the DataFrame. **kwds Keyword arguments to pass on to :meth:`pyspark.pandas.DataFrame.plot` or :meth:`pyspark.pandas.Series.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). See Also -------- plotly.express.bar : Plot a vertical bar plot using plotly. matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib. Examples -------- For Series: .. plotly:: >>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) >>> df.val.plot.barh() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) >>> df.plot.barh(x='lab', y='val') # doctest: +SKIP Plot a whole DataFrame to a horizontal bar plot .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.barh() # doctest: +SKIP Plot a column of the DataFrame to a horizontal bar plot .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.barh(y='speed') # doctest: +SKIP Plot DataFrame versus the desired column .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.barh(x='lifespan') # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="barh", **kwargs) elif isinstance(self.data, DataFrame): return self(kind="barh", x=x, y=y, **kwargs) def box(self, **kwds): """ Make a box plot of the Series columns. Parameters ---------- **kwds : optional Additional keyword arguments are documented in :meth:`pyspark.pandas.Series.plot`. precision: scalar, default = 0.01 This argument is used by pandas-on-Spark to compute approximate statistics for building a boxplot. Use *smaller* values to get more precise statistics (matplotlib-only). Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Notes ----- There are behavior differences between pandas-on-Spark and pandas. * pandas-on-Spark computes approximate statistics - expect differences between pandas and pandas-on-Spark boxplots, especially regarding 1st and 3rd quartiles. * The `whis` argument is only supported as a single number. * pandas-on-Spark doesn't support the following argument(s) (matplotlib-only). * `bootstrap` argument is not supported * `autorange` argument is not supported Examples -------- Draw a box plot from a DataFrame with four columns of randomly generated data. For Series: .. 
plotly:: >>> data = np.random.randn(25, 4) >>> df = ps.DataFrame(data, columns=list('ABCD')) >>> df['A'].plot.box() # doctest: +SKIP This is an unsupported function for DataFrame type """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="box", **kwds) elif isinstance(self.data, DataFrame): return unsupported_function(class_name="pd.DataFrame", method_name="box")() def hist(self, bins=10, **kwds): """ Draw one histogram of the DataFrame’s columns. A `histogram`_ is a representation of the distribution of data. This function calls :meth:`plotting.backend.plot`, on each series in the DataFrame, resulting in one histogram per column. .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- bins : integer or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. **kwds All other plotting keyword arguments to be passed to plotting backend. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- Basic plot. For Series: .. plotly:: >>> s = ps.Series([1, 3, 2]) >>> s.plot.hist() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = pd.DataFrame( ... np.random.randint(1, 7, 6000), ... columns=['one']) >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) >>> df = ps.from_pandas(df) >>> df.plot.hist(bins=12, alpha=0.5) # doctest: +SKIP """ return self(kind="hist", bins=bins, **kwds) def kde(self, bw_method=None, ind=None, **kwargs): """ Generate Kernel Density Estimate plot using Gaussian kernels. Parameters ---------- bw_method : scalar The method used to calculate the estimator bandwidth. See KernelDensity in PySpark for more information. ind : NumPy array or integer, optional Evaluation points for the estimated PDF. If None (default), 1000 equally spaced points are used. If `ind` is a NumPy array, the KDE is evaluated at the points passed. If `ind` is an integer, `ind` number of equally spaced points are used. **kwargs : optional Keyword arguments to pass on to :meth:`pandas-on-Spark.Series.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- A scalar bandwidth should be specified. Using a small bandwidth value can lead to over-fitting, while using a large bandwidth value may result in under-fitting: .. plotly:: >>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> s.plot.kde(bw_method=0.3) # doctest: +SKIP .. plotly:: >>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> s.plot.kde(bw_method=3) # doctest: +SKIP The `ind` parameter determines the evaluation points for the plot of the estimated KDF: .. plotly:: >>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> s.plot.kde(ind=[1, 2, 3, 4, 5], bw_method=0.3) # doctest: +SKIP For DataFrame, it works in the same way as Series: .. plotly:: >>> df = ps.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> df.plot.kde(bw_method=0.3) # doctest: +SKIP .. plotly:: >>> df = ps.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> df.plot.kde(bw_method=3) # doctest: +SKIP .. plotly:: >>> df = ps.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 
'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> df.plot.kde(ind=[1, 2, 3, 4, 5, 6], bw_method=0.3) # doctest: +SKIP """ return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs) density = kde def area(self, x=None, y=None, **kwds): """ Draw a stacked area plot. An area plot displays quantitative data visually. This function wraps the plotly area function. Parameters ---------- x : label or position, optional Coordinates for the X axis. By default uses the index. y : label or position, optional Column to plot. By default uses all columns. stacked : bool, default True Area plots are stacked by default. Set to False to create a unstacked plot (matplotlib-only). **kwds : optional Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- For Series .. plotly:: >>> df = ps.DataFrame({ ... 'sales': [3, 2, 3, 9, 10, 6], ... 'signups': [5, 5, 6, 12, 14, 13], ... 'visits': [20, 42, 28, 62, 81, 50], ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01', ... freq='M')) >>> df.sales.plot.area() # doctest: +SKIP For DataFrame .. plotly:: >>> df = ps.DataFrame({ ... 'sales': [3, 2, 3, 9, 10, 6], ... 'signups': [5, 5, 6, 12, 14, 13], ... 'visits': [20, 42, 28, 62, 81, 50], ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01', ... freq='M')) >>> df.plot.area() # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="area", **kwds) elif isinstance(self.data, DataFrame): return self(kind="area", x=x, y=y, **kwds) def pie(self, **kwds): """ Generate a pie plot. A pie plot is a proportional representation of the numerical data in a column. This function wraps :meth:`plotly.express.pie` for the specified column. Parameters ---------- y : int or label, optional Label or position of the column to plot. If not provided, ``subplots=True`` argument must be passed (matplotlib-only). **kwds Keyword arguments to pass on to :meth:`pandas-on-Spark.Series.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- For Series: .. plotly:: >>> df = ps.DataFrame({'mass': [0.330, 4.87, 5.97], ... 'radius': [2439.7, 6051.8, 6378.1]}, ... index=['Mercury', 'Venus', 'Earth']) >>> df.mass.plot.pie() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = ps.DataFrame({'mass': [0.330, 4.87, 5.97], ... 'radius': [2439.7, 6051.8, 6378.1]}, ... index=['Mercury', 'Venus', 'Earth']) >>> df.plot.pie(y='mass') # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="pie", **kwds) else: # pandas will raise an error if y is None and subplots if not True if ( isinstance(self.data, DataFrame) and kwds.get("y", None) is None and not kwds.get("subplots", False) ): raise ValueError( "pie requires either y column or 'subplots=True' (matplotlib-only)" ) return self(kind="pie", **kwds) def scatter(self, x, y, **kwds): """ Create a scatter plot with varying marker point size and color. The coordinates of each point are defined by two dataframe columns and filled circles are used to represent each point. This kind of plot is useful to see complex correlations between two variables. 
Points could be for instance natural 2D coordinates like longitude and latitude in a map or, in general, any pair of metrics that can be plotted against each other. Parameters ---------- x : int or str The column name or column position to be used as horizontal coordinates for each point. y : int or str The column name or column position to be used as vertical coordinates for each point. s : scalar or array_like, optional (matplotlib-only). c : str, int or array_like, optional (matplotlib-only). **kwds: Optional Keyword arguments to pass on to :meth:`pyspark.pandas.DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). See Also -------- plotly.express.scatter : Scatter plot using multiple input data formats (plotly). matplotlib.pyplot.scatter : Scatter plot using multiple input data formats (matplotlib). Examples -------- Let's see how to draw a scatter plot using coordinates from the values in a DataFrame's columns. .. plotly:: >>> df = ps.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1], ... [6.4, 3.2, 1], [5.9, 3.0, 2]], ... columns=['length', 'width', 'species']) >>> df.plot.scatter(x='length', y='width') # doctest: +SKIP And now with dark scheme: .. plotly:: >>> df = ps.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1], ... [6.4, 3.2, 1], [5.9, 3.0, 2]], ... columns=['length', 'width', 'species']) >>> fig = df.plot.scatter(x='length', y='width') >>> fig.update_layout(template="plotly_dark") # doctest: +SKIP """ return self(kind="scatter", x=x, y=y, **kwds) def hexbin(self, **kwds): return unsupported_function(class_name="pd.DataFrame", method_name="hexbin")()
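The accessor above routes every s.plot.<kind>() call through __call__, picking the backend from the plotting.backend option and, when it falls back to pandas plotting, truncating (pie/bar/barh/scatter) or sampling (area/line) the data first via pandas_plot_data_map. A hedged usage sketch, assuming a running Spark session and that the default plotly backend (or matplotlib, where noted) is installed:

    import pyspark.pandas as ps

    psser = ps.Series([1, 3, 2, 5, 4])
    fig = psser.plot.hist(bins=3)      # dispatched through __call__(kind="hist")
    fig = psser.plot(kind="bar")       # same entry point, explicit kind argument
    # A per-call backend override is also accepted, e.g.:
    # psser.plot.line(backend="matplotlib")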
apache-2.0
hammerlab/varcode
test/benchmark_vcf_load.py
1
1220
""" Time how long it takes to open a VCF. Run as: python -m profile -s cumtime %(prog)s to get profiling output. """ import argparse import time import varcode parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "path", help="Path or URL to VCF") parser.add_argument( "--profile", action="store_true", default=False, help="Run in a profiler.") parser.add_argument( "--no-info-field", dest="info_field", action="store_false", default=True) parser.add_argument( "--pyvcf", help="use pyvcf implementation", action="store_true", default=False) def run(): args = parser.parse_args() extra_args = {} if not args.info_field: extra_args["include_info"] = False start = time.time() if args.pyvcf: result = varcode.load_vcf( args.path, allow_extended_nucleotides=True) else: result = varcode.load_vcf_fast( args.path, allow_extended_nucleotides=True, **extra_args) print("Loaded %d variants in %0.3f sec. " % ( len(result), time.time() - start)) print(result.to_string(limit=5)) if __name__ == '__main__': run()
apache-2.0
binux/pyspider
tests/data_test_webpage.py
8
1727
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<roy@binux.me> # http://binux.me # Created on 2015-01-24 13:44:10 from httpbin import app @app.route('/pyspider/test.html') def test_page(): return ''' <a href="/404">404 <a href="/links/10/0">0 <a href="/links/10/1">1 <a href="/links/10/2">2 <a href="/links/10/3">3 <a href="/links/10/4">4 <a href="/gzip">gzip <a href="/get">get <a href="/deflate">deflate <a href="/html">html <a href="/xml">xml <a href="/robots.txt">robots <a href="/cache">cache <a href="/stream/20">stream ''' @app.route('/pyspider/ajax.html') def test_ajax(): return ''' <div class=status>loading...</div> <div class=ua></div> <div class=ip></div> <script> var xhr = new XMLHttpRequest(); xhr.onload = function() { var data = JSON.parse(xhr.responseText); document.querySelector('.status').innerHTML = 'done'; document.querySelector('.ua').innerHTML = data.headers['User-Agent']; document.querySelector('.ip').innerHTML = data.origin; } xhr.open("get", "/get", true); xhr.send(); </script> ''' @app.route('/pyspider/ajax_click.html') def test_ajax_click(): return ''' <div class=status>loading...</div> <div class=ua></div> <div class=ip></div> <a href="javascript:void(0)" onclick="load()">load</a> <script> function load() { var xhr = new XMLHttpRequest(); xhr.onload = function() { var data = JSON.parse(xhr.responseText); document.querySelector('.status').innerHTML = 'done'; document.querySelector('.ua').innerHTML = data.headers['User-Agent']; document.querySelector('.ip').innerHTML = data.origin; } xhr.open("get", "/get", true); xhr.send(); } </script> '''
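These handlers extend httpbin's Flask app with fixture pages for the crawler tests. A hedged sketch of hitting one of them, assuming the imported app behaves as a standard Flask application with a test client:

    with app.test_client() as client:
        resp = client.get('/pyspider/test.html')
        assert resp.status_code == 200
        assert b'/links/10/0' in resp.data   # one of the anchor hrefs returned above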
apache-2.0
webmakin/scrapy
scrapy/exporters.py
118
9277
""" Item Exporters are used to export/serialize items into different formats. """ import csv import sys import pprint import marshal import six from six.moves import cPickle as pickle from xml.sax.saxutils import XMLGenerator from scrapy.utils.serialize import ScrapyJSONEncoder from scrapy.item import BaseItem __all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter', 'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter', 'JsonItemExporter', 'MarshalItemExporter'] class BaseItemExporter(object): def __init__(self, **kwargs): self._configure(kwargs) def _configure(self, options, dont_fail=False): """Configure the exporter by poping options from the ``options`` dict. If dont_fail is set, it won't raise an exception on unexpected options (useful for using with keyword arguments in subclasses constructors) """ self.fields_to_export = options.pop('fields_to_export', None) self.export_empty_fields = options.pop('export_empty_fields', False) self.encoding = options.pop('encoding', 'utf-8') if not dont_fail and options: raise TypeError("Unexpected options: %s" % ', '.join(options.keys())) def export_item(self, item): raise NotImplementedError def serialize_field(self, field, name, value): serializer = field.get('serializer', self._to_str_if_unicode) return serializer(value) def start_exporting(self): pass def finish_exporting(self): pass def _to_str_if_unicode(self, value): return value.encode(self.encoding) if isinstance(value, unicode) else value def _get_serialized_fields(self, item, default_value=None, include_empty=None): """Return the fields to export as an iterable of tuples (name, serialized_value) """ if include_empty is None: include_empty = self.export_empty_fields if self.fields_to_export is None: if include_empty and not isinstance(item, dict): field_iter = six.iterkeys(item.fields) else: field_iter = six.iterkeys(item) else: if include_empty: field_iter = self.fields_to_export else: field_iter = (x for x in self.fields_to_export if x in item) for field_name in field_iter: if field_name in item: field = {} if isinstance(item, dict) else item.fields[field_name] value = self.serialize_field(field, field_name, item[field_name]) else: value = default_value yield field_name, value class JsonLinesItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs, dont_fail=True) self.file = file self.encoder = ScrapyJSONEncoder(**kwargs) def export_item(self, item): itemdict = dict(self._get_serialized_fields(item)) self.file.write(self.encoder.encode(itemdict) + '\n') class JsonItemExporter(JsonLinesItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs, dont_fail=True) self.file = file self.encoder = ScrapyJSONEncoder(**kwargs) self.first_item = True def start_exporting(self): self.file.write("[") def finish_exporting(self): self.file.write("]") def export_item(self, item): if self.first_item: self.first_item = False else: self.file.write(',\n') itemdict = dict(self._get_serialized_fields(item)) self.file.write(self.encoder.encode(itemdict)) class XmlItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self.item_element = kwargs.pop('item_element', 'item') self.root_element = kwargs.pop('root_element', 'items') self._configure(kwargs) self.xg = XMLGenerator(file, encoding=self.encoding) def start_exporting(self): self.xg.startDocument() self.xg.startElement(self.root_element, {}) def export_item(self, item): self.xg.startElement(self.item_element, {}) for name, value in self._get_serialized_fields(item, 
default_value=''): self._export_xml_field(name, value) self.xg.endElement(self.item_element) def finish_exporting(self): self.xg.endElement(self.root_element) self.xg.endDocument() def _export_xml_field(self, name, serialized_value): self.xg.startElement(name, {}) if hasattr(serialized_value, 'items'): for subname, value in serialized_value.items(): self._export_xml_field(subname, value) elif hasattr(serialized_value, '__iter__'): for value in serialized_value: self._export_xml_field('value', value) else: self._xg_characters(serialized_value) self.xg.endElement(name) # Workaround for http://bugs.python.org/issue17606 # Before Python 2.7.4 xml.sax.saxutils required bytes; # since 2.7.4 it requires unicode. The bug is likely to be # fixed in 2.7.6, but 2.7.6 will still support unicode, # and Python 3.x will require unicode, so ">= 2.7.4" should be fine. if sys.version_info[:3] >= (2, 7, 4): def _xg_characters(self, serialized_value): if not isinstance(serialized_value, unicode): serialized_value = serialized_value.decode(self.encoding) return self.xg.characters(serialized_value) else: def _xg_characters(self, serialized_value): return self.xg.characters(serialized_value) class CsvItemExporter(BaseItemExporter): def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs): self._configure(kwargs, dont_fail=True) self.include_headers_line = include_headers_line self.csv_writer = csv.writer(file, **kwargs) self._headers_not_written = True self._join_multivalued = join_multivalued def _to_str_if_unicode(self, value): if isinstance(value, (list, tuple)): try: value = self._join_multivalued.join(value) except TypeError: # list in value may not contain strings pass return super(CsvItemExporter, self)._to_str_if_unicode(value) def export_item(self, item): if self._headers_not_written: self._headers_not_written = False self._write_headers_and_set_fields_to_export(item) fields = self._get_serialized_fields(item, default_value='', include_empty=True) values = [x[1] for x in fields] self.csv_writer.writerow(values) def _write_headers_and_set_fields_to_export(self, item): if self.include_headers_line: if not self.fields_to_export: if isinstance(item, dict): # for dicts try using fields of the first item self.fields_to_export = list(item.keys()) else: # use fields declared in Item self.fields_to_export = list(item.fields.keys()) self.csv_writer.writerow(self.fields_to_export) class PickleItemExporter(BaseItemExporter): def __init__(self, file, protocol=2, **kwargs): self._configure(kwargs) self.file = file self.protocol = protocol def export_item(self, item): d = dict(self._get_serialized_fields(item)) pickle.dump(d, self.file, self.protocol) class MarshalItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs) self.file = file def export_item(self, item): marshal.dump(dict(self._get_serialized_fields(item)), self.file) class PprintItemExporter(BaseItemExporter): def __init__(self, file, **kwargs): self._configure(kwargs) self.file = file def export_item(self, item): itemdict = dict(self._get_serialized_fields(item)) self.file.write(pprint.pformat(itemdict) + '\n') class PythonItemExporter(BaseItemExporter): """The idea behind this exporter is to have a mechanism to serialize items to built-in python types so any serialization library (like json, msgpack, binc, etc) can be used on top of it. Its main goal is to seamless support what BaseItemExporter does plus nested items. 
""" def serialize_field(self, field, name, value): serializer = field.get('serializer', self._serialize_value) return serializer(value) def _serialize_value(self, value): if isinstance(value, BaseItem): return self.export_item(value) if isinstance(value, dict): return dict(self._serialize_dict(value)) if hasattr(value, '__iter__'): return [self._serialize_value(v) for v in value] return self._to_str_if_unicode(value) def _serialize_dict(self, value): for key, val in six.iteritems(value): yield key, self._serialize_value(val) def export_item(self, item): return dict(self._get_serialized_fields(item))
bsd-3-clause
pombredanne/package-verify
test/test_validator_devspec.py
2
2566
# Copyright (C) 2014 Steve Milner # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Tests the example specification validator. """ import json from . import TestCase from package_verify.validator import _Validator, error from package_verify.validator.validators import devspec data = { "name": "name", "version": "1.0.0", "release": "1", "license": "GPLv3", "summary": "A thing", "url": "http://example.com", "signature": "12345", "sha256": "12345", "platform": "", "scope": "/", "packager": "Someone", "source_name": "name-1.0.0.tar.gz", "source_hash": "12345", "scm_id": "r1234", "patches": [], "dependencies": {}, "files": {}, "config_files": {} } class TestDevspec(TestCase): def test_validator_creation(self): """ Make sure the validator is created properly """ result = devspec.Validator() assert type(result) is devspec.Validator assert issubclass(result.__class__, _Validator) def test_validation(self): """ Make sure devspec properly validates input """ validator = devspec.Validator() result = validator.validate(json.dumps(data)) assert type(result) == tuple for item in result: assert type(item) == list assert len(item) == 0 # Something missing data2 = data del data2['files'] result = validator.validate(json.dumps(data2)) assert len(result[0]) == 0 assert len(result[1]) == 1 # Two items missing del data2['name'] result = validator.validate(json.dumps(data2)) assert len(result[0]) == 0 assert len(result[1]) == 2 # Bad data self.assertRaises( error.WrongFormatError, validator.validate, json.dumps(['hi'])) # Not even JSON self.assertRaises( error.WrongFormatError, validator.validate, ['hi'])
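A hedged distillation of the call pattern the tests above exercise; the meaning of the two lists is inferred from how result[0] and result[1] are asserted, not from the validator's own documentation:

    validator = devspec.Validator()
    result = validator.validate(json.dumps(data))
    assert result[0] == []   # no format errors reported for the complete spec
    assert result[1] == []   # no required fields reported missing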
gpl-3.0
windyuuy/opera
chromium/src/third_party/trace-viewer/third_party/pywebsocket/src/example/echo_client.py
30
38662
#!/usr/bin/env python # # Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Simple WebSocket client named echo_client just because of historical reason. mod_pywebsocket directory must be in PYTHONPATH. Example Usage: # server setup % cd $pywebsocket % PYTHONPATH=$cwd/src python ./mod_pywebsocket/standalone.py -p 8880 \ -d $cwd/src/example # run client % PYTHONPATH=$cwd/src python ./src/example/echo_client.py -p 8880 \ -s localhost \ -o http://localhost -r /echo -m test or # run echo client to test IETF HyBi 00 protocol run with --protocol-version=hybi00 or # server setup to test Hixie 75 protocol run with --allow-draft75 # run echo client to test Hixie 75 protocol run with --protocol-version=hixie75 """ import base64 import codecs import logging from optparse import OptionParser import os import random import re import socket import struct import sys from mod_pywebsocket import common from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor from mod_pywebsocket.stream import Stream from mod_pywebsocket.stream import StreamHixie75 from mod_pywebsocket.stream import StreamOptions from mod_pywebsocket import util _TIMEOUT_SEC = 10 _UNDEFINED_PORT = -1 _UPGRADE_HEADER = 'Upgrade: websocket\r\n' _UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n' _CONNECTION_HEADER = 'Connection: Upgrade\r\n' # Special message that tells the echo server to start closing handshake _GOODBYE_MESSAGE = 'Goodbye' _PROTOCOL_VERSION_HYBI13 = 'hybi13' _PROTOCOL_VERSION_HYBI08 = 'hybi08' _PROTOCOL_VERSION_HYBI00 = 'hybi00' _PROTOCOL_VERSION_HIXIE75 = 'hixie75' class ClientHandshakeError(Exception): pass def _build_method_line(resource): return 'GET %s HTTP/1.1\r\n' % resource def _origin_header(header, origin): # 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character, # and the /origin/ value, converted to ASCII lowercase, to /fields/. return '%s: %s\r\n' % (header, origin.lower()) def _format_host_header(host, port, secure): # 4.1 9. Let /hostport/ be an empty string. # 4.1 10. 
Append the /host/ value, converted to ASCII lowercase, to # /hostport/ hostport = host.lower() # 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/ # is true, and /port/ is not 443, then append a U+003A COLON character # (:) followed by the value of /port/, expressed as a base-ten integer, # to /hostport/ if ((not secure and port != common.DEFAULT_WEB_SOCKET_PORT) or (secure and port != common.DEFAULT_WEB_SOCKET_SECURE_PORT)): hostport += ':' + str(port) # 4.1 12. concatenation of the string "Host:", a U+0020 SPACE # character, and /hostport/, to /fields/. return '%s: %s\r\n' % (common.HOST_HEADER, hostport) def _receive_bytes(socket, length): bytes = [] remaining = length while remaining > 0: received_bytes = socket.recv(remaining) if not received_bytes: raise IOError( 'Connection closed before receiving requested length ' '(requested %d bytes but received only %d bytes)' % (length, length - remaining)) bytes.append(received_bytes) remaining -= len(received_bytes) return ''.join(bytes) def _get_mandatory_header(fields, name): """Gets the value of the header specified by name from fields. This function expects that there's only one header with the specified name in fields. Otherwise, raises an ClientHandshakeError. """ values = fields.get(name.lower()) if values is None or len(values) == 0: raise ClientHandshakeError( '%s header not found: %r' % (name, values)) if len(values) > 1: raise ClientHandshakeError( 'Multiple %s headers found: %r' % (name, values)) return values[0] def _validate_mandatory_header(fields, name, expected_value, case_sensitive=False): """Gets and validates the value of the header specified by name from fields. If expected_value is specified, compares expected value and actual value and raises an ClientHandshakeError on failure. You can specify case sensitiveness in this comparison by case_sensitive parameter. This function expects that there's only one header with the specified name in fields. Otherwise, raises an ClientHandshakeError. """ value = _get_mandatory_header(fields, name) if ((case_sensitive and value != expected_value) or (not case_sensitive and value.lower() != expected_value.lower())): raise ClientHandshakeError( 'Illegal value for header %s: %r (expected) vs %r (actual)' % (name, expected_value, value)) class _TLSSocket(object): """Wrapper for a TLS connection.""" def __init__(self, raw_socket): self._ssl = socket.ssl(raw_socket) def send(self, bytes): return self._ssl.write(bytes) def recv(self, size=-1): return self._ssl.read(size) def close(self): # Nothing to do. pass class ClientHandshakeBase(object): """A base class for WebSocket opening handshake processors for each protocol version. """ def __init__(self): self._logger = util.get_class_logger(self) def _read_fields(self): # 4.1 32. let /fields/ be a list of name-value pairs, initially empty. fields = {} while True: # "Field" # 4.1 33. let /name/ and /value/ be empty byte arrays name = '' value = '' # 4.1 34. read /name/ name = self._read_name() if name is None: break # 4.1 35. read spaces # TODO(tyoshino): Skip only one space as described in the spec. ch = self._skip_spaces() # 4.1 36. read /value/ value = self._read_value(ch) # 4.1 37. read a byte from the server ch = _receive_bytes(self._socket, 1) if ch != '\n': # 0x0A raise ClientHandshakeError( 'Expected LF but found %r while reading value %r for ' 'header %r' % (ch, value, name)) self._logger.debug('Received %r header', name) # 4.1 38. 
append an entry to the /fields/ list that has the name # given by the string obtained by interpreting the /name/ byte # array as a UTF-8 stream and the value given by the string # obtained by interpreting the /value/ byte array as a UTF-8 byte # stream. fields.setdefault(name, []).append(value) # 4.1 39. return to the "Field" step above return fields def _read_name(self): # 4.1 33. let /name/ be empty byte arrays name = '' while True: # 4.1 34. read a byte from the server ch = _receive_bytes(self._socket, 1) if ch == '\r': # 0x0D return None elif ch == '\n': # 0x0A raise ClientHandshakeError( 'Unexpected LF when reading header name %r' % name) elif ch == ':': # 0x3A return name elif ch >= 'A' and ch <= 'Z': # Range 0x31 to 0x5A ch = chr(ord(ch) + 0x20) name += ch else: name += ch def _skip_spaces(self): # 4.1 35. read a byte from the server while True: ch = _receive_bytes(self._socket, 1) if ch == ' ': # 0x20 continue return ch def _read_value(self, ch): # 4.1 33. let /value/ be empty byte arrays value = '' # 4.1 36. read a byte from server. while True: if ch == '\r': # 0x0D return value elif ch == '\n': # 0x0A raise ClientHandshakeError( 'Unexpected LF when reading header value %r' % value) else: value += ch ch = _receive_bytes(self._socket, 1) class ClientHandshakeProcessor(ClientHandshakeBase): """WebSocket opening handshake processor for draft-ietf-hybi-thewebsocketprotocol-06 and later. """ def __init__(self, socket, options): super(ClientHandshakeProcessor, self).__init__() self._socket = socket self._options = options self._logger = util.get_class_logger(self) def handshake(self): """Performs opening handshake on the specified socket. Raises: ClientHandshakeError: handshake failed. """ request_line = _build_method_line(self._options.resource) self._logger.debug('Client\'s opening handshake Request-Line: %r', request_line) self._socket.sendall(request_line) fields = [] fields.append(_format_host_header( self._options.server_host, self._options.server_port, self._options.use_tls)) fields.append(_UPGRADE_HEADER) fields.append(_CONNECTION_HEADER) if self._options.origin is not None: if self._options.protocol_version == _PROTOCOL_VERSION_HYBI08: fields.append(_origin_header( common.SEC_WEBSOCKET_ORIGIN_HEADER, self._options.origin)) else: fields.append(_origin_header(common.ORIGIN_HEADER, self._options.origin)) original_key = os.urandom(16) self._key = base64.b64encode(original_key) self._logger.debug( '%s: %r (%s)', common.SEC_WEBSOCKET_KEY_HEADER, self._key, util.hexify(original_key)) fields.append( '%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY_HEADER, self._key)) if self._options.version_header > 0: fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER, self._options.version_header)) elif self._options.protocol_version == _PROTOCOL_VERSION_HYBI08: fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER, common.VERSION_HYBI08)) else: fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER, common.VERSION_HYBI_LATEST)) extensions_to_request = [] if self._options.deflate_stream: extensions_to_request.append( common.ExtensionParameter( common.DEFLATE_STREAM_EXTENSION)) if self._options.deflate_frame: extensions_to_request.append( common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)) if len(extensions_to_request) != 0: fields.append( '%s: %s\r\n' % (common.SEC_WEBSOCKET_EXTENSIONS_HEADER, common.format_extensions(extensions_to_request))) for field in fields: self._socket.sendall(field) self._socket.sendall('\r\n') self._logger.debug('Sent client\'s opening 
handshake headers: %r', fields) self._logger.debug('Start reading Status-Line') status_line = '' while True: ch = _receive_bytes(self._socket, 1) status_line += ch if ch == '\n': break m = re.match('HTTP/\\d+\.\\d+ (\\d\\d\\d) .*\r\n', status_line) if m is None: raise ClientHandshakeError( 'Wrong status line format: %r' % status_line) status_code = m.group(1) if status_code != '101': self._logger.debug('Unexpected status code %s with following ' 'headers: %r', status_code, self._read_fields()) raise ClientHandshakeError( 'Expected HTTP status code 101 but found %r' % status_code) self._logger.debug('Received valid Status-Line') self._logger.debug('Start reading headers until we see an empty line') fields = self._read_fields() ch = _receive_bytes(self._socket, 1) if ch != '\n': # 0x0A raise ClientHandshakeError( 'Expected LF but found %r while reading value %r for header ' 'name %r' % (ch, value, name)) self._logger.debug('Received an empty line') self._logger.debug('Server\'s opening handshake headers: %r', fields) _validate_mandatory_header( fields, common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE, False) _validate_mandatory_header( fields, common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE, False) accept = _get_mandatory_header( fields, common.SEC_WEBSOCKET_ACCEPT_HEADER) # Validate try: binary_accept = base64.b64decode(accept) except TypeError, e: raise HandshakeError( 'Illegal value for header %s: %r' % (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept)) if len(binary_accept) != 20: raise ClientHandshakeError( 'Decoded value of %s is not 20-byte long' % common.SEC_WEBSOCKET_ACCEPT_HEADER) self._logger.debug( 'Response for challenge : %r (%s)', accept, util.hexify(binary_accept)) binary_expected_accept = util.sha1_hash( self._key + common.WEBSOCKET_ACCEPT_UUID).digest() expected_accept = base64.b64encode(binary_expected_accept) self._logger.debug( 'Expected response for challenge: %r (%s)', expected_accept, util.hexify(binary_expected_accept)) if accept != expected_accept: raise ClientHandshakeError( 'Invalid %s header: %r (expected: %s)' % (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, expected_accept)) deflate_stream_accepted = False deflate_frame_accepted = False extensions_header = fields.get( common.SEC_WEBSOCKET_EXTENSIONS_HEADER.lower()) accepted_extensions = [] if extensions_header is not None and len(extensions_header) != 0: accepted_extensions = common.parse_extensions(extensions_header[0]) # TODO(bashi): Support the new style perframe compression extension. for extension in accepted_extensions: extension_name = extension.name() if (extension_name == common.DEFLATE_STREAM_EXTENSION and len(extension.get_parameter_names()) == 0 and self._options.deflate_stream): deflate_stream_accepted = True continue if (extension_name == common.DEFLATE_FRAME_EXTENSION and self._options.deflate_frame): deflate_frame_accepted = True processor = DeflateFrameExtensionProcessor(extension) unused_extension_response = processor.get_extension_response() self._options.deflate_frame = processor continue raise ClientHandshakeError( 'Unexpected extension %r' % extension_name) if (self._options.deflate_stream and not deflate_stream_accepted): raise ClientHandshakeError( 'Requested %s, but the server rejected it' % common.DEFLATE_STREAM_EXTENSION) if (self._options.deflate_frame and not deflate_frame_accepted): raise ClientHandshakeError( 'Requested %s, but the server rejected it' % common.DEFLATE_FRAME_EXTENSION) # TODO(tyoshino): Handle Sec-WebSocket-Protocol # TODO(tyoshino): Handle Cookie, etc. 
class ClientHandshakeProcessorHybi00(ClientHandshakeBase): """WebSocket opening handshake processor for draft-ietf-hybi-thewebsocketprotocol-00 (equivalent to draft-hixie-thewebsocketprotocol-76). """ def __init__(self, socket, options): super(ClientHandshakeProcessorHybi00, self).__init__() self._socket = socket self._options = options self._logger = util.get_class_logger(self) def handshake(self): """Performs opening handshake on the specified socket. Raises: ClientHandshakeError: handshake failed. """ # 4.1 5. send request line. self._socket.sendall(_build_method_line(self._options.resource)) # 4.1 6. Let /fields/ be an empty list of strings. fields = [] # 4.1 7. Add the string "Upgrade: WebSocket" to /fields/. fields.append(_UPGRADE_HEADER_HIXIE75) # 4.1 8. Add the string "Connection: Upgrade" to /fields/. fields.append(_CONNECTION_HEADER) # 4.1 9-12. Add Host: field to /fields/. fields.append(_format_host_header( self._options.server_host, self._options.server_port, self._options.use_tls)) # 4.1 13. Add Origin: field to /fields/. if not self._options.origin: raise ClientHandshakeError( 'Specify the origin of the connection by --origin flag') fields.append(_origin_header(common.ORIGIN_HEADER, self._options.origin)) # TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/. # TODO: 4.1 15 Add cookie headers to /fields/. # 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/. self._number1, key1 = self._generate_sec_websocket_key() self._logger.debug('Number1: %d', self._number1) fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY1_HEADER, key1)) self._number2, key2 = self._generate_sec_websocket_key() self._logger.debug('Number2: %d', self._number2) fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY2_HEADER, key2)) fields.append('%s: 0\r\n' % common.SEC_WEBSOCKET_DRAFT_HEADER) # 4.1 24. For each string in /fields/, in a random order: send the # string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE # RETURN U+000A LINE FEED character pair (CRLF). random.shuffle(fields) for field in fields: self._socket.sendall(field) # 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED # character pair (CRLF). self._socket.sendall('\r\n') # 4.1 26. let /key3/ be a string consisting of eight random bytes (or # equivalently, a random 64 bit integer encoded in a big-endian order). self._key3 = self._generate_key3() # 4.1 27. send /key3/ to the server. self._socket.sendall(self._key3) self._logger.debug( 'Key3: %r (%s)', self._key3, util.hexify(self._key3)) self._logger.info('Sent handshake') # 4.1 28. Read bytes from the server until either the connection # closes, or a 0x0A byte is read. let /field/ be these bytes, including # the 0x0A bytes. field = '' while True: ch = _receive_bytes(self._socket, 1) field += ch if ch == '\n': break # if /field/ is not at least seven bytes long, or if the last # two bytes aren't 0x0D and 0x0A respectively, or if it does not # contain at least two 0x20 bytes, then fail the WebSocket connection # and abort these steps. if len(field) < 7 or not field.endswith('\r\n'): raise ClientHandshakeError('Wrong status line: %r' % field) m = re.match('[^ ]* ([^ ]*) .*', field) if m is None: raise ClientHandshakeError( 'No HTTP status code found in status line: %r' % field) # 4.1 29. let /code/ be the substring of /field/ that starts from the # byte after the first 0x20 byte, and ends with the byte before the # second 0x20 byte. code = m.group(1) # 4.1 30. 
if /code/ is not three bytes long, or if any of the bytes in # /code/ are not in the range 0x30 to 0x90, then fail the WebSocket # connection and abort these steps. if not re.match('[0-9][0-9][0-9]', code): raise ClientHandshakeError( 'HTTP status code %r is not three digit in status line: %r' % (code, field)) # 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the # next step. if code != '101': raise ClientHandshakeError( 'Expected HTTP status code 101 but found %r in status line: ' '%r' % (code, field)) # 4.1 32-39. read fields into /fields/ fields = self._read_fields() # 4.1 40. _Fields processing_ # read a byte from server ch = _receive_bytes(self._socket, 1) if ch != '\n': # 0x0A raise ClientHandshakeError('Expected LF but found %r' % ch) # 4.1 41. check /fields/ # TODO(ukai): protocol # if the entry's name is "upgrade" # if the value is not exactly equal to the string "WebSocket", # then fail the WebSocket connection and abort these steps. _validate_mandatory_header( fields, common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75, True) # if the entry's name is "connection" # if the value, converted to ASCII lowercase, is not exactly equal # to the string "upgrade", then fail the WebSocket connection and # abort these steps. _validate_mandatory_header( fields, common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE, False) origin = _get_mandatory_header( fields, common.SEC_WEBSOCKET_ORIGIN_HEADER) location = _get_mandatory_header( fields, common.SEC_WEBSOCKET_LOCATION_HEADER) # TODO(ukai): check origin, location, cookie, .. # 4.1 42. let /challenge/ be the concatenation of /number_1/, # expressed as a big endian 32 bit integer, /number_2/, expressed # as big endian 32 bit integer, and the eight bytes of /key_3/ in the # order they were sent on the wire. challenge = struct.pack('!I', self._number1) challenge += struct.pack('!I', self._number2) challenge += self._key3 self._logger.debug( 'Challenge: %r (%s)', challenge, util.hexify(challenge)) # 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a # big-endian 128 bit string. expected = util.md5_hash(challenge).digest() self._logger.debug( 'Expected challenge response: %r (%s)', expected, util.hexify(expected)) # 4.1 44. read sixteen bytes from the server. # let /reply/ be those bytes. reply = _receive_bytes(self._socket, 16) self._logger.debug( 'Actual challenge response: %r (%s)', reply, util.hexify(reply)) # 4.1 45. if /reply/ does not exactly equal /expected/, then fail # the WebSocket connection and abort these steps. if expected != reply: raise ClientHandshakeError( 'Bad challenge response: %r (expected) != %r (actual)' % (expected, reply)) # 4.1 46. The *WebSocket connection is established*. def _generate_sec_websocket_key(self): # 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive. spaces = random.randint(1, 12) # 4.1 17. let /max_n/ be the largest integer not greater than # 4,294,967,295 divided by /spaces_n/. maxnum = 4294967295 / spaces # 4.1 18. let /number_n/ be a random integer from 0 to /max_n/ # inclusive. number = random.randint(0, maxnum) # 4.1 19. let /product_n/ be the result of multiplying /number_n/ and # /spaces_n/ together. product = number * spaces # 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed # in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to # U+0039 DIGIT NINE (9). key = str(product) # 4.1 21. 
insert between one and twelve random characters from the # range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random # positions. available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1) n = random.randint(1, 12) for _ in xrange(n): ch = random.choice(available_chars) pos = random.randint(0, len(key)) key = key[0:pos] + chr(ch) + key[pos:] # 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at # random positions other than start or end of the string. for _ in xrange(spaces): pos = random.randint(1, len(key) - 1) key = key[0:pos] + ' ' + key[pos:] return number, key def _generate_key3(self): # 4.1 26. let /key3/ be a string consisting of eight random bytes (or # equivalently, a random 64 bit integer encoded in a big-endian order). return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)]) class ClientHandshakeProcessorHixie75(object): """WebSocket opening handshake processor for draft-hixie-thewebsocketprotocol-75. """ _EXPECTED_RESPONSE = ( 'HTTP/1.1 101 Web Socket Protocol Handshake\r\n' + _UPGRADE_HEADER_HIXIE75 + _CONNECTION_HEADER) def __init__(self, socket, options): self._socket = socket self._options = options self._logger = util.get_class_logger(self) def _skip_headers(self): terminator = '\r\n\r\n' pos = 0 while pos < len(terminator): received = _receive_bytes(self._socket, 1) if received == terminator[pos]: pos += 1 elif received == terminator[0]: pos = 1 else: pos = 0 def handshake(self): """Performs opening handshake on the specified socket. Raises: ClientHandshakeError: handshake failed. """ self._socket.sendall(_build_method_line(self._options.resource)) self._socket.sendall(_UPGRADE_HEADER_HIXIE75) self._socket.sendall(_CONNECTION_HEADER) self._socket.sendall(_format_host_header( self._options.server_host, self._options.server_port, self._options.use_tls)) if not self._options.origin: raise ClientHandshakeError( 'Specify the origin of the connection by --origin flag') self._socket.sendall(_origin_header(common.ORIGIN_HEADER, self._options.origin)) self._socket.sendall('\r\n') self._logger.info('Sent handshake') for expected_char in ( ClientHandshakeProcessorHixie75._EXPECTED_RESPONSE): received = _receive_bytes(self._socket, 1) if expected_char != received: raise ClientHandshakeError('Handshake failure') # We cut corners and skip other headers. self._skip_headers() class ClientConnection(object): """A wrapper for socket object to provide the mp_conn interface. mod_pywebsocket library is designed to be working on Apache mod_python's mp_conn object. """ def __init__(self, socket): self._socket = socket def write(self, data): self._socket.sendall(data) def read(self, n): return self._socket.recv(n) def get_remote_addr(self): return self._socket.getpeername() remote_addr = property(get_remote_addr) class ClientRequest(object): """A wrapper class just to make it able to pass a socket object to functions that expect a mp_request object. """ def __init__(self, socket): self._logger = util.get_class_logger(self) self._socket = socket self.connection = ClientConnection(socket) def _drain_received_data(self): """Drains unread data in the receive buffer.""" drained_data = util.drain_received_data(self._socket) if drained_data: self._logger.debug( 'Drained data following close frame: %r', drained_data) class EchoClient(object): """WebSocket echo client.""" def __init__(self, options): self._options = options self._socket = None self._logger = util.get_class_logger(self) def run(self): """Run the client. 
Shake hands and then repeat sending message and receiving its echo. """ self._socket = socket.socket() self._socket.settimeout(self._options.socket_timeout) try: self._socket.connect((self._options.server_host, self._options.server_port)) if self._options.use_tls: self._socket = _TLSSocket(self._socket) version = self._options.protocol_version if (version == _PROTOCOL_VERSION_HYBI08 or version == _PROTOCOL_VERSION_HYBI13): self._handshake = ClientHandshakeProcessor( self._socket, self._options) elif version == _PROTOCOL_VERSION_HYBI00: self._handshake = ClientHandshakeProcessorHybi00( self._socket, self._options) elif version == _PROTOCOL_VERSION_HIXIE75: self._handshake = ClientHandshakeProcessorHixie75( self._socket, self._options) else: raise ValueError( 'Invalid --protocol-version flag: %r' % version) self._handshake.handshake() self._logger.info('Connection established') request = ClientRequest(self._socket) version_map = { _PROTOCOL_VERSION_HYBI08: common.VERSION_HYBI08, _PROTOCOL_VERSION_HYBI13: common.VERSION_HYBI13, _PROTOCOL_VERSION_HYBI00: common.VERSION_HYBI00, _PROTOCOL_VERSION_HIXIE75: common.VERSION_HIXIE75} request.ws_version = version_map[version] if (version == _PROTOCOL_VERSION_HYBI08 or version == _PROTOCOL_VERSION_HYBI13): stream_option = StreamOptions() stream_option.mask_send = True stream_option.unmask_receive = False if self._options.deflate_stream: stream_option.deflate_stream = True if self._options.deflate_frame is not False: processor = self._options.deflate_frame processor.setup_stream_options(stream_option) self._stream = Stream(request, stream_option) elif version == _PROTOCOL_VERSION_HYBI00: self._stream = StreamHixie75(request, True) elif version == _PROTOCOL_VERSION_HIXIE75: self._stream = StreamHixie75(request) for line in self._options.message.split(','): self._stream.send_message(line) if self._options.verbose: print 'Send: %s' % line try: received = self._stream.receive_message() if self._options.verbose: print 'Recv: %s' % received except Exception, e: if self._options.verbose: print 'Error: %s' % e raise if version != _PROTOCOL_VERSION_HIXIE75: self._do_closing_handshake() finally: self._socket.close() def _do_closing_handshake(self): """Perform closing handshake using the specified closing frame.""" if self._options.message.split(',')[-1] == _GOODBYE_MESSAGE: # requested server initiated closing handshake, so # expecting closing handshake message from server. self._logger.info('Wait for server-initiated closing handshake') message = self._stream.receive_message() if message is None: print 'Recv close' print 'Send ack' self._logger.info( 'Received closing handshake and sent ack') return print 'Send close' self._stream.close_connection() self._logger.info('Sent closing handshake') print 'Recv ack' self._logger.info('Received ack') def main(): sys.stdout = codecs.getwriter('utf-8')(sys.stdout) parser = OptionParser() # We accept --command_line_flag style flags which is the same as Google # gflags in addition to common --command-line-flag style flags. 
parser.add_option('-s', '--server-host', '--server_host', dest='server_host', type='string', default='localhost', help='server host') parser.add_option('-p', '--server-port', '--server_port', dest='server_port', type='int', default=_UNDEFINED_PORT, help='server port') parser.add_option('-o', '--origin', dest='origin', type='string', default=None, help='origin') parser.add_option('-r', '--resource', dest='resource', type='string', default='/echo', help='resource path') parser.add_option('-m', '--message', dest='message', type='string', help=('comma-separated messages to send. ' '%s will force close the connection from server.' % _GOODBYE_MESSAGE)) parser.add_option('-q', '--quiet', dest='verbose', action='store_false', default=True, help='suppress messages') parser.add_option('-t', '--tls', dest='use_tls', action='store_true', default=False, help='use TLS (wss://)') parser.add_option('-k', '--socket-timeout', '--socket_timeout', dest='socket_timeout', type='int', default=_TIMEOUT_SEC, help='Timeout(sec) for sockets') parser.add_option('--draft75', dest='draft75', action='store_true', default=False, help='use the Hixie 75 protocol. This overrides ' 'protocol-version flag') parser.add_option('--protocol-version', '--protocol_version', dest='protocol_version', type='string', default=_PROTOCOL_VERSION_HYBI13, help='WebSocket protocol version to use. One of \'' + _PROTOCOL_VERSION_HYBI13 + '\', \'' + _PROTOCOL_VERSION_HYBI08 + '\', \'' + _PROTOCOL_VERSION_HYBI00 + '\', \'' + _PROTOCOL_VERSION_HIXIE75 + '\'') parser.add_option('--version-header', '--version_header', dest='version_header', type='int', default=-1, help='specify Sec-WebSocket-Version header value') parser.add_option('--deflate-stream', '--deflate_stream', dest='deflate_stream', action='store_true', default=False, help='use deflate-stream extension. This value will be ' 'ignored if used with protocol version that doesn\'t ' 'support deflate-stream.') parser.add_option('--deflate-frame', '--deflate_frame', dest='deflate_frame', action='store_true', default=False, help='use deflate-frame extension. This value will be ' 'ignored if used with protocol version that doesn\'t ' 'support deflate-frame.') parser.add_option('--log-level', '--log_level', type='choice', dest='log_level', default='warn', choices=['debug', 'info', 'warn', 'error', 'critical'], help='Log level.') (options, unused_args) = parser.parse_args() logging.basicConfig(level=logging.getLevelName(options.log_level.upper())) if options.draft75: options.protocol_version = _PROTOCOL_VERSION_HIXIE75 # Default port number depends on whether TLS is used. if options.server_port == _UNDEFINED_PORT: if options.use_tls: options.server_port = common.DEFAULT_WEB_SOCKET_SECURE_PORT else: options.server_port = common.DEFAULT_WEB_SOCKET_PORT # optparse doesn't seem to handle non-ascii default values. # Set default message here. if not options.message: options.message = u'Hello,\u65e5\u672c' # "Japan" in Japanese EchoClient(options).run() if __name__ == '__main__': main() # vi:sts=4 sw=4 et
bsd-3-clause
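The hybi handshake code above checks the Sec-WebSocket-Accept header the server returns against the key the client sent. A minimal, self-contained sketch of that computation (the key in the final comment is the RFC 6455 example value; the GUID is the fixed protocol constant):

import base64
import hashlib
import os

# RFC 6455 magic GUID appended to the client key before hashing.
WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'

def make_client_key():
    # 16 random bytes, base64-encoded, as sent in Sec-WebSocket-Key.
    return base64.b64encode(os.urandom(16)).decode('ascii')

def expected_accept(client_key):
    # The server must reply with base64(SHA-1(key + GUID)) in Sec-WebSocket-Accept.
    digest = hashlib.sha1((client_key + WEBSOCKET_ACCEPT_UUID).encode('ascii')).digest()
    return base64.b64encode(digest).decode('ascii')

key = make_client_key()
print(key, expected_accept(key))
# For the RFC 6455 example key 'dGhlIHNhbXBsZSBub25jZQ==' the accept value
# is 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='.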
salimfadhley/jenkinsapi
jenkinsapi_tests/systests/test_nodes.py
3
5746
""" System tests for `jenkinsapi.jenkins` module. """ import logging import pytest from jenkinsapi.node import Node from jenkinsapi.credential import SSHKeyCredential from jenkinsapi_tests.test_utils.random_strings import random_string log = logging.getLogger(__name__) def test_online_offline(jenkins): """ Can we flip the online / offline state of the master node. """ # Master node name should be case insensitive # mn0 = jenkins.get_node('MaStEr') mn = jenkins.get_node('master') # self.assertEqual(mn, mn0) mn.set_online() # It should already be online, hence no-op assert mn.is_online() is True mn.set_offline() # We switch that suckah off mn.set_offline() # This should be a no-op assert mn.is_online() is False mn.set_online() # Switch it back on assert mn.is_online() is True def test_create_jnlp_node(jenkins): node_name = random_string() node_dict = { 'num_executors': 1, 'node_description': 'Test JNLP Node', 'remote_fs': '/tmp', 'labels': 'systest_jnlp', 'exclusive': True, 'tool_location': [ { "key": "hudson.tasks.Maven$MavenInstallation$DescriptorImpl@Maven 3.0.5", "home": "/home/apache-maven-3.0.5/" }, ] } node = jenkins.nodes.create_node(node_name, node_dict) assert isinstance(node, Node) is True del jenkins.nodes[node_name] def test_create_ssh_node(jenkins): node_name = random_string() creds = jenkins.get_credentials() cred_descr = random_string() cred_dict = { 'description': cred_descr, 'userName': 'username', 'passphrase': '', 'private_key': '-----BEGIN RSA PRIVATE KEY-----' } creds[cred_descr] = SSHKeyCredential(cred_dict) node_dict = { 'num_executors': 1, 'node_description': 'Description %s' % node_name, 'remote_fs': '/tmp', 'labels': node_name, 'exclusive': False, 'host': '127.0.0.1', 'port': 22, 'credential_description': cred_descr, 'jvm_options': '', 'java_path': '', 'prefix_start_slave_cmd': '', 'suffix_start_slave_cmd': '', 'retention': 'ondemand', 'ondemand_delay': 0, 'ondemand_idle_delay': 5, 'tool_location': [ { "key": "hudson.tasks.Maven$MavenInstallation$DescriptorImpl@Maven 3.0.5", "home": "/home/apache-maven-3.0.5/" }, ] } node = jenkins.nodes.create_node(node_name, node_dict) assert isinstance(node, Node) is True del jenkins.nodes[node_name] jenkins.nodes[node_name] = node_dict assert isinstance(jenkins.nodes[node_name], Node) is True del jenkins.nodes[node_name] def test_delete_node(jenkins): node_name = random_string() node_dict = { 'num_executors': 1, 'node_description': 'Test JNLP Node', 'remote_fs': '/tmp', 'labels': 'systest_jnlp', 'exclusive': True } jenkins.nodes.create_node(node_name, node_dict) del jenkins.nodes[node_name] with pytest.raises(KeyError): jenkins.nodes[node_name] with pytest.raises(KeyError): del jenkins.nodes['not_exist'] def test_delete_all_nodes(jenkins): nodes = jenkins.nodes for name in nodes.keys(): del nodes[name] assert len(jenkins.nodes) == 1 def test_get_node_labels(jenkins): node_name = random_string() node_labels = 'LABEL1 LABEL2' node_dict = { 'num_executors': 1, 'node_description': 'Test Node with Labels', 'remote_fs': '/tmp', 'labels': node_labels, 'exclusive': True } node = jenkins.nodes.create_node(node_name, node_dict) assert node.get_labels() == node_labels del jenkins.nodes[node_name] def test_get_executors(jenkins): node_name = random_string() node_labels = 'LABEL1 LABEL2' node_dict = { 'num_executors': 1, 'node_description': 'Test Node with Labels', 'remote_fs': '/tmp', 'labels': node_labels, 'exclusive': True } node = jenkins.nodes.create_node(node_name, node_dict) with pytest.raises(AttributeError): assert 
node.get_config_element('executors') == '1' assert node.get_config_element('numExecutors') == '1' del jenkins.nodes[node_name] def test_set_executors(jenkins): node_name = random_string() node_labels = 'LABEL1 LABEL2' node_dict = { 'num_executors': 1, 'node_description': 'Test Node with Labels', 'remote_fs': '/tmp', 'labels': node_labels, 'exclusive': True } node = jenkins.nodes.create_node(node_name, node_dict) assert node.set_config_element('numExecutors', '5') is None assert node.get_config_element('numExecutors') == '5' del jenkins.nodes[node_name] def test_set_master_executors(jenkins): node = jenkins.nodes['master'] assert node.get_num_executors() == 2 node.set_num_executors(5) assert node.get_num_executors() == 5 node.set_num_executors(2) def test_offline_reason(jenkins): node_name = random_string() node_labels = 'LABEL1 LABEL2' node_dict = { 'num_executors': 1, 'node_description': 'Test Node with Labels', 'remote_fs': '/tmp', 'labels': node_labels, 'exclusive': True } node = jenkins.nodes.create_node(node_name, node_dict) node.toggle_temporarily_offline('test1') node.poll() assert node.offline_reason() == 'test1' node.update_offline_reason('test2') node.poll() assert node.offline_reason() == 'test2' del jenkins.nodes[node_name]
mit
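The node tests above drive jenkinsapi through a `jenkins` fixture. A rough sketch of the same Nodes calls outside the test suite, assuming a reachable Jenkins instance; the URL and credentials are placeholders:

from jenkinsapi.jenkins import Jenkins

# Placeholder server URL and credentials; adjust for a real instance.
jenkins = Jenkins('http://localhost:8080', username='admin', password='admin')

node_name = 'example-jnlp-node'
node_dict = {
    'num_executors': 1,
    'node_description': 'Example JNLP node',
    'remote_fs': '/tmp',
    'labels': 'example_label',
    'exclusive': True,
}

# Same container API the tests exercise: create, inspect, then delete the node.
node = jenkins.nodes.create_node(node_name, node_dict)
print(node.get_labels())
del jenkins.nodes[node_name]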
dbaxa/django
tests/utils_tests/test_decorators.py
319
4870
from django.http import HttpResponse from django.template import engines from django.template.response import TemplateResponse from django.test import RequestFactory, SimpleTestCase from django.utils.decorators import classproperty, decorator_from_middleware class ProcessViewMiddleware(object): def process_view(self, request, view_func, view_args, view_kwargs): pass process_view_dec = decorator_from_middleware(ProcessViewMiddleware) @process_view_dec def process_view(request): return HttpResponse() class ClassProcessView(object): def __call__(self, request): return HttpResponse() class_process_view = process_view_dec(ClassProcessView()) class FullMiddleware(object): def process_request(self, request): request.process_request_reached = True def process_view(self, request, view_func, view_args, view_kwargs): request.process_view_reached = True def process_template_response(self, request, response): request.process_template_response_reached = True return response def process_response(self, request, response): # This should never receive unrendered content. request.process_response_content = response.content request.process_response_reached = True return response full_dec = decorator_from_middleware(FullMiddleware) class DecoratorFromMiddlewareTests(SimpleTestCase): """ Tests for view decorators created using ``django.utils.decorators.decorator_from_middleware``. """ rf = RequestFactory() def test_process_view_middleware(self): """ Test a middleware that implements process_view. """ process_view(self.rf.get('/')) def test_callable_process_view_middleware(self): """ Test a middleware that implements process_view, operating on a callable class. """ class_process_view(self.rf.get('/')) def test_full_dec_normal(self): """ Test that all methods of middleware are called for normal HttpResponses """ @full_dec def normal_view(request): template = engines['django'].from_string("Hello world") return HttpResponse(template.render()) request = self.rf.get('/') normal_view(request) self.assertTrue(getattr(request, 'process_request_reached', False)) self.assertTrue(getattr(request, 'process_view_reached', False)) # process_template_response must not be called for HttpResponse self.assertFalse(getattr(request, 'process_template_response_reached', False)) self.assertTrue(getattr(request, 'process_response_reached', False)) def test_full_dec_templateresponse(self): """ Test that all methods of middleware are called for TemplateResponses in the right sequence. """ @full_dec def template_response_view(request): template = engines['django'].from_string("Hello world") return TemplateResponse(request, template) request = self.rf.get('/') response = template_response_view(request) self.assertTrue(getattr(request, 'process_request_reached', False)) self.assertTrue(getattr(request, 'process_view_reached', False)) self.assertTrue(getattr(request, 'process_template_response_reached', False)) # response must not be rendered yet. self.assertFalse(response._is_rendered) # process_response must not be called until after response is rendered, # otherwise some decorators like csrf_protect and gzip_page will not # work correctly. 
See #16004 self.assertFalse(getattr(request, 'process_response_reached', False)) response.render() self.assertTrue(getattr(request, 'process_response_reached', False)) # Check that process_response saw the rendered content self.assertEqual(request.process_response_content, b"Hello world") class ClassPropertyTest(SimpleTestCase): def test_getter(self): class Foo(object): foo_attr = 123 def __init__(self): self.foo_attr = 456 @classproperty def foo(cls): return cls.foo_attr class Bar(object): bar = classproperty() @bar.getter def bar(cls): return 123 self.assertEqual(Foo.foo, 123) self.assertEqual(Foo().foo, 123) self.assertEqual(Bar.bar, 123) self.assertEqual(Bar().bar, 123) def test_override_getter(self): class Foo(object): @classproperty def foo(cls): return 123 @foo.getter def foo(cls): return 456 self.assertEqual(Foo.foo, 456) self.assertEqual(Foo().foo, 456)
bsd-3-clause
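The classproperty tests above depend on a descriptor that hands the owning class, not the instance, to the getter. A rough sketch of how such a descriptor can be written (an illustration, not necessarily Django's exact code):

class classproperty(object):
    """Descriptor that calls the getter with the owner class."""
    def __init__(self, method=None):
        self.fget = method

    def __get__(self, instance, cls=None):
        # Always hand the class to the getter, so Foo.foo and Foo().foo agree.
        return self.fget(cls)

    def getter(self, method):
        # Supports the @foo.getter override pattern used in the tests.
        self.fget = method
        return self

class Foo(object):
    @classproperty
    def foo(cls):
        return 123

assert Foo.foo == Foo().foo == 123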
raamana/pyradigm
pyradigm/_version.py
1
18454
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" git_date = "$Format:%ci$" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "pyradigm-" cfg.versionfile_source = "pyradigm/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." 
return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
mit
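render_pep440 above assembles the closest tag, commit distance, short hash and dirty flag into a PEP 440 version string. A small worked example with illustrative values, showing the strings two of the styles would produce:

pieces = {
    'closest-tag': '1.2',        # nearest reachable tag
    'distance': 3,               # commits since that tag
    'short': 'abc1234',          # abbreviated commit hash
    'dirty': True,               # uncommitted changes present
    'long': 'abc1234deadbeef',
    'error': None,
    'date': None,
}

# render(pieces, 'pep440')        -> '1.2+3.gabc1234.dirty'
# render(pieces, 'git-describe')  -> '1.2-3-gabc1234-dirty'
# With no tags at all, the pep440 style falls back to
# '0+untagged.DISTANCE.gHEX[.dirty]'.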
morpheby/levelup-by
common/lib/xmodule/xmodule/modulestore/parsers.py
9
4058
import re # Prefix for the branch portion of a locator URL BRANCH_PREFIX = "/branch/" # Prefix for the block portion of a locator URL BLOCK_PREFIX = "/block/" # Prefix for the version portion of a locator URL, when it is preceded by a course ID VERSION_PREFIX = "/version/" # Prefix for version when it begins the URL (no course ID). URL_VERSION_PREFIX = 'version/' URL_RE = re.compile(r'^edx://(.+)$', re.IGNORECASE) def parse_url(string): """ A url must begin with 'edx://' (case-insensitive match), followed by either a version_guid or a course_id. Examples: 'edx://version/0123FFFF' 'edx://mit.eecs.6002x' 'edx://mit.eecs.6002x;published' 'edx://mit.eecs.6002x;published/block/HW3' 'edx://mit.eecs.6002x;published/version/000eee12345/block/HW3' This returns None if string cannot be parsed. If it can be parsed as a version_guid with no preceding course_id, returns a dict with key 'version_guid' and the value, If it can be parsed as a course_id, returns a dict with key 'id' and optional keys 'branch' and 'version_guid'. """ match = URL_RE.match(string) if not match: return None path = match.group(1) if path.startswith(URL_VERSION_PREFIX): return parse_guid(path[len(URL_VERSION_PREFIX):]) return parse_course_id(path) BLOCK_RE = re.compile(r'^\w+$', re.IGNORECASE) def parse_block_ref(string): r""" A block_ref is a string of word_chars. <word_chars> matches one or more Unicode word characters; this includes most characters that can be part of a word in any language, as well as numbers and the underscore. (see definition of \w in python regular expressions, at http://docs.python.org/dev/library/re.html) If string is a block_ref, returns a dict with key 'block_ref' and the value, otherwise returns None. """ if len(string) > 0 and BLOCK_RE.match(string): return {'block': string} return None GUID_RE = re.compile(r'^(?P<version_guid>[A-F0-9]+)(' + BLOCK_PREFIX + '(?P<block>\w+))?$', re.IGNORECASE) def parse_guid(string): """ A version_guid is a string of hex digits (0-F). If string is a version_guid, returns a dict with key 'version_guid' and the value, otherwise returns None. """ m = GUID_RE.match(string) if m is not None: return m.groupdict() else: return None COURSE_ID_RE = re.compile( r'^(?P<id>(\w+)(\.\w+\w*)*)(' + BRANCH_PREFIX + '(?P<branch>\w+))?(' + VERSION_PREFIX + '(?P<version_guid>[A-F0-9]+))?(' + BLOCK_PREFIX + '(?P<block>\w+))?$', re.IGNORECASE ) def parse_course_id(string): r""" A course_id has a main id component. There may also be an optional branch (/branch/published or /branch/draft). There may also be an optional version (/version/519665f6223ebd6980884f2b). There may also be an optional block (/block/HW3 or /block/Quiz2). Examples of valid course_ids: 'mit.eecs.6002x' 'mit.eecs.6002x/branch/published' 'mit.eecs.6002x/block/HW3' 'mit.eecs.6002x/branch/published/block/HW3' 'mit.eecs.6002x/branch/published/version/519665f6223ebd6980884f2b/block/HW3' Syntax: course_id = main_id [/branch/ branch] [/version/ version ] [/block/ block] main_id = name [. name]* branch = name block = name name = <word_chars> <word_chars> matches one or more Unicode word characters; this includes most characters that can be part of a word in any language, as well as numbers and the underscore. (see definition of \w in python regular expressions, at http://docs.python.org/dev/library/re.html) If string is a course_id, returns a dict with keys 'id', 'branch', and 'block'. Revision is optional: if missing returned_dict['branch'] is None. Block is optional: if missing returned_dict['block'] is None. 
Else returns None. """ match = COURSE_ID_RE.match(string) if not match: return None return match.groupdict()
agpl-3.0
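parse_url above dispatches between version-guid and course-id locators and returns the named regex groups as a dict. A short usage sketch with the expected results, assuming the module is importable as xmodule.modulestore.parsers (derived from the path above):

from xmodule.modulestore.parsers import parse_url

# Version-guid form: only the guid (and optional block) is returned.
print(parse_url('edx://version/0123ffff'))
# -> {'version_guid': '0123ffff', 'block': None}

# Course-id form with branch and block.
print(parse_url('edx://mit.eecs.6002x/branch/published/block/HW3'))
# -> {'id': 'mit.eecs.6002x', 'branch': 'published', 'version_guid': None, 'block': 'HW3'}

# Anything without the edx:// prefix or matching structure returns None.
print(parse_url('http://example.com'))  # -> None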
XVPCoin/vaporcoin
qa/rpc-tests/util.py
115
5260
# Copyright (c) 2014 The Bitcoin Core developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # # Add python-bitcoinrpc to module search path: import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc")) from decimal import Decimal import json import shutil import subprocess import time from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException from util import * START_P2P_PORT=11000 START_RPC_PORT=11100 def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def sync_blocks(rpc_connections): """ Wait until everybody has the same block count """ while True: counts = [ x.getblockcount() for x in rpc_connections ] if counts == [ counts[0] ]*len(counts): break time.sleep(1) def sync_mempools(rpc_connections): """ Wait until everybody has the same transactions in their memory pools """ while True: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match+1 if num_match == len(rpc_connections): break time.sleep(1) bitcoind_processes = [] def initialize_chain(test_dir): """ Create (or copy from cache) a 200-block-long chain and 4 wallets. bitcoind and bitcoin-cli must be in search path. """ if not os.path.isdir(os.path.join("cache", "node0")): devnull = open("/dev/null", "w+") # Create cache directories, run bitcoinds: for i in range(4): datadir = os.path.join("cache", "node"+str(i)) os.makedirs(datadir) with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f: f.write("regtest=1\n"); f.write("rpcuser=rt\n"); f.write("rpcpassword=rt\n"); f.write("port="+str(START_P2P_PORT+i)+"\n"); f.write("rpcport="+str(START_RPC_PORT+i)+"\n"); args = [ "bitcoind", "-keypool=1", "-datadir="+datadir ] if i > 0: args.append("-connect=127.0.0.1:"+str(START_P2P_PORT)) bitcoind_processes.append(subprocess.Popen(args)) subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) devnull.close() rpcs = [] for i in range(4): try: url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,) rpcs.append(AuthServiceProxy(url)) except: sys.stderr.write("Error connecting to "+url+"\n") sys.exit(1) # Create a 200-block-long chain; each of the 4 nodes # gets 25 mature blocks and 25 immature. 
for i in range(4): rpcs[i].setgenerate(True, 25) sync_blocks(rpcs) for i in range(4): rpcs[i].setgenerate(True, 25) sync_blocks(rpcs) # Shut them down, and remove debug.logs: stop_nodes(rpcs) wait_bitcoinds() for i in range(4): os.remove(debug_log("cache", i)) for i in range(4): from_dir = os.path.join("cache", "node"+str(i)) to_dir = os.path.join(test_dir, "node"+str(i)) shutil.copytree(from_dir, to_dir) def start_nodes(num_nodes, dir): # Start bitcoinds, and wait for RPC interface to be up and running: devnull = open("/dev/null", "w+") for i in range(num_nodes): datadir = os.path.join(dir, "node"+str(i)) args = [ "bitcoind", "-datadir="+datadir ] bitcoind_processes.append(subprocess.Popen(args)) subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) devnull.close() # Create&return JSON-RPC connections rpc_connections = [] for i in range(num_nodes): url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,) rpc_connections.append(AuthServiceProxy(url)) return rpc_connections def debug_log(dir, n_node): return os.path.join(dir, "node"+str(n_node), "regtest", "debug.log") def stop_nodes(nodes): for i in range(len(nodes)): nodes[i].stop() del nodes[:] # Emptying array closes connections as a side effect def wait_bitcoinds(): # Wait for all bitcoinds to cleanly exit for bitcoind in bitcoind_processes: bitcoind.wait() del bitcoind_processes[:] def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:"+str(START_P2P_PORT+node_num) from_connection.addnode(ip_port, "onetry") def assert_equal(thing1, thing2): if thing1 != thing2: raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
mit
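The harness above reaches each regtest node over JSON-RPC. A minimal sketch of one such connection using the same calls, assuming a node configured as above is already running:

from bitcoinrpc.authproxy import AuthServiceProxy

START_RPC_PORT = 11100  # same base port as the harness above

# rt:rt matches the rpcuser/rpcpassword written into bitcoin.conf above.
node0 = AuthServiceProxy('http://rt:rt@127.0.0.1:%d' % START_RPC_PORT)

print(node0.getblockcount())      # current chain height
node0.setgenerate(True, 1)        # mine one block on regtest
print(node0.getrawmempool())      # transactions waiting to be mined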
degoldschmidt/pytrack-analysis
gui/videoplayer.py
1
2535
import numpy as np import cv2 import tkinter as tk from PIL import Image, ImageTk import os.path as op VIDEODIR = '/media/degoldschmidt/DATA_DENNIS_002/working_data/0007_KPEG' VIDEOFILE = op.join(VIDEODIR, 'cam01_2018-04-18T15_39_08.avi') START_FRAME = 4000 #Set up GUI window = tk.Tk() #Makes main window window.wm_title("Videoplayer") window.config(background="#FFFFFF") #Graphics window imageFrame = tk.Frame(window, width=600, height=500) imageFrame.grid(row=0, column=0, padx=10, pady=2) #Slider window (slider controls stage position) sliderFrame = tk.Frame(window, width=600, height=100) sliderFrame.grid(row = 600, column=0, padx=10, pady=2) w = tk.Scale(sliderFrame, from_=0, to=108000, width =10, length=700, resolution=1, orient=tk.HORIZONTAL) w.pack(expand=True, fill=tk.BOTH) var = w.get() #Capture video frames lmain = tk.Label(imageFrame) lmain.grid(row=0, column=0) cap = cv2.VideoCapture(VIDEOFILE) def get_background(nframes, bg_frames, startat=0): for ii,iframe in enumerate(np.random.choice(nframes, bg_frames)): cap.set(cv2.CAP_PROP_POS_FRAMES, startat+iframe) ret, frame = cap.read() if ii == 0: ### arrays image = np.zeros(frame.shape, dtype=frame.dtype) difference = np.zeros(frame.shape, dtype=frame.dtype) background = np.zeros(frame.shape, dtype=np.float32) output = np.zeros(frame.shape, dtype=frame.dtype) outputgray = cv2.cvtColor(output,cv2.COLOR_BGR2GRAY).astype(np.uint8) background += frame if ii == bg_frames-1: background /= bg_frames return background bg = get_background(108000, 100, startat=START_FRAME) def show_frame(): var = -1 if var != w.get(): var = w.get() cap.set(cv2.CAP_PROP_POS_FRAMES, var) _, frame = cap.read() diff = bg - frame ret, mask = cv2.threshold(cv2.cvtColor(diff,cv2.COLOR_BGR2GRAY), 30, 255, cv2.THRESH_BINARY) #output = 255 - output #output = cv2.bitwise_and(frame, frame, mask = mask) outputgray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY).astype(np.uint8) preview = cv2.flip(outputgray, 1) preview = cv2.resize(preview, (700, 700)) cv2image = preview#cv2.cvtColor(preview, cv2.COLOR_BGR2RGBA) img = Image.fromarray(cv2image) imgtk = ImageTk.PhotoImage(image=img) lmain.imgtk = imgtk lmain.configure(image=imgtk) lmain.after(100, show_frame) show_frame() #Display 2 window.mainloop() #Starts GUI
gpl-3.0
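get_background above averages randomly sampled frames to estimate a static background, which show_frame then subtracts and thresholds. The same idea in isolation, using cv2.absdiff to sidestep wrap-around when subtracting uint8 frames; the video path and threshold value are illustrative:

import numpy as np
import cv2

cap = cv2.VideoCapture('video.avi')          # illustrative path
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# Average a random sample of frames to estimate the static background.
background = None
count = 0
for idx in np.random.choice(n_frames, 50):
    cap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))
    ok, frame = cap.read()
    if not ok:
        continue
    if background is None:
        background = np.zeros(frame.shape, dtype=np.float32)
    background += frame
    count += 1
background = (background / count).astype(np.uint8)

# Foreground mask for one frame: absolute difference against the background,
# then a fixed binary threshold.
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
ok, frame = cap.read()
diff = cv2.absdiff(frame, background)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)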
malaterre/ITK
Modules/Filtering/ImageFeature/wrapping/test/HoughTransform2DLinesImageFilterTest.py
2
1337
#========================================================================== # # Copyright NumFOCUS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ # # Example on the use of the HoughTransform2DLinesImageFilter # import itk from sys import argv itk.auto_progress(2) edges = itk.imread(argv[1], itk.F) houghF = itk.HoughTransform2DLinesImageFilter[itk.F, itk.F].New() houghF.SetInput(edges) houghF.SetAngleResolution(100) houghF.SetNumberOfLines(2) houghF.Update() detected_lines = houghF.GetLines() # Check that we detected 2 lines as we requested. assert len(detected_lines) == 2 # Check that we can access the line by index. line1 = detected_lines[0] # Check that we can access the points of the line assert len(line1.GetPoints()) == 2
apache-2.0
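The Hough transform used above accumulates votes in (angle, radius) space: every edge pixel (x, y) votes for all lines satisfying r = x*cos(theta) + y*sin(theta). A tiny NumPy sketch of that accumulator idea, independent of ITK:

import numpy as np

# Edge points on the line y = x; in normal form r = x*cos(t) + y*sin(t)
# this line corresponds to an angle of 135 degrees and r = 0.
points = [(i, i) for i in range(1, 20)]

thetas = np.deg2rad(np.arange(0, 180))             # 1-degree angle resolution
offset = 64                                        # shift so negative r fits
accumulator = np.zeros((len(thetas), 2 * offset), dtype=int)

for x, y in points:
    r = x * np.cos(thetas) + y * np.sin(thetas)
    for ti, ri in enumerate(np.round(r).astype(int) + offset):
        accumulator[ti, ri] += 1

ti, ri = np.unravel_index(accumulator.argmax(), accumulator.shape)
print(np.rad2deg(thetas[ti]), ri - offset)         # an angle near 135, r == 0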
SecurityFTW/cs-suite
tools/G-Scout/G-Scout-master/categories/instance_groups.py
2
1836
from googleapiclient import discovery from tinydb import TinyDB from core.utility import get_gcloud_creds db = TinyDB('entities.json') group_table = db.table('Instance Groups') template_table = db.table('Instance Templates') from oauth2client.client import GoogleCredentials credentials = GoogleCredentials.get_application_default() from oauth2client.file import Storage storage = Storage('creds.data') service = discovery.build('compute', 'v1', credentials=get_gcloud_creds()) instanceGroups = service.instanceGroups() instanceTemplates = service.instanceTemplates() zones = service.zones() def insert_templates(): projectId = TinyDB('projects.json').table("Project").all() request = instanceTemplates.list(project=projectId) try: while request is not None: response = request.execute() for instanceTemplate in response['items']: template_table.insert(instanceTemplate) request = instanceTemplates.list_next(previous_request=request, previous_response=response) except KeyError: pass def insert_instance_groups(): projectId = TinyDB('projects.json').table("Project").all() for zone in get_zones(): request = instanceGroups.list(project=projectId, zone=zone) try: while request is not None: response = request.execute() for instanceGroup in response['items']: group_table.insert(instanceGroup) request = instanceGroups.list_next(previous_request=request, previous_response=response) except KeyError: pass def get_zones(): projectId = TinyDB('projects.json').table("Project").all() results = [] request = zones.list(project=projectId) response = request.execute()['items'] for result in response: results.append(result['name']) return results
gpl-3.0
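Both insert functions above walk paged Compute Engine results with the list / list_next idiom. The same pattern in isolation (project and zone values are placeholders); using response.get('items', []) avoids the bare KeyError handling:

from googleapiclient import discovery

service = discovery.build('compute', 'v1')  # uses application default credentials

def iter_instance_groups(project, zone):
    """Yield every instance group in a zone, following result pages."""
    resource = service.instanceGroups()
    request = resource.list(project=project, zone=zone)
    while request is not None:
        response = request.execute()
        for item in response.get('items', []):
            yield item
        request = resource.list_next(previous_request=request,
                                     previous_response=response)

for group in iter_instance_groups('my-project', 'us-central1-a'):
    print(group['name'])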
nre-aachen/GeMpy
gempy/GemPy_f.py
1
13575
""" Module with classes and methods to perform implicit regional modelling based on the potential field method. Tested on Ubuntu 14 Created on 10/10 /2016 @author: Miguel de la Varga """ from __future__ import division import os from os import path import sys # This is for sphenix to find the packages sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) ) # --DEP # import theano # import theano.tensor as T import numpy as _np # --DEP-- import pandas as _pn import warnings import copy from gempy.Visualization import PlotData try: from gempy.visualization_vtk import visualize, export_vtk_rectilinear except ModuleNotFoundError: warnings.warn('Vtk package is not installed. No vtk visualization available.') from gempy.DataManagement import InputData, InterpolatorInput from IPython.core.debugger import Tracer # DEP? # def rescale_data(geo_data, rescaling_factor=None): # """ # Rescale the data of a DataManagement object between 0 and 1 due to stability problem of the float32. # Args: # geo_data: DataManagement object with the real scale data # rescaling_factor(float): factor of the rescaling. Default to maximum distance in one the axis # # Returns: # # """ # max_coord = _pn.concat( # [geo_data.foliations, geo_data.interfaces]).max()[['X', 'Y', 'Z']] # min_coord = _pn.concat( # [geo_data.foliations, geo_data.interfaces]).min()[['X', 'Y', 'Z']] # # if not rescaling_factor: # rescaling_factor = 2*_np.max(max_coord - min_coord) # # centers = (max_coord+min_coord)/2 # # new_coord_interfaces = (geo_data.interfaces[['X', 'Y', 'Z']] - # centers) / rescaling_factor + 0.5001 # # new_coord_foliations = (geo_data.foliations[['X', 'Y', 'Z']] - # centers) / rescaling_factor + 0.5001 # # new_coord_extent = (geo_data.extent - _np.repeat(centers, 2)) / rescaling_factor + 0.5001 # # geo_data_rescaled = copy.deepcopy(geo_data) # geo_data_rescaled.interfaces[['X', 'Y', 'Z']] = new_coord_interfaces # geo_data_rescaled.foliations[['X', 'Y', 'Z']] = new_coord_foliations # geo_data_rescaled.extent = new_coord_extent.as_matrix() # # geo_data_rescaled.grid.grid = (geo_data.grid.grid - centers.as_matrix()) /rescaling_factor + 0.5001 # # geo_data_rescaled.rescaling_factor = rescaling_factor # # return geo_data_rescaled # TODO needs to be updated # def compute_block_model(geo_data, series_number="all", # series_distribution=None, order_series=None, # extent=None, resolution=None, grid_type="regular_3D", # verbose=0, **kwargs): # # if extent or resolution: # set_grid(geo_data, extent=extent, resolution=resolution, grid_type=grid_type, **kwargs) # # if series_distribution: # set_data_series(geo_data, series_distribution=series_distribution, order_series=order_series, verbose=0) # # if not getattr(geo_data, 'interpolator', None): # import warnings # # warnings.warn('Using default interpolation values') # set_interpolator(geo_data) # # geo_data.interpolator.tg.final_block.set_value(_np.zeros_like(geo_data.grid.grid[:, 0])) # # geo_data.interpolator.compute_block_model(series_number=series_number, verbose=verbose) # # return geo_data.interpolator.tg.final_block def data_to_pickle(geo_data, path=False): geo_data.data_to_pickle(path) def read_pickle(path): import pickle with open(path, 'rb') as f: # The protocol version used is detected automatically, so we do not # have to specify it. 
data = pickle.load(f) return data def get_series(geo_gata): """ Args: geo_gata: Returns: """ return geo_gata.series def get_grid(geo_data): return geo_data.grid.grid def get_resolution(geo_data): return geo_data.resolution def get_extent(geo_data): return geo_data.extent def get_raw_data(geo_data, dtype='all'): return geo_data.get_raw_data(itype=dtype) def create_data(extent, resolution=[50, 50, 50], **kwargs): """ Method to initialize the class data. Calling this function some of the data has to be provided (TODO give to everything a default). Args: extent (list or array): [x_min, x_max, y_min, y_max, z_min, z_max]. Extent for the visualization of data and default of for the grid class. resolution (list or array): [nx, ny, nz]. Resolution for the visualization of data and default of for the grid class. **kwargs: Arbitrary keyword arguments. Keyword Args: Resolution ((Optional[list])): [nx, ny, nz]. Defaults to 50 path_i: Path to the data bases of interfaces. Default os.getcwd(), path_f: Path to the data bases of foliations. Default os.getcwd() Returns: GeMpy.DataManagement: Object that encapsulate all raw data of the project dep: self.Plot(GeMpy_core.PlotData): Object to visualize data and results """ return InputData(extent, resolution, **kwargs) def i_set_data(geo_data, dtype="foliations", action="Open"): if action == 'Close': geo_data.i_close_set_data() if action == 'Open': geo_data.i_open_set_data(itype=dtype) def select_series(geo_data, series): """ Return the formations of a given serie in string :param series: list of int or list of str :return: formations of a given serie in string separeted by | """ new_geo_data = copy.deepcopy(geo_data) if type(series) == int or type(series[0]) == int: new_geo_data.interfaces = geo_data.interfaces[geo_data.interfaces['order_series'].isin(series)] new_geo_data.foliations = geo_data.foliations[geo_data.foliations['order_series'].isin(series)] elif type(series[0]) == str: new_geo_data.interfaces = geo_data.interfaces[geo_data.interfaces['series'].isin(series)] new_geo_data.foliations = geo_data.foliations[geo_data.foliations['series'].isin(series)] return new_geo_data def set_data_series(geo_data, series_distribution=None, order_series=None, update_p_field=True, verbose=0): geo_data.set_series(series_distribution=series_distribution, order=order_series) try: if update_p_field: geo_data.interpolator.compute_potential_fields() except AttributeError: pass if verbose > 0: return get_series(geo_data) def set_interfaces(geo_data, interf_Dataframe, append=False, update_p_field=True): geo_data.set_interfaces(interf_Dataframe, append=append) # To update the interpolator parameters without calling a new object try: geo_data.interpolator._data = geo_data geo_data.interpolator._grid = geo_data.grid # geo_data.interpolator._set_constant_parameteres(geo_data, geo_data.interpolator._grid) if update_p_field: geo_data.interpolator.compute_potential_fields() except AttributeError: pass def set_foliations(geo_data, foliat_Dataframe, append=False, update_p_field=True): geo_data.set_foliations(foliat_Dataframe, append=append) # To update the interpolator parameters without calling a new object try: geo_data.interpolator._data = geo_data geo_data.interpolator._grid = geo_data.grid # geo_data.interpolator._set_constant_parameteres(geo_data, geo_data.interpolator._grid) if update_p_field: geo_data.interpolator.compute_potential_fields() except AttributeError: pass #DEP? 
def set_grid(geo_data, new_grid=None, extent=None, resolution=None, grid_type="regular_3D", **kwargs):
    """
    Method to initialize the grid. So far it is really simple and only supports the regular grid type.

    Args:
        grid_type (str): regular_3D or regular_2D (I am not even sure if regular 2D is still working)
        **kwargs: Arbitrary keyword arguments.

    Returns:
        self.new_grid(GeMpy_core.new_grid): Object that contains the different grids
    """
    if new_grid is not None:
        # A custom grid must be an array of xyz coordinates, i.e. shape (n, 3).
        assert new_grid.shape[1] == 3 and len(new_grid.shape) == 2, 'The shape of new grid must be (n, 3) where n is' \
                                                                    ' the number of points of the grid'
        geo_data.grid.grid = new_grid
    else:
        if not extent:
            extent = geo_data.extent
        if not resolution:
            resolution = geo_data.resolution

        geo_data.grid = geo_data.GridClass(extent, resolution, grid_type=grid_type, **kwargs)

#DEP?
# def set_interpolator(geo_data, *args, **kwargs):
#     """
#     Method to initialize the class interpolator. All the constant parameters for the interpolation can be passed
#     as args, otherwise they will take the default value (TODO: documentation of the default values)
#
#     Args:
#         *args: Variable length argument list
#         **kwargs: Arbitrary keyword arguments.
#
#     Keyword Args:
#         range_var: Range of the variogram. Default None
#         c_o: Covariance at 0. Default None
#         nugget_effect: Nugget effect of the gradients. Default 0.01
#         u_grade: Grade of the polynomial used in the universal part of the Kriging. Default 2
#         rescaling_factor: Magic factor that multiplies the covariances. Default 2
#
#     Returns:
#         self.Interpolator (GeMpy_core.Interpolator): Object to perform the potential field method
#         self.Plot(GeMpy_core.PlotData): Object to visualize data and results. It gets updated.
#     """
#
#     rescaling_factor = kwargs.get('rescaling_factor', None)
#
#     if 'u_grade' in kwargs:
#         compile_theano = True
#
#     if not getattr(geo_data, 'grid', None):
#         set_grid(geo_data)
#
#     geo_data_int = rescale_data(geo_data, rescaling_factor=rescaling_factor)
#
#     if not getattr(geo_data_int, 'interpolator', None) or compile_theano:
#         print('I am in the setting')
#         geo_data_int.interpolator = geo_data_int.InterpolatorClass(geo_data_int, geo_data_int.grid,
#                                                                    *args, **kwargs)
#     else:
#         geo_data_int.interpolator._data = geo_data_int
#         geo_data_int.interpolator._grid = geo_data_int.grid
#         geo_data_int.interpolator.set_theano_shared_parameteres(geo_data_int, geo_data_int.interpolator._grid, **kwargs)
#
#     return geo_data_int


def plot_data(geo_data, direction="y", series="all", **kwargs):
    plot = PlotData(geo_data)
    plot.plot_data(direction=direction, series=series, **kwargs)
    # TODO saving options
    return plot


def plot_section(geo_data, block, cell_number, direction="y", **kwargs):
    plot = PlotData(geo_data)
    plot.plot_block_section(cell_number, block=block, direction=direction, **kwargs)
    # TODO saving options
    return plot


def plot_potential_field(geo_data, potential_field, cell_number, n_pf=0,
                         direction="y", plot_data=True, series="all", *args, **kwargs):
    plot = PlotData(geo_data)
    plot.plot_potential_field(potential_field, cell_number, n_pf=n_pf,
                              direction=direction, plot_data=plot_data, series=series,
                              *args, **kwargs)


def plot_data_3D(geo_data):
    r, i = visualize(geo_data)
    del r, i
    return None

# DEP
# def compute_potential_fields(geo_data, verbose=0):
#     geo_data.interpolator.compute_potential_fields(verbose=verbose)


def set_interpolation_data(geo_data, **kwargs):
    in_data = InterpolatorInput(geo_data, **kwargs)
    return in_data

# =====================================
# Functions for the InterpolatorData
# =====================================

# TODO: check that this is an interp_data object and, if not, try to create one inside the function from the geo_data
def get_kriging_parameters(interp_data, verbose=0):
    return interp_data.interpolator.get_kriging_parameters(verbose=verbose)


def get_th_fn(interp_data, dtype=None, u_grade=None, **kwargs):
    """
    Compile and return the theano function of the given interpolation data.

    Args:
        interp_data: InterpolatorInput object
        **kwargs: arbitrary keyword arguments passed on to compile_th_fn

    Returns:
        theano function
    """
    # DEP?
    # Choosing float precision for the computation
    # if not dtype:
    #     if theano.config.device == 'gpu':
    #         dtype = 'float32'
    #     else:
    #         print('making float 64')
    #         dtype = 'float64'
    #
    # # We make a rescaled version of geo_data for stability reasons
    # data_interp = set_interpolator(geo_data, dtype=dtype)
    #
    # # These are the shared parameters and the compilation of the function. This will be hidden as well at some point
    # input_data_T = data_interp.interpolator.tg.input_parameters_list()
    #
    # # This prepares the user data for the theano function
    # #input_data_P = data_interp.interpolator.data_prep(u_grade=u_grade)
    #
    # # then we compile; we have to pass the number of formations that are faults!!
    # th_fn = theano.function(input_data_T, data_interp.interpolator.tg.whole_block_model(data_interp.n_faults),
    #                         on_unused_input='ignore',
    #                         allow_input_downcast=True,
    #                         profile=False)

    return interp_data.compile_th_fn(dtype=dtype, **kwargs)


def compute_model(interp_data, u_grade=None):
    # Compile the theano function if it has not been compiled yet.
    if not getattr(interp_data, 'th_fn', None):
        interp_data.compile_th_fn()

    i = interp_data.get_input_data(u_grade=u_grade)
    sol = interp_data.th_fn(*i)

    return _np.squeeze(sol)
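# --- Illustrative workflow sketch (not part of the original module) ---
# The import path, file names, model extent and cell number below are
# assumptions; the call order simply chains the wrapper functions defined in
# this module: create_data -> set_interpolation_data -> compute_model ->
# plot_section.
#
#   import gempy.GemPy_f as gp   # import path assumed
#
#   geo_data = gp.create_data([0, 2000, 0, 2000, 0, 1000],
#                             resolution=[50, 50, 50],
#                             path_i='interfaces.csv',
#                             path_f='foliations.csv')
#   interp_data = gp.set_interpolation_data(geo_data)
#   block = gp.compute_model(interp_data)
#   gp.plot_section(geo_data, block, cell_number=25, direction='y')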
mit
joone/chromium-crosswalk
tools/telemetry/third_party/webpagereplay/rules/log_url.py
30
2237
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import re

from rules import rule


class LogUrl(rule.Rule):
  """Logs the request URL."""

  def __init__(self, url, stop=False):
    r"""Initializes with a url pattern.

    Args:
      url: a string regex, e.g. r'example\.com/id=(\d{6})'.
      stop: boolean ApplyRule should_stop value, defaults to True.
    """
    self._url_re = re.compile(url)
    self._stop = stop

  def IsType(self, rule_type_name):
    """Returns True if the name matches this rule."""
    return rule_type_name == 'log_url'

  def ApplyRule(self, return_value, request, response):
    """Returns True if logged.

    Args:
      return_value: the prior log_url rule's return_value (if any).
      request: the httparchive ArchivedHttpRequest.
      response: the httparchive ArchivedHttpResponse.

    Returns:
      A (should_stop, return_value) tuple, e.g. (False, True).
    """
    del response  # unused.
    url = '%s%s' % (request.host, request.full_path)
    if not self._url_re.match(url):
      return False, return_value
    logging.debug('url: %s', url)
    return self._stop, True

  def __str__(self):
    return _ToString(self, ('url', self._url_re.pattern),
                     None if self._stop else ('stop', False))

  def __repr__(self):
    return str(self)


def _ToString(obj, *items):
  pkg = (obj.__module__[:obj.__module__.rfind('.') + 1]
         if '.' in obj.__module__ else '')
  clname = obj.__class__.__name__
  args = [('%s=r\'%s\'' % item if isinstance(item[1], basestring)
           else '%s=%s' % item)
          for item in items if item]
  return '%s%s(%s)' % (pkg, clname, ', '.join(args))
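# --- Illustrative usage sketch (not part of the original file) ---
# The real caller passes an httparchive ArchivedHttpRequest; a minimal
# stand-in exposing the two attributes ApplyRule reads (host, full_path)
# is used here instead.
if __name__ == '__main__':
  import collections
  logging.basicConfig(level=logging.DEBUG)
  FakeRequest = collections.namedtuple('FakeRequest', ['host', 'full_path'])
  log_rule = LogUrl(r'example\.com/id=\d+')
  should_stop, logged = log_rule.ApplyRule(
      None, FakeRequest('example.com', '/id=123456'), None)
  # ApplyRule returns a (should_stop, return_value) tuple; here (False, True).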
bsd-3-clause
rdeheele/odoo
addons/l10n_pa/__init__.py
2120
1456
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
#    WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
#    consequences resulting from its eventual inadequacies and bugs.
#    End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
#    Service Company.
#
#    This program is Free Software; you can redistribute it and/or
#    modify it under the terms of the GNU General Public License
#    as published by the Free Software Foundation; either version 2
#    of the License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program; if not, write to the Free Software
#    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
madan96/sympy
sympy/solvers/ode.py
7
330554
r""" This module contains :py:meth:`~sympy.solvers.ode.dsolve` and different helper functions that it uses. :py:meth:`~sympy.solvers.ode.dsolve` solves ordinary differential equations. See the docstring on the various functions for their uses. Note that partial differential equations support is in ``pde.py``. Note that hint functions have docstrings describing their various methods, but they are intended for internal use. Use ``dsolve(ode, func, hint=hint)`` to solve an ODE using a specific hint. See also the docstring on :py:meth:`~sympy.solvers.ode.dsolve`. **Functions in this module** These are the user functions in this module: - :py:meth:`~sympy.solvers.ode.dsolve` - Solves ODEs. - :py:meth:`~sympy.solvers.ode.classify_ode` - Classifies ODEs into possible hints for :py:meth:`~sympy.solvers.ode.dsolve`. - :py:meth:`~sympy.solvers.ode.checkodesol` - Checks if an equation is the solution to an ODE. - :py:meth:`~sympy.solvers.ode.homogeneous_order` - Returns the homogeneous order of an expression. - :py:meth:`~sympy.solvers.ode.infinitesimals` - Returns the infinitesimals of the Lie group of point transformations of an ODE, such that it is invariant. - :py:meth:`~sympy.solvers.ode_checkinfsol` - Checks if the given infinitesimals are the actual infinitesimals of a first order ODE. These are the non-solver helper functions that are for internal use. The user should use the various options to :py:meth:`~sympy.solvers.ode.dsolve` to obtain the functionality provided by these functions: - :py:meth:`~sympy.solvers.ode.odesimp` - Does all forms of ODE simplification. - :py:meth:`~sympy.solvers.ode.ode_sol_simplicity` - A key function for comparing solutions by simplicity. - :py:meth:`~sympy.solvers.ode.constantsimp` - Simplifies arbitrary constants. - :py:meth:`~sympy.solvers.ode.constant_renumber` - Renumber arbitrary constants. - :py:meth:`~sympy.solvers.ode._handle_Integral` - Evaluate unevaluated Integrals. See also the docstrings of these functions. **Currently implemented solver methods** The following methods are implemented for solving ordinary differential equations. See the docstrings of the various hint functions for more information on each (run ``help(ode)``): - 1st order separable differential equations. - 1st order differential equations whose coefficients or `dx` and `dy` are functions homogeneous of the same order. - 1st order exact differential equations. - 1st order linear differential equations. - 1st order Bernoulli differential equations. - Power series solutions for first order differential equations. - Lie Group method of solving first order differential equations. - 2nd order Liouville differential equations. - Power series solutions for second order differential equations at ordinary and regular singular points. - `n`\th order linear homogeneous differential equation with constant coefficients. - `n`\th order linear inhomogeneous differential equation with constant coefficients using the method of undetermined coefficients. - `n`\th order linear inhomogeneous differential equation with constant coefficients using the method of variation of parameters. **Philosophy behind this module** This module is designed to make it easy to add new ODE solving methods without having to mess with the solving code for other methods. The idea is that there is a :py:meth:`~sympy.solvers.ode.classify_ode` function, which takes in an ODE and tells you what hints, if any, will solve the ODE. It does this without attempting to solve the ODE, so it is fast. 
Each solving method is a hint, and it has its own function, named ``ode_<hint>``. That function takes in the ODE and any match expression gathered by :py:meth:`~sympy.solvers.ode.classify_ode` and returns a solved result. If this result has any integrals in it, the hint function will return an unevaluated :py:class:`~sympy.integrals.Integral` class. :py:meth:`~sympy.solvers.ode.dsolve`, which is the user wrapper function around all of this, will then call :py:meth:`~sympy.solvers.ode.odesimp` on the result, which, among other things, will attempt to solve the equation for the dependent variable (the function we are solving for), simplify the arbitrary constants in the expression, and evaluate any integrals, if the hint allows it. **How to add new solution methods** If you have an ODE that you want :py:meth:`~sympy.solvers.ode.dsolve` to be able to solve, try to avoid adding special case code here. Instead, try finding a general method that will solve your ODE, as well as others. This way, the :py:mod:`~sympy.solvers.ode` module will become more robust, and unhindered by special case hacks. WolphramAlpha and Maple's DETools[odeadvisor] function are two resources you can use to classify a specific ODE. It is also better for a method to work with an `n`\th order ODE instead of only with specific orders, if possible. To add a new method, there are a few things that you need to do. First, you need a hint name for your method. Try to name your hint so that it is unambiguous with all other methods, including ones that may not be implemented yet. If your method uses integrals, also include a ``hint_Integral`` hint. If there is more than one way to solve ODEs with your method, include a hint for each one, as well as a ``<hint>_best`` hint. Your ``ode_<hint>_best()`` function should choose the best using min with ``ode_sol_simplicity`` as the key argument. See :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best`, for example. The function that uses your method will be called ``ode_<hint>()``, so the hint must only use characters that are allowed in a Python function name (alphanumeric characters and the underscore '``_``' character). Include a function for every hint, except for ``_Integral`` hints (:py:meth:`~sympy.solvers.ode.dsolve` takes care of those automatically). Hint names should be all lowercase, unless a word is commonly capitalized (such as Integral or Bernoulli). If you have a hint that you do not want to run with ``all_Integral`` that doesn't have an ``_Integral`` counterpart (such as a best hint that would defeat the purpose of ``all_Integral``), you will need to remove it manually in the :py:meth:`~sympy.solvers.ode.dsolve` code. See also the :py:meth:`~sympy.solvers.ode.classify_ode` docstring for guidelines on writing a hint name. Determine *in general* how the solutions returned by your method compare with other methods that can potentially solve the same ODEs. Then, put your hints in the :py:data:`~sympy.solvers.ode.allhints` tuple in the order that they should be called. The ordering of this tuple determines which hints are default. Note that exceptions are ok, because it is easy for the user to choose individual hints with :py:meth:`~sympy.solvers.ode.dsolve`. In general, ``_Integral`` variants should go at the end of the list, and ``_best`` variants should go before the various hints they apply to. 
For example, the ``undetermined_coefficients`` hint comes before the ``variation_of_parameters`` hint because, even though variation of parameters is more general than undetermined coefficients, undetermined coefficients generally returns cleaner results for the ODEs that it can solve than variation of parameters does, and it does not require integration, so it is much faster. Next, you need to have a match expression or a function that matches the type of the ODE, which you should put in :py:meth:`~sympy.solvers.ode.classify_ode` (if the match function is more than just a few lines, like :py:meth:`~sympy.solvers.ode._undetermined_coefficients_match`, it should go outside of :py:meth:`~sympy.solvers.ode.classify_ode`). It should match the ODE without solving for it as much as possible, so that :py:meth:`~sympy.solvers.ode.classify_ode` remains fast and is not hindered by bugs in solving code. Be sure to consider corner cases. For example, if your solution method involves dividing by something, make sure you exclude the case where that division will be 0. In most cases, the matching of the ODE will also give you the various parts that you need to solve it. You should put that in a dictionary (``.match()`` will do this for you), and add that as ``matching_hints['hint'] = matchdict`` in the relevant part of :py:meth:`~sympy.solvers.ode.classify_ode`. :py:meth:`~sympy.solvers.ode.classify_ode` will then send this to :py:meth:`~sympy.solvers.ode.dsolve`, which will send it to your function as the ``match`` argument. Your function should be named ``ode_<hint>(eq, func, order, match)`. If you need to send more information, put it in the ``match`` dictionary. For example, if you had to substitute in a dummy variable in :py:meth:`~sympy.solvers.ode.classify_ode` to match the ODE, you will need to pass it to your function using the `match` dict to access it. You can access the independent variable using ``func.args[0]``, and the dependent variable (the function you are trying to solve for) as ``func.func``. If, while trying to solve the ODE, you find that you cannot, raise ``NotImplementedError``. :py:meth:`~sympy.solvers.ode.dsolve` will catch this error with the ``all`` meta-hint, rather than causing the whole routine to fail. Add a docstring to your function that describes the method employed. Like with anything else in SymPy, you will need to add a doctest to the docstring, in addition to real tests in ``test_ode.py``. Try to maintain consistency with the other hint functions' docstrings. Add your method to the list at the top of this docstring. Also, add your method to ``ode.rst`` in the ``docs/src`` directory, so that the Sphinx docs will pull its docstring into the main SymPy documentation. Be sure to make the Sphinx documentation by running ``make html`` from within the doc directory to verify that the docstring formats correctly. If your solution method involves integrating, use :py:meth:`Integral() <sympy.integrals.integrals.Integral>` instead of :py:meth:`~sympy.core.expr.Expr.integrate`. This allows the user to bypass hard/slow integration by using the ``_Integral`` variant of your hint. In most cases, calling :py:meth:`sympy.core.basic.Basic.doit` will integrate your solution. If this is not the case, you will need to write special code in :py:meth:`~sympy.solvers.ode._handle_Integral`. Arbitrary constants should be symbols named ``C1``, ``C2``, and so on. All solution methods should return an equality instance. 
If you need an arbitrary number of arbitrary constants, you can use ``constants = numbered_symbols(prefix='C', cls=Symbol, start=1)``. If it is possible to solve for the dependent function in a general way, do so. Otherwise, do as best as you can, but do not call solve in your ``ode_<hint>()`` function. :py:meth:`~sympy.solvers.ode.odesimp` will attempt to solve the solution for you, so you do not need to do that. Lastly, if your ODE has a common simplification that can be applied to your solutions, you can add a special case in :py:meth:`~sympy.solvers.ode.odesimp` for it. For example, solutions returned from the ``1st_homogeneous_coeff`` hints often have many :py:meth:`~sympy.functions.log` terms, so :py:meth:`~sympy.solvers.ode.odesimp` calls :py:meth:`~sympy.simplify.simplify.logcombine` on them (it also helps to write the arbitrary constant as ``log(C1)`` instead of ``C1`` in this case). Also consider common ways that you can rearrange your solution to have :py:meth:`~sympy.solvers.ode.constantsimp` take better advantage of it. It is better to put simplification in :py:meth:`~sympy.solvers.ode.odesimp` than in your method, because it can then be turned off with the simplify flag in :py:meth:`~sympy.solvers.ode.dsolve`. If you have any extraneous simplification in your function, be sure to only run it using ``if match.get('simplify', True):``, especially if it can be slow or if it can reduce the domain of the solution. Finally, as with every contribution to SymPy, your method will need to be tested. Add a test for each method in ``test_ode.py``. Follow the conventions there, i.e., test the solver using ``dsolve(eq, f(x), hint=your_hint)``, and also test the solution using :py:meth:`~sympy.solvers.ode.checkodesol` (you can put these in a separate tests and skip/XFAIL if it runs too slow/doesn't work). Be sure to call your hint specifically in :py:meth:`~sympy.solvers.ode.dsolve`, that way the test won't be broken simply by the introduction of another matching hint. If your method works for higher order (>1) ODEs, you will need to run ``sol = constant_renumber(sol, 'C', 1, order)`` for each solution, where ``order`` is the order of the ODE. This is because ``constant_renumber`` renumbers the arbitrary constants by printing order, which is platform dependent. Try to test every corner case of your solver, including a range of orders if it is a `n`\th order solver, but if your solver is slow, such as if it involves hard integration, try to keep the test run time down. Feel free to refactor existing hints to avoid duplicating code or creating inconsistencies. If you can show that your method exactly duplicates an existing method, including in the simplicity and speed of obtaining the solutions, then you can remove the old, less general method. The existing code is tested extensively in ``test_ode.py``, so if anything is broken, one of those tests will surely fail. 
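As a rough sketch only (the hint name and the ``match`` key below are
placeholders, not an existing hint), a hint function receives the ODE, the
function being solved for, the order, and the ``match`` dictionary stored by
:py:meth:`~sympy.solvers.ode.classify_ode`, and returns an
:py:class:`~sympy.core.relational.Equality`::

    def ode_pure_quadrature(eq, func, order, match):
        # Solve f'(x) = g(x) by direct integration, assuming the matching
        # code stored the right-hand side under match['g'].
        x = func.args[0]
        C1 = get_numbered_constants(eq, num=1)
        return Eq(func, Integral(match['g'], x) + C1)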
""" from __future__ import print_function, division from collections import defaultdict from itertools import islice from sympy.core import Add, S, Mul, Pow, oo from sympy.core.compatibility import ordered, iterable, is_sequence, range from sympy.core.containers import Tuple from sympy.core.exprtools import factor_terms from sympy.core.expr import AtomicExpr, Expr from sympy.core.function import (Function, Derivative, AppliedUndef, diff, expand, expand_mul, Subs, _mexpand) from sympy.core.multidimensional import vectorize from sympy.core.numbers import NaN, zoo, I, Number from sympy.core.relational import Equality, Eq from sympy.core.symbol import Symbol, Wild, Dummy, symbols from sympy.core.sympify import sympify from sympy.logic.boolalg import BooleanAtom from sympy.functions import cos, exp, im, log, re, sin, tan, sqrt, \ atan2, conjugate from sympy.functions.combinatorial.factorials import factorial from sympy.integrals.integrals import Integral, integrate from sympy.matrices import wronskian, Matrix, eye, zeros from sympy.polys import (Poly, RootOf, rootof, terms_gcd, PolynomialError, lcm) from sympy.polys.polyroots import roots_quartic from sympy.polys.polytools import cancel, degree, div from sympy.series import Order from sympy.series.series import series from sympy.simplify import collect, logcombine, powsimp, separatevars, \ simplify, trigsimp, denom, posify, cse from sympy.simplify.powsimp import powdenest from sympy.simplify.radsimp import collect_const from sympy.solvers import solve from sympy.solvers.pde import pdsolve from sympy.utilities import numbered_symbols, default_sort_key, sift from sympy.solvers.deutils import _preprocess, ode_order, _desolve #: This is a list of hints in the order that they should be preferred by #: :py:meth:`~sympy.solvers.ode.classify_ode`. In general, hints earlier in the #: list should produce simpler solutions than those later in the list (for #: ODEs that fit both). For now, the order of this list is based on empirical #: observations by the developers of SymPy. #: #: The hint used by :py:meth:`~sympy.solvers.ode.dsolve` for a specific ODE #: can be overridden (see the docstring). #: #: In general, ``_Integral`` hints are grouped at the end of the list, unless #: there is a method that returns an unevaluable integral most of the time #: (which go near the end of the list anyway). ``default``, ``all``, #: ``best``, and ``all_Integral`` meta-hints should not be included in this #: list, but ``_best`` and ``_Integral`` hints should be included. 
allhints = ( "separable", "1st_exact", "1st_linear", "Bernoulli", "Riccati_special_minus2", "1st_homogeneous_coeff_best", "1st_homogeneous_coeff_subs_indep_div_dep", "1st_homogeneous_coeff_subs_dep_div_indep", "almost_linear", "linear_coefficients", "separable_reduced", "1st_power_series", "lie_group", "nth_linear_constant_coeff_homogeneous", "nth_linear_euler_eq_homogeneous", "nth_linear_constant_coeff_undetermined_coefficients", "nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients", "nth_linear_constant_coeff_variation_of_parameters", "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters", "Liouville", "2nd_power_series_ordinary", "2nd_power_series_regular", "separable_Integral", "1st_exact_Integral", "1st_linear_Integral", "Bernoulli_Integral", "1st_homogeneous_coeff_subs_indep_div_dep_Integral", "1st_homogeneous_coeff_subs_dep_div_indep_Integral", "almost_linear_Integral", "linear_coefficients_Integral", "separable_reduced_Integral", "nth_linear_constant_coeff_variation_of_parameters_Integral", "nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral", "Liouville_Integral", ) lie_heuristics = ( "abaco1_simple", "abaco1_product", "abaco2_similar", "abaco2_unique_unknown", "abaco2_unique_general", "linear", "function_sum", "bivariate", "chi" ) def sub_func_doit(eq, func, new): r""" When replacing the func with something else, we usually want the derivative evaluated, so this function helps in making that happen. To keep subs from having to look through all derivatives, we mask them off with dummy variables, do the func sub, and then replace masked-off derivatives with their doit values. Examples ======== >>> from sympy import Derivative, symbols, Function >>> from sympy.solvers.ode import sub_func_doit >>> x, z = symbols('x, z') >>> y = Function('y') >>> sub_func_doit(3*Derivative(y(x), x) - 1, y(x), x) 2 >>> sub_func_doit(x*Derivative(y(x), x) - y(x)**2 + y(x), y(x), ... 1/(x*(z + 1/x))) x*(-1/(x**2*(z + 1/x)) + 1/(x**3*(z + 1/x)**2)) + 1/(x*(z + 1/x)) ...- 1/(x**2*(z + 1/x)**2) """ reps = {} repu = {} for d in eq.atoms(Derivative): u = Dummy('u') repu[u] = d.subs(func, new).doit() reps[d] = u return eq.subs(reps).subs(func, new).subs(repu) def get_numbered_constants(eq, num=1, start=1, prefix='C'): """ Returns a list of constants that do not occur in eq already. """ if isinstance(eq, Expr): eq = [eq] elif not iterable(eq): raise ValueError("Expected Expr or iterable but got %s" % eq) atom_set = set().union(*[i.free_symbols for i in eq]) ncs = numbered_symbols(start=start, prefix=prefix, exclude=atom_set) Cs = [next(ncs) for i in range(num)] return (Cs[0] if num == 1 else tuple(Cs)) def dsolve(eq, func=None, hint="default", simplify=True, ics= None, xi=None, eta=None, x0=0, n=6, **kwargs): r""" Solves any (supported) kind of ordinary differential equation and system of ordinary differential equations. For single ordinary differential equation ========================================= It is classified under this when number of equation in ``eq`` is one. **Usage** ``dsolve(eq, f(x), hint)`` -> Solve ordinary differential equation ``eq`` for function ``f(x)``, using method ``hint``. **Details** ``eq`` can be any supported ordinary differential equation (see the :py:mod:`~sympy.solvers.ode` docstring for supported methods). This can either be an :py:class:`~sympy.core.relational.Equality`, or an expression, which is assumed to be equal to ``0``. 
``f(x)`` is a function of one variable whose derivatives in that variable make up the ordinary differential equation ``eq``. In many cases it is not necessary to provide this; it will be autodetected (and an error raised if it couldn't be detected). ``hint`` is the solving method that you want dsolve to use. Use ``classify_ode(eq, f(x))`` to get all of the possible hints for an ODE. The default hint, ``default``, will use whatever hint is returned first by :py:meth:`~sympy.solvers.ode.classify_ode`. See Hints below for more options that you can use for hint. ``simplify`` enables simplification by :py:meth:`~sympy.solvers.ode.odesimp`. See its docstring for more information. Turn this off, for example, to disable solving of solutions for ``func`` or simplification of arbitrary constants. It will still integrate with this hint. Note that the solution may contain more arbitrary constants than the order of the ODE with this option enabled. ``xi`` and ``eta`` are the infinitesimal functions of an ordinary differential equation. They are the infinitesimals of the Lie group of point transformations for which the differential equation is invariant. The user can specify values for the infinitesimals. If nothing is specified, ``xi`` and ``eta`` are calculated using :py:meth:`~sympy.solvers.ode.infinitesimals` with the help of various heuristics. ``ics`` is the set of boundary conditions for the differential equation. It should be given in the form of ``{f(x0): x1, f(x).diff(x).subs(x, x2): x3}`` and so on. For now initial conditions are implemented only for power series solutions of first-order differential equations which should be given in the form of ``{f(x0): x1}`` (See issue 4720). If nothing is specified for this case ``f(0)`` is assumed to be ``C0`` and the power series solution is calculated about 0. ``x0`` is the point about which the power series solution of a differential equation is to be evaluated. ``n`` gives the exponent of the dependent variable up to which the power series solution of a differential equation is to be evaluated. **Hints** Aside from the various solving methods, there are also some meta-hints that you can pass to :py:meth:`~sympy.solvers.ode.dsolve`: ``default``: This uses whatever hint is returned first by :py:meth:`~sympy.solvers.ode.classify_ode`. This is the default argument to :py:meth:`~sympy.solvers.ode.dsolve`. ``all``: To make :py:meth:`~sympy.solvers.ode.dsolve` apply all relevant classification hints, use ``dsolve(ODE, func, hint="all")``. This will return a dictionary of ``hint:solution`` terms. If a hint causes dsolve to raise the ``NotImplementedError``, value of that hint's key will be the exception object raised. The dictionary will also include some special keys: - ``order``: The order of the ODE. See also :py:meth:`~sympy.solvers.deutils.ode_order` in ``deutils.py``. - ``best``: The simplest hint; what would be returned by ``best`` below. - ``best_hint``: The hint that would produce the solution given by ``best``. If more than one hint produces the best solution, the first one in the tuple returned by :py:meth:`~sympy.solvers.ode.classify_ode` is chosen. - ``default``: The solution that would be returned by default. This is the one produced by the hint that appears first in the tuple returned by :py:meth:`~sympy.solvers.ode.classify_ode`. ``all_Integral``: This is the same as ``all``, except if a hint also has a corresponding ``_Integral`` hint, it only returns the ``_Integral`` hint. 
This is useful if ``all`` causes :py:meth:`~sympy.solvers.ode.dsolve` to hang because of a difficult or impossible integral. This meta-hint will also be much faster than ``all``, because :py:meth:`~sympy.core.expr.Expr.integrate` is an expensive routine. ``best``: To have :py:meth:`~sympy.solvers.ode.dsolve` try all methods and return the simplest one. This takes into account whether the solution is solvable in the function, whether it contains any Integral classes (i.e. unevaluatable integrals), and which one is the shortest in size. See also the :py:meth:`~sympy.solvers.ode.classify_ode` docstring for more info on hints, and the :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints. **Tips** - You can declare the derivative of an unknown function this way: >>> from sympy import Function, Derivative >>> from sympy.abc import x # x is the independent variable >>> f = Function("f")(x) # f is a function of x >>> # f_ will be the derivative of f with respect to x >>> f_ = Derivative(f, x) - See ``test_ode.py`` for many tests, which serves also as a set of examples for how to use :py:meth:`~sympy.solvers.ode.dsolve`. - :py:meth:`~sympy.solvers.ode.dsolve` always returns an :py:class:`~sympy.core.relational.Equality` class (except for the case when the hint is ``all`` or ``all_Integral``). If possible, it solves the solution explicitly for the function being solved for. Otherwise, it returns an implicit solution. - Arbitrary constants are symbols named ``C1``, ``C2``, and so on. - Because all solutions should be mathematically equivalent, some hints may return the exact same result for an ODE. Often, though, two different hints will return the same solution formatted differently. The two should be equivalent. Also note that sometimes the values of the arbitrary constants in two different solutions may not be the same, because one constant may have "absorbed" other constants into it. - Do ``help(ode.ode_<hintname>)`` to get help more information on a specific hint, where ``<hintname>`` is the name of a hint without ``_Integral``. For system of ordinary differential equations ============================================= **Usage** ``dsolve(eq, func)`` -> Solve a system of ordinary differential equations ``eq`` for ``func`` being list of functions including `x(t)`, `y(t)`, `z(t)` where number of functions in the list depends upon the number of equations provided in ``eq``. **Details** ``eq`` can be any supported system of ordinary differential equations This can either be an :py:class:`~sympy.core.relational.Equality`, or an expression, which is assumed to be equal to ``0``. ``func`` holds ``x(t)`` and ``y(t)`` being functions of one variable which together with some of their derivatives make up the system of ordinary differential equation ``eq``. It is not necessary to provide this; it will be autodetected (and an error raised if it couldn't be detected). **Hints** The hints are formed by parameters returned by classify_sysode, combining them give hints name used later for forming method name. 
Examples ======== >>> from sympy import Function, dsolve, Eq, Derivative, sin, cos, symbols >>> from sympy.abc import x >>> f = Function('f') >>> dsolve(Derivative(f(x), x, x) + 9*f(x), f(x)) Eq(f(x), C1*sin(3*x) + C2*cos(3*x)) >>> eq = sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x) >>> dsolve(eq, hint='1st_exact') [Eq(f(x), -acos(C1/cos(x)) + 2*pi), Eq(f(x), acos(C1/cos(x)))] >>> dsolve(eq, hint='almost_linear') [Eq(f(x), -acos(C1/sqrt(-cos(x)**2)) + 2*pi), Eq(f(x), acos(C1/sqrt(-cos(x)**2)))] >>> t = symbols('t') >>> x, y = symbols('x, y', function=True) >>> eq = (Eq(Derivative(x(t),t), 12*t*x(t) + 8*y(t)), Eq(Derivative(y(t),t), 21*x(t) + 7*t*y(t))) >>> dsolve(eq) [Eq(x(t), C1*x0 + C2*x0*Integral(8*exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0**2, t)), Eq(y(t), C1*y0 + C2(y0*Integral(8*exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0**2, t) + exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0))] >>> eq = (Eq(Derivative(x(t),t),x(t)*y(t)*sin(t)), Eq(Derivative(y(t),t),y(t)**2*sin(t))) >>> dsolve(eq) {Eq(x(t), -exp(C1)/(C2*exp(C1) - cos(t))), Eq(y(t), -1/(C1 - cos(t)))} """ if iterable(eq): match = classify_sysode(eq, func) eq = match['eq'] order = match['order'] func = match['func'] t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] # keep highest order term coefficient positive for i in range(len(eq)): for func_ in func: if isinstance(func_, list): pass else: if eq[i].coeff(diff(func[i],t,ode_order(eq[i], func[i]))).is_negative: eq[i] = -eq[i] match['eq'] = eq if len(set(order.values()))!=1: raise ValueError("It solves only those systems of equations whose orders are equal") match['order'] = list(order.values())[0] def recur_len(l): return sum(recur_len(item) if isinstance(item,list) else 1 for item in l) if recur_len(func) != len(eq): raise ValueError("dsolve() and classify_sysode() work with " "number of functions being equal to number of equations") if match['type_of_equation'] is None: raise NotImplementedError else: if match['is_linear'] == True: if match['no_of_equation'] > 3: solvefunc = globals()['sysode_linear_neq_order%(order)s' % match] else: solvefunc = globals()['sysode_linear_%(no_of_equation)seq_order%(order)s' % match] else: solvefunc = globals()['sysode_nonlinear_%(no_of_equation)seq_order%(order)s' % match] sols = solvefunc(match) return sols else: given_hint = hint # hint given by the user # See the docstring of _desolve for more details. hints = _desolve(eq, func=func, hint=hint, simplify=True, xi=xi, eta=eta, type='ode', ics=ics, x0=x0, n=n, **kwargs) eq = hints.pop('eq', eq) all_ = hints.pop('all', False) if all_: retdict = {} failed_hints = {} gethints = classify_ode(eq, dict=True) orderedhints = gethints['ordered_hints'] for hint in hints: try: rv = _helper_simplify(eq, hint, hints[hint], simplify) except NotImplementedError as detail: failed_hints[hint] = detail else: retdict[hint] = rv func = hints[hint]['func'] retdict['best'] = min(list(retdict.values()), key=lambda x: ode_sol_simplicity(x, func, trysolving=not simplify)) if given_hint == 'best': return retdict['best'] for i in orderedhints: if retdict['best'] == retdict.get(i, None): retdict['best_hint'] = i break retdict['default'] = gethints['default'] retdict['order'] = gethints['order'] retdict.update(failed_hints) return retdict else: # The key 'hint' stores the hint needed to be solved for. 
hint = hints['hint'] return _helper_simplify(eq, hint, hints, simplify) def _helper_simplify(eq, hint, match, simplify=True, **kwargs): r""" Helper function of dsolve that calls the respective :py:mod:`~sympy.solvers.ode` functions to solve for the ordinary differential equations. This minimises the computation in calling :py:meth:`~sympy.solvers.deutils._desolve` multiple times. """ r = match if hint.endswith('_Integral'): solvefunc = globals()['ode_' + hint[:-len('_Integral')]] else: solvefunc = globals()['ode_' + hint] func = r['func'] order = r['order'] match = r[hint] if simplify: # odesimp() will attempt to integrate, if necessary, apply constantsimp(), # attempt to solve for func, and apply any other hint specific # simplifications sols = solvefunc(eq, func, order, match) free = eq.free_symbols cons = lambda s: s.free_symbols.difference(free) if isinstance(sols, Expr): return odesimp(sols, func, order, cons(sols), hint) return [odesimp(s, func, order, cons(s), hint) for s in sols] else: # We still want to integrate (you can disable it separately with the hint) match['simplify'] = False # Some hints can take advantage of this option rv = _handle_Integral(solvefunc(eq, func, order, match), func, order, hint) return rv def classify_ode(eq, func=None, dict=False, ics=None, **kwargs): r""" Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve` classifications for an ODE. The tuple is ordered so that first item is the classification that :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In general, classifications at the near the beginning of the list will produce better solutions faster than those near the end, thought there are always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a different classification, use ``dsolve(ODE, func, hint=<classification>)``. See also the :py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints you can use. If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will return a dictionary of ``hint:match`` expression terms. This is intended for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that because dictionaries are ordered arbitrarily, this will most likely not be in the same order as the tuple. You can get help on different hints by executing ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint without ``_Integral``. See :py:data:`~sympy.solvers.ode.allhints` or the :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`. Notes ===== These are remarks on hint names. ``_Integral`` If a classification has ``_Integral`` at the end, it will return the expression with an unevaluated :py:class:`~sympy.integrals.Integral` class in it. Note that a hint may do this anyway if :py:meth:`~sympy.core.expr.Expr.integrate` cannot do the integral, though just using an ``_Integral`` will do so much faster. Indeed, an ``_Integral`` hint will always be faster than its corresponding hint without ``_Integral`` because :py:meth:`~sympy.core.expr.Expr.integrate` is an expensive routine. If :py:meth:`~sympy.solvers.ode.dsolve` hangs, it is probably because :py:meth:`~sympy.core.expr.Expr.integrate` is hanging on a tough or impossible integral. Try using an ``_Integral`` hint or ``all_Integral`` to get it return something. Note that some hints do not have ``_Integral`` counterparts. This is because :py:meth:`~sympy.solvers.ode.integrate` is not used in solving the ODE for those method. 
For example, `n`\th order linear homogeneous ODEs with constant coefficients do not require integration to solve, so there is no ``nth_linear_homogeneous_constant_coeff_Integrate`` hint. You can easily evaluate any unevaluated :py:class:`~sympy.integrals.Integral`\s in an expression by doing ``expr.doit()``. Ordinals Some hints contain an ordinal such as ``1st_linear``. This is to help differentiate them from other hints, as well as from other methods that may not be implemented yet. If a hint has ``nth`` in it, such as the ``nth_linear`` hints, this means that the method used to applies to ODEs of any order. ``indep`` and ``dep`` Some hints contain the words ``indep`` or ``dep``. These reference the independent variable and the dependent function, respectively. For example, if an ODE is in terms of `f(x)`, then ``indep`` will refer to `x` and ``dep`` will refer to `f`. ``subs`` If a hints has the word ``subs`` in it, it means the the ODE is solved by substituting the expression given after the word ``subs`` for a single dummy variable. This is usually in terms of ``indep`` and ``dep`` as above. The substituted expression will be written only in characters allowed for names of Python objects, meaning operators will be spelled out. For example, ``indep``/``dep`` will be written as ``indep_div_dep``. ``coeff`` The word ``coeff`` in a hint refers to the coefficients of something in the ODE, usually of the derivative terms. See the docstring for the individual methods for more info (``help(ode)``). This is contrast to ``coefficients``, as in ``undetermined_coefficients``, which refers to the common name of a method. ``_best`` Methods that have more than one fundamental way to solve will have a hint for each sub-method and a ``_best`` meta-classification. This will evaluate all hints and return the best, using the same considerations as the normal ``best`` meta-hint. Examples ======== >>> from sympy import Function, classify_ode, Eq >>> from sympy.abc import x >>> f = Function('f') >>> classify_ode(Eq(f(x).diff(x), 0), f(x)) ('separable', '1st_linear', '1st_homogeneous_coeff_best', '1st_homogeneous_coeff_subs_indep_div_dep', '1st_homogeneous_coeff_subs_dep_div_indep', '1st_power_series', 'lie_group', 'nth_linear_constant_coeff_homogeneous', 'separable_Integral', '1st_linear_Integral', '1st_homogeneous_coeff_subs_indep_div_dep_Integral', '1st_homogeneous_coeff_subs_dep_div_indep_Integral') >>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4) ('nth_linear_constant_coeff_undetermined_coefficients', 'nth_linear_constant_coeff_variation_of_parameters', 'nth_linear_constant_coeff_variation_of_parameters_Integral') """ prep = kwargs.pop('prep', True) if func and len(func.args) != 1: raise ValueError("dsolve() and classify_ode() only " "work with functions of one variable, not %s" % func) if prep or func is None: eq, func_ = _preprocess(eq, func) if func is None: func = func_ x = func.args[0] f = func.func y = Dummy('y') xi = kwargs.get('xi') eta = kwargs.get('eta') terms = kwargs.get('n') if isinstance(eq, Equality): if eq.rhs != 0: return classify_ode(eq.lhs - eq.rhs, func, ics=ics, xi=xi, n=terms, eta=eta, prep=False) eq = eq.lhs order = ode_order(eq, f(x)) # hint:matchdict or hint:(tuple of matchdicts) # Also will contain "default":<default hint> and "order":order items. 
matching_hints = {"order": order} if not order: if dict: matching_hints["default"] = None return matching_hints else: return () df = f(x).diff(x) a = Wild('a', exclude=[f(x)]) b = Wild('b', exclude=[f(x)]) c = Wild('c', exclude=[f(x)]) d = Wild('d', exclude=[df, f(x).diff(x, 2)]) e = Wild('e', exclude=[df]) k = Wild('k', exclude=[df]) n = Wild('n', exclude=[f(x)]) c1 = Wild('c1', exclude=[x]) a2 = Wild('a2', exclude=[x, f(x), df]) b2 = Wild('b2', exclude=[x, f(x), df]) c2 = Wild('c2', exclude=[x, f(x), df]) d2 = Wild('d2', exclude=[x, f(x), df]) a3 = Wild('a3', exclude=[f(x), df, f(x).diff(x, 2)]) b3 = Wild('b3', exclude=[f(x), df, f(x).diff(x, 2)]) c3 = Wild('c3', exclude=[f(x), df, f(x).diff(x, 2)]) r3 = {'xi': xi, 'eta': eta} # Used for the lie_group hint boundary = {} # Used to extract initial conditions C1 = Symbol("C1") eq = expand(eq) # Preprocessing to get the initial conditions out if ics is not None: for funcarg in ics: # Separating derivatives if isinstance(funcarg, Subs): deriv = funcarg.expr old = funcarg.variables[0] new = funcarg.point[0] if isinstance(deriv, Derivative) and isinstance(deriv.args[0], AppliedUndef) and deriv.args[0].func == f and old == x and not new.has(x): dorder = ode_order(deriv, x) temp = 'f' + str(dorder) boundary.update({temp: new, temp + 'val': ics[funcarg]}) else: raise ValueError("Enter valid boundary conditions for Derivatives") # Separating functions elif isinstance(funcarg, AppliedUndef): if funcarg.func == f and len(funcarg.args) == 1 and \ not funcarg.args[0].has(x): boundary.update({'f0': funcarg.args[0], 'f0val': ics[funcarg]}) else: raise ValueError("Enter valid boundary conditions for Function") else: raise ValueError("Enter boundary conditions of the form ics " " = {f(point}: value, f(point).diff(point, order).subs(arg, point) " ":value") # Precondition to try remove f(x) from highest order derivative reduced_eq = None if eq.is_Add: deriv_coef = eq.coeff(f(x).diff(x, order)) if deriv_coef not in (1, 0): r = deriv_coef.match(a*f(x)**c1) if r and r[c1]: den = f(x)**r[c1] reduced_eq = Add(*[arg/den for arg in eq.args]) if not reduced_eq: reduced_eq = eq if order == 1: ## Linear case: a(x)*y'+b(x)*y+c(x) == 0 if eq.is_Add: ind, dep = reduced_eq.as_independent(f) else: u = Dummy('u') ind, dep = (reduced_eq + u).as_independent(f) ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]] r = {a: dep.coeff(df), b: dep.coeff(f(x)), c: ind} # double check f[a] since the preconditioning may have failed if not r[a].has(f) and not r[b].has(f) and ( r[a]*df + r[b]*f(x) + r[c]).expand() - reduced_eq == 0: r['a'] = a r['b'] = b r['c'] = c matching_hints["1st_linear"] = r matching_hints["1st_linear_Integral"] = r ## Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0 r = collect( reduced_eq, f(x), exact=True).match(a*df + b*f(x) + c*f(x)**n) if r and r[c] != 0 and r[n] != 1: # See issue 4676 r['a'] = a r['b'] = b r['c'] = c r['n'] = n matching_hints["Bernoulli"] = r matching_hints["Bernoulli_Integral"] = r ## Riccati special n == -2 case: a2*y'+b2*y**2+c2*y/x+d2/x**2 == 0 r = collect(reduced_eq, f(x), exact=True).match(a2*df + b2*f(x)**2 + c2*f(x)/x + d2/x**2) if r and r[b2] != 0 and (r[c2] != 0 or r[d2] != 0): r['a2'] = a2 r['b2'] = b2 r['c2'] = c2 r['d2'] = d2 matching_hints["Riccati_special_minus2"] = r # NON-REDUCED FORM OF EQUATION matches r = collect(eq, df, exact=True).match(d + e * df) if r: r['d'] = d r['e'] = e r['y'] = y r[d] = r[d].subs(f(x), y) r[e] = r[e].subs(f(x), y) # FIRST ORDER POWER SERIES WHICH NEEDS INITIAL CONDITIONS # TODO: Hint first order 
series should match only if d/e is analytic. # For now, only d/e and (d/e).diff(arg) is checked for existence at # at a given point. # This is currently done internally in ode_1st_power_series. point = boundary.get('f0', 0) value = boundary.get('f0val', C1) check = cancel(r[d]/r[e]) check1 = check.subs({x: point, y: value}) if not check1.has(oo) and not check1.has(zoo) and \ not check1.has(NaN) and not check1.has(-oo): check2 = (check1.diff(x)).subs({x: point, y: value}) if not check2.has(oo) and not check2.has(zoo) and \ not check2.has(NaN) and not check2.has(-oo): rseries = r.copy() rseries.update({'terms': terms, 'f0': point, 'f0val': value}) matching_hints["1st_power_series"] = rseries r3.update(r) ## Exact Differential Equation: P(x, y) + Q(x, y)*y' = 0 where # dP/dy == dQ/dx try: if r[d] != 0: numerator = simplify(r[d].diff(y) - r[e].diff(x)) # The following few conditions try to convert a non-exact # differential equation into an exact one. # References : Differential equations with applications # and historical notes - George E. Simmons if numerator: # If (dP/dy - dQ/dx) / Q = f(x) # then exp(integral(f(x))*equation becomes exact factor = simplify(numerator/r[e]) variables = factor.free_symbols if len(variables) == 1 and x == variables.pop(): factor = exp(Integral(factor).doit()) r[d] *= factor r[e] *= factor matching_hints["1st_exact"] = r matching_hints["1st_exact_Integral"] = r else: # If (dP/dy - dQ/dx) / -P = f(y) # then exp(integral(f(y))*equation becomes exact factor = simplify(-numerator/r[d]) variables = factor.free_symbols if len(variables) == 1 and y == variables.pop(): factor = exp(Integral(factor).doit()) r[d] *= factor r[e] *= factor matching_hints["1st_exact"] = r matching_hints["1st_exact_Integral"] = r else: matching_hints["1st_exact"] = r matching_hints["1st_exact_Integral"] = r except NotImplementedError: # Differentiating the coefficients might fail because of things # like f(2*x).diff(x). See issue 4624 and issue 4719. pass # Any first order ODE can be ideally solved by the Lie Group # method matching_hints["lie_group"] = r3 # This match is used for several cases below; we now collect on # f(x) so the matching works. r = collect(reduced_eq, df, exact=True).match(d + e*df) if r: # Using r[d] and r[e] without any modification for hints # linear-coefficients and separable-reduced. 
num, den = r[d], r[e] # ODE = d/e + df r['d'] = d r['e'] = e r['y'] = y r[d] = num.subs(f(x), y) r[e] = den.subs(f(x), y) ## Separable Case: y' == P(y)*Q(x) r[d] = separatevars(r[d]) r[e] = separatevars(r[e]) # m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y' m1 = separatevars(r[d], dict=True, symbols=(x, y)) m2 = separatevars(r[e], dict=True, symbols=(x, y)) if m1 and m2: r1 = {'m1': m1, 'm2': m2, 'y': y} matching_hints["separable"] = r1 matching_hints["separable_Integral"] = r1 ## First order equation with homogeneous coefficients: # dy/dx == F(y/x) or dy/dx == F(x/y) ordera = homogeneous_order(r[d], x, y) if ordera is not None: orderb = homogeneous_order(r[e], x, y) if ordera == orderb: # u1=y/x and u2=x/y u1 = Dummy('u1') u2 = Dummy('u2') s = "1st_homogeneous_coeff_subs" s1 = s + "_dep_div_indep" s2 = s + "_indep_div_dep" if simplify((r[d] + u1*r[e]).subs({x: 1, y: u1})) != 0: matching_hints[s1] = r matching_hints[s1 + "_Integral"] = r if simplify((r[e] + u2*r[d]).subs({x: u2, y: 1})) != 0: matching_hints[s2] = r matching_hints[s2 + "_Integral"] = r if s1 in matching_hints and s2 in matching_hints: matching_hints["1st_homogeneous_coeff_best"] = r ## Linear coefficients of the form # y'+ F((a*x + b*y + c)/(a'*x + b'y + c')) = 0 # that can be reduced to homogeneous form. F = num/den params = _linear_coeff_match(F, func) if params: xarg, yarg = params u = Dummy('u') t = Dummy('t') # Dummy substitution for df and f(x). dummy_eq = reduced_eq.subs(((df, t), (f(x), u))) reps = ((x, x + xarg), (u, u + yarg), (t, df), (u, f(x))) dummy_eq = simplify(dummy_eq.subs(reps)) # get the re-cast values for e and d r2 = collect(expand(dummy_eq), [df, f(x)]).match(e*df + d) if r2: orderd = homogeneous_order(r2[d], x, f(x)) if orderd is not None: ordere = homogeneous_order(r2[e], x, f(x)) if orderd == ordere: # Match arguments are passed in such a way that it # is coherent with the already existing homogeneous # functions. 
r2[d] = r2[d].subs(f(x), y) r2[e] = r2[e].subs(f(x), y) r2.update({'xarg': xarg, 'yarg': yarg, 'd': d, 'e': e, 'y': y}) matching_hints["linear_coefficients"] = r2 matching_hints["linear_coefficients_Integral"] = r2 ## Equation of the form y' + (y/x)*H(x^n*y) = 0 # that can be reduced to separable form factor = simplify(x/f(x)*num/den) # Try representing factor in terms of x^n*y # where n is lowest power of x in factor; # first remove terms like sqrt(2)*3 from factor.atoms(Mul) u = None for mul in ordered(factor.atoms(Mul)): if mul.has(x): _, u = mul.as_independent(x, f(x)) break if u and u.has(f(x)): h = x**(degree(Poly(u.subs(f(x), y), gen=x)))*f(x) p = Wild('p') if (u/h == 1) or ((u/h).simplify().match(x**p)): t = Dummy('t') r2 = {'t': t} xpart, ypart = u.as_independent(f(x)) test = factor.subs(((u, t), (1/u, 1/t))) free = test.free_symbols if len(free) == 1 and free.pop() == t: r2.update({'power': xpart.as_base_exp()[1], 'u': test}) matching_hints["separable_reduced"] = r2 matching_hints["separable_reduced_Integral"] = r2 ## Almost-linear equation of the form f(x)*g(y)*y' + k(x)*l(y) + m(x) = 0 r = collect(eq, [df, f(x)]).match(e*df + d) if r: r2 = r.copy() r2[c] = S.Zero if r2[d].is_Add: # Separate the terms having f(x) to r[d] and # remaining to r[c] no_f, r2[d] = r2[d].as_independent(f(x)) r2[c] += no_f factor = simplify(r2[d].diff(f(x))/r[e]) if factor and not factor.has(f(x)): r2[d] = factor_terms(r2[d]) u = r2[d].as_independent(f(x), as_Add=False)[1] r2.update({'a': e, 'b': d, 'c': c, 'u': u}) r2[d] /= u r2[e] /= u.diff(f(x)) matching_hints["almost_linear"] = r2 matching_hints["almost_linear_Integral"] = r2 elif order == 2: # Liouville ODE in the form # f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x) # See Goldstein and Braun, "Advanced Methods for the Solution of # Differential Equations", pg. 98 s = d*f(x).diff(x, 2) + e*df**2 + k*df r = reduced_eq.match(s) if r and r[d] != 0: y = Dummy('y') g = simplify(r[e]/r[d]).subs(f(x), y) h = simplify(r[k]/r[d]) if h.has(f(x)) or g.has(x): pass else: r = {'g': g, 'h': h, 'y': y} matching_hints["Liouville"] = r matching_hints["Liouville_Integral"] = r # Homogeneous second order differential equation of the form # a3*f(x).diff(x, 2) + b3*f(x).diff(x) + c3, where # for simplicity, a3, b3 and c3 are assumed to be polynomials. # It has a definite power series solution at point x0 if, b3/a3 and c3/a3 # are analytic at x0. deq = a3*(f(x).diff(x, 2)) + b3*df + c3*f(x) r = collect(reduced_eq, [f(x).diff(x, 2), f(x).diff(x), f(x)]).match(deq) ordinary = False if r and r[a3] != 0: if all([r[key].is_polynomial() for key in r]): p = cancel(r[b3]/r[a3]) # Used below q = cancel(r[c3]/r[a3]) # Used below point = kwargs.get('x0', 0) check = p.subs(x, point) if not check.has(oo) and not check.has(NaN) and \ not check.has(zoo) and not check.has(-oo): check = q.subs(x, point) if not check.has(oo) and not check.has(NaN) and \ not check.has(zoo) and not check.has(-oo): ordinary = True r.update({'a3': a3, 'b3': b3, 'c3': c3, 'x0': point, 'terms': terms}) matching_hints["2nd_power_series_ordinary"] = r # Checking if the differential equation has a regular singular point # at x0. It has a regular singular point at x0, if (b3/a3)*(x - x0) # and (c3/a3)*((x - x0)**2) are analytic at x0. 
if not ordinary: p = cancel((x - point)*p) check = p.subs(x, point) if not check.has(oo) and not check.has(NaN) and \ not check.has(zoo) and not check.has(-oo): q = cancel(((x - point)**2)*q) check = q.subs(x, point) if not check.has(oo) and not check.has(NaN) and \ not check.has(zoo) and not check.has(-oo): coeff_dict = {'p': p, 'q': q, 'x0': point, 'terms': terms} matching_hints["2nd_power_series_regular"] = coeff_dict if order > 0: # nth order linear ODE # a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b r = _nth_linear_match(reduced_eq, func, order) # Constant coefficient case (a_i is constant for all i) if r and not any(r[i].has(x) for i in r if i >= 0): # Inhomogeneous case: F(x) is not identically 0 if r[-1]: undetcoeff = _undetermined_coefficients_match(r[-1], x) s = "nth_linear_constant_coeff_variation_of_parameters" matching_hints[s] = r matching_hints[s + "_Integral"] = r if undetcoeff['test']: r['trialset'] = undetcoeff['trialset'] matching_hints[ "nth_linear_constant_coeff_undetermined_coefficients" ] = r # Homogeneous case: F(x) is identically 0 else: matching_hints["nth_linear_constant_coeff_homogeneous"] = r # nth order Euler equation a_n*x**n*y^(n) + ... + a_1*x*y' + a_0*y = F(x) #In case of Homogeneous euler equation F(x) = 0 def _test_term(coeff, order): r""" Linear Euler ODEs have the form K*x**order*diff(y(x),x,order) = F(x), where K is independent of x and y(x), order>= 0. So we need to check that for each term, coeff == K*x**order from some K. We have a few cases, since coeff may have several different types. """ if order < 0: raise ValueError("order should be greater than 0") if coeff == 0: return True if order == 0: if x in coeff.free_symbols: return False return True if coeff.is_Mul: if coeff.has(f(x)): return False return x**order in coeff.args elif coeff.is_Pow: return coeff.as_base_exp() == (x, order) elif order == 1: return x == coeff return False if r and not any(not _test_term(r[i], i) for i in r if i >= 0): if not r[-1]: matching_hints["nth_linear_euler_eq_homogeneous"] = r else: matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"] = r matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral"] = r e, re = posify(r[-1].subs(x, exp(x))) undetcoeff = _undetermined_coefficients_match(e.subs(re), x) if undetcoeff['test']: r['trialset'] = undetcoeff['trialset'] matching_hints["nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients"] = r # Order keys based on allhints. retlist = [i for i in allhints if i in matching_hints] if dict: # Dictionaries are ordered arbitrarily, so make note of which # hint would come first for dsolve(). Use an ordered dict in Py 3. matching_hints["default"] = retlist[0] if retlist else None matching_hints["ordered_hints"] = tuple(retlist) return matching_hints else: return tuple(retlist) def classify_sysode(eq, funcs=None, **kwargs): r""" Returns a dictionary of parameter names and values that define the system of ordinary differential equations in ``eq``. The parameters are further used in :py:meth:`~sympy.solvers.ode.dsolve` for solving that system. The parameter names and values are: 'is_linear' (boolean), which tells whether the given system is linear. Note that "linear" here refers to the operator: terms such as ``x*diff(x,t)`` are nonlinear, whereas terms like ``sin(t)*diff(x,t)`` are still linear operators. 'func' (list) contains the :py:class:`~sympy.core.function.Function`s that appear with a derivative in the ODE, i.e. those that we are trying to solve the ODE for. 
'order' (dict) with the maximum derivative for each element of the 'func' parameter. 'func_coeff' (dict) with the coefficient for each triple ``(equation number, function, order)```. The coefficients are those subexpressions that do not appear in 'func', and hence can be considered constant for purposes of ODE solving. 'eq' (list) with the equations from ``eq``, sympified and transformed into expressions (we are solving for these expressions to be zero). 'no_of_equations' (int) is the number of equations (same as ``len(eq)``). 'type_of_equation' (string) is an internal classification of the type of ODE. References ========== -http://eqworld.ipmnet.ru/en/solutions/sysode/sode-toc1.htm -A. D. Polyanin and A. V. Manzhirov, Handbook of Mathematics for Engineers and Scientists Examples ======== >>> from sympy import Function, Eq, symbols, diff >>> from sympy.solvers.ode import classify_sysode >>> from sympy.abc import t >>> f, x, y = symbols('f, x, y', function=True) >>> k, l, m, n = symbols('k, l, m, n', Integer=True) >>> x1 = diff(x(t), t) ; y1 = diff(y(t), t) >>> x2 = diff(x(t), t, t) ; y2 = diff(y(t), t, t) >>> eq = (Eq(5*x1, 12*x(t) - 6*y(t)), Eq(2*y1, 11*x(t) + 3*y(t))) >>> classify_sysode(eq) {'eq': [-12*x(t) + 6*y(t) + 5*Derivative(x(t), t), -11*x(t) - 3*y(t) + 2*Derivative(y(t), t)], 'func': [x(t), y(t)], 'func_coeff': {(0, x(t), 0): -12, (0, x(t), 1): 5, (0, y(t), 0): 6, (0, y(t), 1): 0, (1, x(t), 0): -11, (1, x(t), 1): 0, (1, y(t), 0): -3, (1, y(t), 1): 2}, 'is_linear': True, 'no_of_equation': 2, 'order': {x(t): 1, y(t): 1}, 'type_of_equation': 'type1'} >>> eq = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t)), Eq(diff(y(t),t), -t**2*x(t) + 5*t*y(t))) >>> classify_sysode(eq) {'eq': [-t**2*y(t) - 5*t*x(t) + Derivative(x(t), t), t**2*x(t) - 5*t*y(t) + Derivative(y(t), t)], 'func': [x(t), y(t)], 'func_coeff': {(0, x(t), 0): -5*t, (0, x(t), 1): 1, (0, y(t), 0): -t**2, (0, y(t), 1): 0, (1, x(t), 0): t**2, (1, x(t), 1): 0, (1, y(t), 0): -5*t, (1, y(t), 1): 1}, 'is_linear': True, 'no_of_equation': 2, 'order': {x(t): 1, y(t): 1}, 'type_of_equation': 'type4'} """ # Sympify equations and convert iterables of equations into # a list of equations def _sympify(eq): return list(map(sympify, eq if iterable(eq) else [eq])) eq, funcs = (_sympify(w) for w in [eq, funcs]) for i, fi in enumerate(eq): if isinstance(fi, Equality): eq[i] = fi.lhs - fi.rhs matching_hints = {"no_of_equation":i+1} matching_hints['eq'] = eq if i==0: raise ValueError("classify_sysode() works for systems of ODEs. 
" "For scalar ODEs, classify_ode should be used") t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] # find all the functions if not given order = dict() if funcs==[None]: funcs = [] for eqs in eq: derivs = eqs.atoms(Derivative) func = set().union(*[d.atoms(AppliedUndef) for d in derivs]) for func_ in func: funcs.append(func_) funcs = list(set(funcs)) if len(funcs) < len(eq): raise ValueError("Number of functions given is less than number of equations %s" % funcs) func_dict = dict() for func in funcs: if not order.get(func, False): max_order = 0 for i, eqs_ in enumerate(eq): order_ = ode_order(eqs_,func) if max_order < order_: max_order = order_ eq_no = i if eq_no in func_dict: list_func = [] list_func.append(func_dict[eq_no]) list_func.append(func) func_dict[eq_no] = list_func else: func_dict[eq_no] = func order[func] = max_order funcs = [func_dict[i] for i in range(len(func_dict))] matching_hints['func'] = funcs for func in funcs: if isinstance(func, list): for func_elem in func: if len(func_elem.args) != 1: raise ValueError("dsolve() and classify_sysode() work with " "functions of one variable only, not %s" % func) else: if func and len(func.args) != 1: raise ValueError("dsolve() and classify_sysode() work with " "functions of one variable only, not %s" % func) # find the order of all equation in system of odes matching_hints["order"] = order # find coefficients of terms f(t), diff(f(t),t) and higher derivatives # and similarly for other functions g(t), diff(g(t),t) in all equations. # Here j denotes the equation number, funcs[l] denotes the function about # which we are talking about and k denotes the order of function funcs[l] # whose coefficient we are calculating. def linearity_check(eqs, j, func, is_linear_): for k in range(order[func]+1): func_coef[j,func,k] = collect(eqs.expand(),[diff(func,t,k)]).coeff(diff(func,t,k)) if is_linear_ == True: if func_coef[j,func,k]==0: if k==0: coef = eqs.as_independent(func)[1] for xr in range(1, ode_order(eqs,func)+1): coef -= eqs.as_independent(diff(func,t,xr))[1] if coef != 0: is_linear_ = False else: if eqs.as_independent(diff(func,t,k))[1]: is_linear_ = False else: for func_ in funcs: if isinstance(func_, list): for elem_func_ in func_: dep = func_coef[j,func,k].as_independent(elem_func_)[1] if dep!=1 and dep!=0: is_linear_ = False else: dep = func_coef[j,func,k].as_independent(func_)[1] if dep!=1 and dep!=0: is_linear_ = False return is_linear_ func_coef = {} is_linear = True for j, eqs in enumerate(eq): for func in funcs: if isinstance(func, list): for func_elem in func: is_linear = linearity_check(eqs, j, func_elem, is_linear) else: is_linear = linearity_check(eqs, j, func, is_linear) matching_hints['func_coeff'] = func_coef matching_hints['is_linear'] = is_linear if len(set(order.values()))==1: order_eq = list(matching_hints['order'].values())[0] if matching_hints['is_linear'] == True: if matching_hints['no_of_equation'] == 2: if order_eq == 1: type_of_equation = check_linear_2eq_order1(eq, funcs, func_coef) elif order_eq == 2: type_of_equation = check_linear_2eq_order2(eq, funcs, func_coef) else: type_of_equation = None elif matching_hints['no_of_equation'] == 3: if order_eq == 1: type_of_equation = check_linear_3eq_order1(eq, funcs, func_coef) if type_of_equation==None: type_of_equation = check_linear_neq_order1(eq, funcs, func_coef) else: type_of_equation = None else: if order_eq == 1: type_of_equation = check_linear_neq_order1(eq, funcs, func_coef) else: type_of_equation = None else: if matching_hints['no_of_equation'] == 2: 
if order_eq == 1: type_of_equation = check_nonlinear_2eq_order1(eq, funcs, func_coef) else: type_of_equation = None elif matching_hints['no_of_equation'] == 3: if order_eq == 1: type_of_equation = check_nonlinear_3eq_order1(eq, funcs, func_coef) else: type_of_equation = None else: type_of_equation = None else: type_of_equation = None matching_hints['type_of_equation'] = type_of_equation return matching_hints def check_linear_2eq_order1(eq, func, func_coef): x = func[0].func y = func[1].func fc = func_coef t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] r = dict() # for equations Eq(a1*diff(x(t),t), b1*x(t) + c1*y(t) + d1) # and Eq(a2*diff(y(t),t), b2*x(t) + c2*y(t) + d2) r['a1'] = fc[0,x(t),1] ; r['a2'] = fc[1,y(t),1] r['b1'] = -fc[0,x(t),0]/fc[0,x(t),1] ; r['b2'] = -fc[1,x(t),0]/fc[1,y(t),1] r['c1'] = -fc[0,y(t),0]/fc[0,x(t),1] ; r['c2'] = -fc[1,y(t),0]/fc[1,y(t),1] forcing = [S(0),S(0)] for i in range(2): for j in Add.make_args(eq[i]): if not j.has(x(t), y(t)): forcing[i] += j if not (forcing[0].has(t) or forcing[1].has(t)): # We can handle homogeneous case and simple constant forcings r['d1'] = forcing[0] r['d2'] = forcing[1] else: # Issue #9244: nonhomogeneous linear systems are not supported return None # Conditions to check for type 6 whose equations are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and # Eq(diff(y(t),t), a*[f(t) + a*h(t)]x(t) + a*[g(t) - h(t)]*y(t)) p = 0 q = 0 p1 = cancel(r['b2']/(cancel(r['b2']/r['c2']).as_numer_denom()[0])) p2 = cancel(r['b1']/(cancel(r['b1']/r['c1']).as_numer_denom()[0])) for n, i in enumerate([p1, p2]): for j in Mul.make_args(collect_const(i)): if not j.has(t): q = j if q and n==0: if ((r['b2']/j - r['b1'])/(r['c1'] - r['c2']/j)) == j: p = 1 elif q and n==1: if ((r['b1']/j - r['b2'])/(r['c2'] - r['c1']/j)) == j: p = 2 # End of condition for type 6 if r['d1']!=0 or r['d2']!=0: if not r['d1'].has(t) and not r['d2'].has(t): if all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2'.split()): # Equations for type 2 are Eq(a1*diff(x(t),t),b1*x(t)+c1*y(t)+d1) and Eq(a2*diff(y(t),t),b2*x(t)+c2*y(t)+d2) return "type2" else: return None else: if all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2'.split()): # Equations for type 1 are Eq(a1*diff(x(t),t),b1*x(t)+c1*y(t)) and Eq(a2*diff(y(t),t),b2*x(t)+c2*y(t)) return "type1" else: r['b1'] = r['b1']/r['a1'] ; r['b2'] = r['b2']/r['a2'] r['c1'] = r['c1']/r['a1'] ; r['c2'] = r['c2']/r['a2'] if (r['b1'] == r['c2']) and (r['c1'] == r['b2']): # Equation for type 3 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), g(t)*x(t) + f(t)*y(t)) return "type3" elif (r['b1'] == r['c2']) and (r['c1'] == -r['b2']) or (r['b1'] == -r['c2']) and (r['c1'] == r['b2']): # Equation for type 4 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), -g(t)*x(t) + f(t)*y(t)) return "type4" elif (not cancel(r['b2']/r['c1']).has(t) and not cancel((r['c2']-r['b1'])/r['c1']).has(t)) \ or (not cancel(r['b1']/r['c2']).has(t) and not cancel((r['c1']-r['b2'])/r['c2']).has(t)): # Equations for type 5 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), a*g(t)*x(t) + [f(t) + b*g(t)]*y(t) return "type5" elif p: return "type6" else: # Equations for type 7 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), h(t)*x(t) + p(t)*y(t)) return "type7" def check_linear_2eq_order2(eq, func, func_coef): x = func[0].func y = func[1].func fc = func_coef t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] r = dict() a = Wild('a', exclude=[1/t]) b = Wild('b', exclude=[1/t**2]) u = Wild('u', exclude=[t, t**2]) v 
= Wild('v', exclude=[t, t**2]) w = Wild('w', exclude=[t, t**2]) p = Wild('p', exclude=[t, t**2]) r['a1'] = fc[0,x(t),2] ; r['a2'] = fc[1,y(t),2] r['b1'] = fc[0,x(t),1] ; r['b2'] = fc[1,x(t),1] r['c1'] = fc[0,y(t),1] ; r['c2'] = fc[1,y(t),1] r['d1'] = fc[0,x(t),0] ; r['d2'] = fc[1,x(t),0] r['e1'] = fc[0,y(t),0] ; r['e2'] = fc[1,y(t),0] const = [S(0), S(0)] for i in range(2): for j in Add.make_args(eq[i]): if not (j.has(x(t)) or j.has(y(t))): const[i] += j r['f1'] = const[0] r['f2'] = const[1] if r['f1']!=0 or r['f2']!=0: if all(not r[k].has(t) for k in 'a1 a2 d1 d2 e1 e2 f1 f2'.split()) \ and r['b1']==r['c1']==r['b2']==r['c2']==0: return "type2" elif all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2 d1 d2 e1 e1'.split()): p = [S(0), S(0)] ; q = [S(0), S(0)] for n, e in enumerate([r['f1'], r['f2']]): if e.has(t): tpart = e.as_independent(t, Mul)[1] for i in Mul.make_args(tpart): if i.has(exp): b, e = i.as_base_exp() co = e.coeff(t) if co and not co.has(t) and co.has(I): p[n] = 1 else: q[n] = 1 else: q[n] = 1 else: q[n] = 1 if p[0]==1 and p[1]==1 and q[0]==0 and q[1]==0: return "type4" else: return None else: return None else: if r['b1']==r['b2']==r['c1']==r['c2']==0 and all(not r[k].has(t) \ for k in 'a1 a2 d1 d2 e1 e2'.split()): return "type1" elif r['b1']==r['e1']==r['c2']==r['d2']==0 and all(not r[k].has(t) \ for k in 'a1 a2 b2 c1 d1 e2'.split()) and r['c1'] == -r['b2'] and \ r['d1'] == r['e2']: return "type3" elif cancel(-r['b2']/r['d2'])==t and cancel(-r['c1']/r['e1'])==t and not \ (r['d2']/r['a2']).has(t) and not (r['e1']/r['a1']).has(t) and \ r['b1']==r['d1']==r['c2']==r['e2']==0: return "type5" elif ((r['a1']/r['d1']).expand()).match((p*(u*t**2+v*t+w)**2).expand()) and not \ (cancel(r['a1']*r['d2']/(r['a2']*r['d1']))).has(t) and not (r['d1']/r['e1']).has(t) and not \ (r['d2']/r['e2']).has(t) and r['b1'] == r['b2'] == r['c1'] == r['c2'] == 0: return "type10" elif not cancel(r['d1']/r['e1']).has(t) and not cancel(r['d2']/r['e2']).has(t) and not \ cancel(r['d1']*r['a2']/(r['d2']*r['a1'])).has(t) and r['b1']==r['b2']==r['c1']==r['c2']==0: return "type6" elif not cancel(r['b1']/r['c1']).has(t) and not cancel(r['b2']/r['c2']).has(t) and not \ cancel(r['b1']*r['a2']/(r['b2']*r['a1'])).has(t) and r['d1']==r['d2']==r['e1']==r['e2']==0: return "type7" elif cancel(-r['b2']/r['d2'])==t and cancel(-r['c1']/r['e1'])==t and not \ cancel(r['e1']*r['a2']/(r['d2']*r['a1'])).has(t) and r['e1'].has(t) \ and r['b1']==r['d1']==r['c2']==r['e2']==0: return "type8" elif (r['b1']/r['a1']).match(a/t) and (r['b2']/r['a2']).match(a/t) and not \ (r['b1']/r['c1']).has(t) and not (r['b2']/r['c2']).has(t) and \ (r['d1']/r['a1']).match(b/t**2) and (r['d2']/r['a2']).match(b/t**2) \ and not (r['d1']/r['e1']).has(t) and not (r['d2']/r['e2']).has(t): return "type9" elif -r['b1']/r['d1']==-r['c1']/r['e1']==-r['b2']/r['d2']==-r['c2']/r['e2']==t: return "type11" else: return None def check_linear_3eq_order1(eq, func, func_coef): x = func[0].func y = func[1].func z = func[2].func fc = func_coef t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] r = dict() r['a1'] = fc[0,x(t),1]; r['a2'] = fc[1,y(t),1]; r['a3'] = fc[2,z(t),1] r['b1'] = fc[0,x(t),0]; r['b2'] = fc[1,x(t),0]; r['b3'] = fc[2,x(t),0] r['c1'] = fc[0,y(t),0]; r['c2'] = fc[1,y(t),0]; r['c3'] = fc[2,y(t),0] r['d1'] = fc[0,z(t),0]; r['d2'] = fc[1,z(t),0]; r['d3'] = fc[2,z(t),0] forcing = [S(0), S(0), S(0)] for i in range(3): for j in Add.make_args(eq[i]): if not j.has(x(t), y(t), z(t)): forcing[i] += j if forcing[0].has(t) or forcing[1].has(t) or 
forcing[2].has(t): # We can handle homogeneous case and simple constant forcings. # Issue #9244: nonhomogeneous linear systems are not supported return None if all(not r[k].has(t) for k in 'a1 a2 a3 b1 b2 b3 c1 c2 c3 d1 d2 d3'.split()): if r['c1']==r['d1']==r['d2']==0: return 'type1' elif r['c1'] == -r['b2'] and r['d1'] == -r['b3'] and r['d2'] == -r['c3'] \ and r['b1'] == r['c2'] == r['d3'] == 0: return 'type2' elif r['b1'] == r['c2'] == r['d3'] == 0 and r['c1']/r['a1'] == -r['d1']/r['a1'] \ and r['d2']/r['a2'] == -r['b2']/r['a2'] and r['b3']/r['a3'] == -r['c3']/r['a3']: return 'type3' else: return None else: for k1 in 'c1 d1 b2 d2 b3 c3'.split(): if r[k1] == 0: continue else: if all(not cancel(r[k1]/r[k]).has(t) for k in 'd1 b2 d2 b3 c3'.split() if r[k]!=0) \ and all(not cancel(r[k1]/(r['b1'] - r[k])).has(t) for k in 'b1 c2 d3'.split() if r['b1']!=r[k]): return 'type4' else: break return None def check_linear_neq_order1(eq, func, func_coef): x = func[0].func y = func[1].func z = func[2].func fc = func_coef t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] r = dict() n = len(eq) for i in range(n): for j in range(n): if (fc[i,func[j],0]/fc[i,func[i],1]).has(t): return None if len(eq)==3: return 'type6' return 'type1' def check_nonlinear_2eq_order1(eq, func, func_coef): t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] f = Wild('f') g = Wild('g') u, v = symbols('u, v', cls=Dummy) def check_type(x, y): r1 = eq[0].match(t*diff(x(t),t) - x(t) + f) r2 = eq[1].match(t*diff(y(t),t) - y(t) + g) if not (r1 and r2): r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t) r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t) if not (r1 and r2): r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f) r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g) if not (r1 and r2): r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t) r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t) if r1 and r2 and not (r1[f].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t) \ or r2[g].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t)): return 'type5' else: return None for func_ in func: if isinstance(func_, list): x = func[0][0].func y = func[0][1].func eq_type = check_type(x, y) if not eq_type: eq_type = check_type(y, x) return eq_type x = func[0].func y = func[1].func fc = func_coef n = Wild('n', exclude=[x(t),y(t)]) f1 = Wild('f1', exclude=[v,t]) f2 = Wild('f2', exclude=[v,t]) g1 = Wild('g1', exclude=[u,t]) g2 = Wild('g2', exclude=[u,t]) for i in range(2): eqs = 0 for terms in Add.make_args(eq[i]): eqs += terms/fc[i,func[i],1] eq[i] = eqs r = eq[0].match(diff(x(t),t) - x(t)**n*f) if r: g = (diff(y(t),t) - eq[1])/r[f] if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)): return 'type1' r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f) if r: g = (diff(y(t),t) - eq[1])/r[f] if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)): return 'type2' g = Wild('g') r1 = eq[0].match(diff(x(t),t) - f) r2 = eq[1].match(diff(y(t),t) - g) if r1 and r2 and not (r1[f].subs(x(t),u).subs(y(t),v).has(t) or \ r2[g].subs(x(t),u).subs(y(t),v).has(t)): return 'type3' r1 = eq[0].match(diff(x(t),t) - f) r2 = eq[1].match(diff(y(t),t) - g) num, den = ( (r1[f].subs(x(t),u).subs(y(t),v))/ (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom() R1 = num.match(f1*g1) R2 = den.match(f2*g2) phi = (r1[f].subs(x(t),u).subs(y(t),v))/num if R1 and R2: return 'type4' return None def check_nonlinear_2eq_order2(eq, func, func_coef): return None def check_nonlinear_3eq_order1(eq, func, func_coef): x = func[0].func y 
= func[1].func z = func[2].func fc = func_coef t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] u, v, w = symbols('u, v, w', cls=Dummy) a = Wild('a', exclude=[x(t), y(t), z(t), t]) b = Wild('b', exclude=[x(t), y(t), z(t), t]) c = Wild('c', exclude=[x(t), y(t), z(t), t]) f = Wild('f') F1 = Wild('F1') F2 = Wild('F2') F3 = Wild('F3') for i in range(3): eqs = 0 for terms in Add.make_args(eq[i]): eqs += terms/fc[i,func[i],1] eq[i] = eqs r1 = eq[0].match(diff(x(t),t) - a*y(t)*z(t)) r2 = eq[1].match(diff(y(t),t) - b*z(t)*x(t)) r3 = eq[2].match(diff(z(t),t) - c*x(t)*y(t)) if r1 and r2 and r3: num1, den1 = r1[a].as_numer_denom() num2, den2 = r2[b].as_numer_denom() num3, den3 = r3[c].as_numer_denom() if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]): return 'type1' r = eq[0].match(diff(x(t),t) - y(t)*z(t)*f) if r: r1 = collect_const(r[f]).match(a*f) r2 = ((diff(y(t),t) - eq[1])/r1[f]).match(b*z(t)*x(t)) r3 = ((diff(z(t),t) - eq[2])/r1[f]).match(c*x(t)*y(t)) if r1 and r2 and r3: num1, den1 = r1[a].as_numer_denom() num2, den2 = r2[b].as_numer_denom() num3, den3 = r3[c].as_numer_denom() if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]): return 'type2' r = eq[0].match(diff(x(t),t) - (F2-F3)) if r: r1 = collect_const(r[F2]).match(c*F2) r1.update(collect_const(r[F3]).match(b*F3)) if r1: if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]): r1[F2], r1[F3] = r1[F3], r1[F2] r1[c], r1[b] = -r1[b], -r1[c] r2 = eq[1].match(diff(y(t),t) - a*r1[F3] + r1[c]*F1) if r2: r3 = (eq[2] == diff(z(t),t) - r1[b]*r2[F1] + r2[a]*r1[F2]) if r1 and r2 and r3: return 'type3' r = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3) if r: r1 = collect_const(r[F2]).match(c*F2) r1.update(collect_const(r[F3]).match(b*F3)) if r1: if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]): r1[F2], r1[F3] = r1[F3], r1[F2] r1[c], r1[b] = -r1[b], -r1[c] r2 = (diff(y(t),t) - eq[1]).match(a*x(t)*r1[F3] - r1[c]*z(t)*F1) if r2: r3 = (diff(z(t),t) - eq[2] == r1[b]*y(t)*r2[F1] - r2[a]*x(t)*r1[F2]) if r1 and r2 and r3: return 'type4' r = (diff(x(t),t) - eq[0]).match(x(t)*(F2 - F3)) if r: r1 = collect_const(r[F2]).match(c*F2) r1.update(collect_const(r[F3]).match(b*F3)) if r1: if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]): r1[F2], r1[F3] = r1[F3], r1[F2] r1[c], r1[b] = -r1[b], -r1[c] r2 = (diff(y(t),t) - eq[1]).match(y(t)*(a*r1[F3] - r1[c]*F1)) if r2: r3 = (diff(z(t),t) - eq[2] == z(t)*(r1[b]*r2[F1] - r2[a]*r1[F2])) if r1 and r2 and r3: return 'type5' return None def check_nonlinear_3eq_order2(eq, func, func_coef): return None def checksysodesol(eqs, sols, func=None): r""" Substitutes corresponding ``sols`` for each functions into each ``eqs`` and checks that the result of substitutions for each equation is ``0``. The equations and solutions passed can be any iterable. This only works when each ``sols`` have one function only, like `x(t)` or `y(t)`. For each function, ``sols`` can have a single solution or a list of solutions. In most cases it will not be necessary to explicitly identify the function, but if the function cannot be inferred from the original equation it can be supplied through the ``func`` argument. When a sequence of equations is passed, the same sequence is used to return the result for each equation with each function substitued with corresponding solutions. It tries the following method to find zero equivalence for each equation: Substitute the solutions for functions, like `x(t)` and `y(t)` into the original equations containing those functions. This function returns a tuple. 
The first item in the tuple is ``True`` if the substitution results for each equation is ``0``, and ``False`` otherwise. The second item in the tuple is what the substitution results in. Each element of the ``list`` should always be ``0`` corresponding to each equation if the first item is ``True``. Note that sometimes this function may return ``False``, but with an expression that is identically equal to ``0``, instead of returning ``True``. This is because :py:meth:`~sympy.simplify.simplify.simplify` cannot reduce the expression to ``0``. If an expression returned by each function vanishes identically, then ``sols`` really is a solution to ``eqs``. If this function seems to hang, it is probably because of a difficult simplification. Examples ======== >>> from sympy import Eq, diff, symbols, sin, cos, exp, sqrt, S >>> from sympy.solvers.ode import checksysodesol >>> C1, C2 = symbols('C1:3') >>> t = symbols('t') >>> x, y = symbols('x, y', function=True) >>> eq = (Eq(diff(x(t),t), x(t) + y(t) + 17), Eq(diff(y(t),t), -2*x(t) + y(t) + 12)) >>> sol = [Eq(x(t), (C1*sin(sqrt(2)*t) + C2*cos(sqrt(2)*t))*exp(t) - S(5)/3), ... Eq(y(t), (sqrt(2)*C1*cos(sqrt(2)*t) - sqrt(2)*C2*sin(sqrt(2)*t))*exp(t) - S(46)/3)] >>> checksysodesol(eq, sol) (True, [0, 0]) >>> eq = (Eq(diff(x(t),t),x(t)*y(t)**4), Eq(diff(y(t),t),y(t)**3)) >>> sol = [Eq(x(t), C1*exp(-1/(4*(C2 + t)))), Eq(y(t), -sqrt(2)*sqrt(-1/(C2 + t))/2), ... Eq(x(t), C1*exp(-1/(4*(C2 + t)))), Eq(y(t), sqrt(2)*sqrt(-1/(C2 + t))/2)] >>> checksysodesol(eq, sol) (True, [0, 0]) """ def _sympify(eq): return list(map(sympify, eq if iterable(eq) else [eq])) eqs = _sympify(eqs) for i in range(len(eqs)): if isinstance(eqs[i], Equality): eqs[i] = eqs[i].lhs - eqs[i].rhs if func is None: funcs = [] for eq in eqs: derivs = eq.atoms(Derivative) func = set().union(*[d.atoms(AppliedUndef) for d in derivs]) for func_ in func: funcs.append(func_) funcs = list(set(funcs)) if not all(isinstance(func, AppliedUndef) and len(func.args) == 1 for func in funcs)\ and len({func.args for func in funcs})!=1: raise ValueError("func must be a function of one variable, not %s" % func) for sol in sols: if len(sol.atoms(AppliedUndef)) != 1: raise ValueError("solutions should have one function only") if len(funcs) != len({sol.lhs for sol in sols}): raise ValueError("number of solutions provided does not match the number of equations") t = funcs[0].args[0] dictsol = dict() for sol in sols: func = list(sol.atoms(AppliedUndef))[0] if sol.rhs == func: sol = sol.reversed solved = sol.lhs == func and not sol.rhs.has(func) if not solved: rhs = solve(sol, func) if not rhs: raise NotImplementedError else: rhs = sol.rhs dictsol[func] = rhs checkeq = [] for eq in eqs: for func in funcs: eq = sub_func_doit(eq, func, dictsol[func]) ss = simplify(eq) if ss != 0: eq = ss.expand(force=True) else: eq = 0 checkeq.append(eq) if len(set(checkeq)) == 1 and list(set(checkeq))[0] == 0: return (True, checkeq) else: return (False, checkeq) @vectorize(0) def odesimp(eq, func, order, constants, hint): r""" Simplifies ODEs, including trying to solve for ``func`` and running :py:meth:`~sympy.solvers.ode.constantsimp`. It may use knowledge of the type of solution that the hint returns to apply additional simplifications. It also attempts to integrate any :py:class:`~sympy.integrals.Integral`\s in the expression, if the hint is not an ``_Integral`` hint. 
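For example, a solution produced under the ``separable`` hint has any
remaining :py:class:`~sympy.integrals.Integral`\s evaluated here, while the
same solution requested through ``separable_Integral`` is left in
unevaluated form.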
This function should have no effect on expressions returned by :py:meth:`~sympy.solvers.ode.dsolve`, as :py:meth:`~sympy.solvers.ode.dsolve` already calls :py:meth:`~sympy.solvers.ode.odesimp`, but the individual hint functions do not call :py:meth:`~sympy.solvers.ode.odesimp` (because the :py:meth:`~sympy.solvers.ode.dsolve` wrapper does). Therefore, this function is designed for mainly internal use. Examples ======== >>> from sympy import sin, symbols, dsolve, pprint, Function >>> from sympy.solvers.ode import odesimp >>> x , u2, C1= symbols('x,u2,C1') >>> f = Function('f') >>> eq = dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), f(x), ... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral', ... simplify=False) >>> pprint(eq, wrap_line=False) x ---- f(x) / | | / 1 \ | -|u2 + -------| | | /1 \| | | sin|--|| | \ \u2// log(f(x)) = log(C1) + | ---------------- d(u2) | 2 | u2 | / >>> pprint(odesimp(eq, f(x), 1, {C1}, ... hint='1st_homogeneous_coeff_subs_indep_div_dep' ... )) #doctest: +SKIP x --------- = C1 /f(x)\ tan|----| \2*x / """ x = func.args[0] f = func.func C1 = get_numbered_constants(eq, num=1) # First, integrate if the hint allows it. eq = _handle_Integral(eq, func, order, hint) if hint.startswith("nth_linear_euler_eq_nonhomogeneous"): eq = simplify(eq) if not isinstance(eq, Equality): raise TypeError("eq should be an instance of Equality") # Second, clean up the arbitrary constants. # Right now, nth linear hints can put as many as 2*order constants in an # expression. If that number grows with another hint, the third argument # here should be raised accordingly, or constantsimp() rewritten to handle # an arbitrary number of constants. eq = constantsimp(eq, constants) # Lastly, now that we have cleaned up the expression, try solving for func. # When CRootOf is implemented in solve(), we will want to return a CRootOf # everytime instead of an Equality. # Get the f(x) on the left if possible. if eq.rhs == func and not eq.lhs.has(func): eq = [Eq(eq.rhs, eq.lhs)] # make sure we are working with lists of solutions in simplified form. if eq.lhs == func and not eq.rhs.has(func): # The solution is already solved eq = [eq] # special simplification of the rhs if hint.startswith("nth_linear_constant_coeff"): # Collect terms to make the solution look nice. # This is also necessary for constantsimp to remove unnecessary # terms from the particular solution from variation of parameters # # Collect is not behaving reliably here. The results for # some linear constant-coefficient equations with repeated # roots do not properly simplify all constants sometimes. # 'collectterms' gives different orders sometimes, and results # differ in collect based on that order. The # sort-reverse trick fixes things, but may fail in the # future. In addition, collect is splitting exponentials with # rational powers for no reason. We have to do a match # to fix this using Wilds. global collectterms try: collectterms.sort(key=default_sort_key) collectterms.reverse() except Exception: pass assert len(eq) == 1 and eq[0].lhs == f(x) sol = eq[0].rhs sol = expand_mul(sol) for i, reroot, imroot in collectterms: sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x)) sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x)) for i, reroot, imroot in collectterms: sol = collect(sol, x**i*exp(reroot*x)) del collectterms # Collect is splitting exponentials with rational powers for # no reason. We call powsimp to fix. 
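# Illustrative note (comment only): each entry of collectterms is expected
# to be an (i, reroot, imroot) triple coming from a root of the
# characteristic equation.  For a repeated real root r the collect() calls
# above regroup C1*exp(r*x) + C2*x*exp(r*x) into (C1 + C2*x)*exp(r*x), and
# complex conjugate pairs are grouped on the matching
# exp(reroot*x)*sin(...)/cos(...) factors instead.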
sol = powsimp(sol) eq[0] = Eq(f(x), sol) else: # The solution is not solved, so try to solve it try: floats = any(i.is_Float for i in eq.atoms(Number)) eqsol = solve(eq, func, force=True, rational=False if floats else None) if not eqsol: raise NotImplementedError except (NotImplementedError, PolynomialError): eq = [eq] else: def _expand(expr): numer, denom = expr.as_numer_denom() if denom.is_Add: return expr else: return powsimp(expr.expand(), combine='exp', deep=True) # XXX: the rest of odesimp() expects each ``t`` to be in a # specific normal form: rational expression with numerator # expanded, but with combined exponential functions (at # least in this setup all tests pass). eq = [Eq(f(x), _expand(t)) for t in eqsol] # special simplification of the lhs. if hint.startswith("1st_homogeneous_coeff"): for j, eqi in enumerate(eq): newi = logcombine(eqi, force=True) if newi.lhs.func is log and newi.rhs == 0: newi = Eq(newi.lhs.args[0]/C1, C1) eq[j] = newi # We cleaned up the constants before solving to help the solve engine with # a simpler expression, but the solved expression could have introduced # things like -C1, so rerun constantsimp() one last time before returning. for i, eqi in enumerate(eq): eq[i] = constantsimp(eqi, constants) eq[i] = constant_renumber(eq[i], 'C', 1, 2*order) # If there is only 1 solution, return it; # otherwise return the list of solutions. if len(eq) == 1: eq = eq[0] return eq def checkodesol(ode, sol, func=None, order='auto', solve_for_func=True): r""" Substitutes ``sol`` into ``ode`` and checks that the result is ``0``. This only works when ``func`` is one function, like `f(x)`. ``sol`` can be a single solution or a list of solutions. Each solution may be an :py:class:`~sympy.core.relational.Equality` that the solution satisfies, e.g. ``Eq(f(x), C1), Eq(f(x) + C1, 0)``; or simply an :py:class:`~sympy.core.expr.Expr`, e.g. ``f(x) - C1``. In most cases it will not be necessary to explicitly identify the function, but if the function cannot be inferred from the original equation it can be supplied through the ``func`` argument. If a sequence of solutions is passed, the same sort of container will be used to return the result for each solution. It tries the following methods, in order, until it finds zero equivalence: 1. Substitute the solution for `f` in the original equation. This only works if ``ode`` is solved for `f`. It will attempt to solve it first unless ``solve_for_func == False``. 2. Take `n` derivatives of the solution, where `n` is the order of ``ode``, and check to see if that is equal to the solution. This only works on exact ODEs. 3. Take the 1st, 2nd, ..., `n`\th derivatives of the solution, each time solving for the derivative of `f` of that order (this will always be possible because `f` is a linear operator). Then back substitute each derivative into ``ode`` in reverse order. This function returns a tuple. The first item in the tuple is ``True`` if the substitution results in ``0``, and ``False`` otherwise. The second item in the tuple is what the substitution results in. It should always be ``0`` if the first item is ``True``. Note that sometimes this function will ``False``, but with an expression that is identically equal to ``0``, instead of returning ``True``. This is because :py:meth:`~sympy.simplify.simplify.simplify` cannot reduce the expression to ``0``. If an expression returned by this function vanishes identically, then ``sol`` really is a solution to ``ode``. 
If this function seems to hang, it is probably because of a hard simplification. To use this function to test, test the first item of the tuple. Examples ======== >>> from sympy import Eq, Function, checkodesol, symbols >>> x, C1 = symbols('x,C1') >>> f = Function('f') >>> checkodesol(f(x).diff(x), Eq(f(x), C1)) (True, 0) >>> assert checkodesol(f(x).diff(x), C1)[0] >>> assert not checkodesol(f(x).diff(x), x)[0] >>> checkodesol(f(x).diff(x, 2), x**2) (False, 2) """ if not isinstance(ode, Equality): ode = Eq(ode, 0) if func is None: try: _, func = _preprocess(ode.lhs) except ValueError: funcs = [s.atoms(AppliedUndef) for s in ( sol if is_sequence(sol, set) else [sol])] funcs = set().union(*funcs) if len(funcs) != 1: raise ValueError( 'must pass func arg to checkodesol for this case.') func = funcs.pop() if not isinstance(func, AppliedUndef) or len(func.args) != 1: raise ValueError( "func must be a function of one variable, not %s" % func) if is_sequence(sol, set): return type(sol)([checkodesol(ode, i, order=order, solve_for_func=solve_for_func) for i in sol]) if not isinstance(sol, Equality): sol = Eq(func, sol) elif sol.rhs == func: sol = sol.reversed if order == 'auto': order = ode_order(ode, func) solved = sol.lhs == func and not sol.rhs.has(func) if solve_for_func and not solved: rhs = solve(sol, func) if rhs: eqs = [Eq(func, t) for t in rhs] if len(rhs) == 1: eqs = eqs[0] return checkodesol(ode, eqs, order=order, solve_for_func=False) s = True testnum = 0 x = func.args[0] while s: if testnum == 0: # First pass, try substituting a solved solution directly into the # ODE. This has the highest chance of succeeding. ode_diff = ode.lhs - ode.rhs if sol.lhs == func: s = sub_func_doit(ode_diff, func, sol.rhs) else: testnum += 1 continue ss = simplify(s) if ss: # with the new numer_denom in power.py, if we do a simple # expansion then testnum == 0 verifies all solutions. s = ss.expand(force=True) else: s = 0 testnum += 1 elif testnum == 1: # Second pass. If we cannot substitute f, try seeing if the nth # derivative is equal, this will only work for odes that are exact, # by definition. s = simplify( trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) - trigsimp(ode.lhs) + trigsimp(ode.rhs)) # s2 = simplify( # diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \ # ode.lhs + ode.rhs) testnum += 1 elif testnum == 2: # Third pass. Try solving for df/dx and substituting that into the # ODE. Thanks to Chris Smith for suggesting this method. Many of # the comments below are his, too. # The method: # - Take each of 1..n derivatives of the solution. # - Solve each nth derivative for d^(n)f/dx^(n) # (the differential of that order) # - Back substitute into the ODE in decreasing order # (i.e., n, n-1, ...) # - Check the result for zero equivalence if sol.lhs == func and not sol.rhs.has(func): diffsols = {0: sol.rhs} elif sol.rhs == func and not sol.lhs.has(func): diffsols = {0: sol.lhs} else: diffsols = {} sol = sol.lhs - sol.rhs for i in range(1, order + 1): # Differentiation is a linear operator, so there should always # be 1 solution. Nonetheless, we test just to make sure. # We only need to solve once. After that, we automatically # have the solution to the differential in the order we want. if i == 1: ds = sol.diff(x) try: sdf = solve(ds, func.diff(x, i)) if not sdf: raise NotImplementedError except NotImplementedError: testnum += 1 break else: diffsols[i] = sdf[0] else: # This is what the solution says df/dx should be. diffsols[i] = diffsols[i - 1].diff(x) # Make sure the above didn't fail. 
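# Illustrative note (comment only): for an implicit solution such as
# Eq(f(x)**2 + x**2, C1), diffsols starts out empty, sol becomes
# f(x)**2 + x**2 - C1, and solving its derivative 2*f(x)*f'(x) + 2*x for
# f'(x) gives diffsols[1] = -x/f(x); higher derivatives are then obtained by
# differentiating that expression instead of calling solve() again.
# Back-substituting into an ODE like f(x)*f'(x) + x = 0 yields -x + x = 0,
# which the zero-equivalence check below accepts.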
if testnum > 2: continue else: # Substitute it into ODE to check for self consistency. lhs, rhs = ode.lhs, ode.rhs for i in range(order, -1, -1): if i == 0 and 0 not in diffsols: # We can only substitute f(x) if the solution was # solved for f(x). break lhs = sub_func_doit(lhs, func.diff(x, i), diffsols[i]) rhs = sub_func_doit(rhs, func.diff(x, i), diffsols[i]) ode_or_bool = Eq(lhs, rhs) ode_or_bool = simplify(ode_or_bool) if isinstance(ode_or_bool, (bool, BooleanAtom)): if ode_or_bool: lhs = rhs = S.Zero else: lhs = ode_or_bool.lhs rhs = ode_or_bool.rhs # No sense in overworking simplify -- just prove that the # numerator goes to zero num = trigsimp((lhs - rhs).as_numer_denom()[0]) # since solutions are obtained using force=True we test # using the same level of assumptions ## replace function with dummy so assumptions will work _func = Dummy('func') num = num.subs(func, _func) ## posify the expression num, reps = posify(num) s = simplify(num).xreplace(reps).xreplace({_func: func}) testnum += 1 else: break if not s: return (True, s) elif s is True: # The code above never was able to change s raise NotImplementedError("Unable to test if " + str(sol) + " is a solution to " + str(ode) + ".") else: return (False, s) def ode_sol_simplicity(sol, func, trysolving=True): r""" Returns an extended integer representing how simple a solution to an ODE is. The following things are considered, in order from most simple to least: - ``sol`` is solved for ``func``. - ``sol`` is not solved for ``func``, but can be if passed to solve (e.g., a solution returned by ``dsolve(ode, func, simplify=False``). - If ``sol`` is not solved for ``func``, then base the result on the length of ``sol``, as computed by ``len(str(sol))``. - If ``sol`` has any unevaluated :py:class:`~sympy.integrals.Integral`\s, this will automatically be considered less simple than any of the above. This function returns an integer such that if solution A is simpler than solution B by above metric, then ``ode_sol_simplicity(sola, func) < ode_sol_simplicity(solb, func)``. Currently, the following are the numbers returned, but if the heuristic is ever improved, this may change. Only the ordering is guaranteed. +----------------------------------------------+-------------------+ | Simplicity | Return | +==============================================+===================+ | ``sol`` solved for ``func`` | ``-2`` | +----------------------------------------------+-------------------+ | ``sol`` not solved for ``func`` but can be | ``-1`` | +----------------------------------------------+-------------------+ | ``sol`` is not solved nor solvable for | ``len(str(sol))`` | | ``func`` | | +----------------------------------------------+-------------------+ | ``sol`` contains an | ``oo`` | | :py:class:`~sympy.integrals.Integral` | | +----------------------------------------------+-------------------+ ``oo`` here means the SymPy infinity, which should compare greater than any integer. If you already know :py:meth:`~sympy.solvers.solvers.solve` cannot solve ``sol``, you can use ``trysolving=False`` to skip that step, which is the only potentially slow step. For example, :py:meth:`~sympy.solvers.ode.dsolve` with the ``simplify=False`` flag should do this. If ``sol`` is a list of solutions, if the worst solution in the list returns ``oo`` it returns that, otherwise it returns ``len(str(sol))``, that is, the length of the string representation of the whole list. 
Examples ======== This function is designed to be passed to ``min`` as the key argument, such as ``min(listofsolutions, key=lambda i: ode_sol_simplicity(i, f(x)))``. >>> from sympy import symbols, Function, Eq, tan, cos, sqrt, Integral >>> from sympy.solvers.ode import ode_sol_simplicity >>> x, C1, C2 = symbols('x, C1, C2') >>> f = Function('f') >>> ode_sol_simplicity(Eq(f(x), C1*x**2), f(x)) -2 >>> ode_sol_simplicity(Eq(x**2 + f(x), C1), f(x)) -1 >>> ode_sol_simplicity(Eq(f(x), C1*Integral(2*x, x)), f(x)) oo >>> eq1 = Eq(f(x)/tan(f(x)/(2*x)), C1) >>> eq2 = Eq(f(x)/tan(f(x)/(2*x) + f(x)), C2) >>> [ode_sol_simplicity(eq, f(x)) for eq in [eq1, eq2]] [28, 35] >>> min([eq1, eq2], key=lambda i: ode_sol_simplicity(i, f(x))) Eq(f(x)/tan(f(x)/(2*x)), C1) """ # TODO: if two solutions are solved for f(x), we still want to be # able to get the simpler of the two # See the docstring for the coercion rules. We check easier (faster) # things here first, to save time. if iterable(sol): # See if there are Integrals for i in sol: if ode_sol_simplicity(i, func, trysolving=trysolving) == oo: return oo return len(str(sol)) if sol.has(Integral): return oo # Next, try to solve for func. This code will change slightly when CRootOf # is implemented in solve(). Probably a CRootOf solution should fall # somewhere between a normal solution and an unsolvable expression. # First, see if they are already solved if sol.lhs == func and not sol.rhs.has(func) or \ sol.rhs == func and not sol.lhs.has(func): return -2 # We are not so lucky, try solving manually if trysolving: try: sols = solve(sol, func) if not sols: raise NotImplementedError except NotImplementedError: pass else: return -1 # Finally, a naive computation based on the length of the string version # of the expression. This may favor combined fractions because they # will not have duplicate denominators, and may slightly favor expressions # with fewer additions and subtractions, as those are separated by spaces # by the printer. # Additional ideas for simplicity heuristics are welcome, like maybe # checking if a equation has a larger domain, or if constantsimp has # introduced arbitrary constants numbered higher than the order of a # given ODE that sol is a solution of. 
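# Illustrative note (comment only): an implicit relation that solve() cannot
# invert, such as Eq(f(x)/tan(f(x)/(2*x)), C1) from the docstring above,
# falls through to this point and is ranked purely by the length of its
# string form, which is where the [28, 35] values in the docstring example
# come from.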
return len(str(sol)) def _get_constant_subexpressions(expr, Cs): Cs = set(Cs) Ces = [] def _recursive_walk(expr): expr_syms = expr.free_symbols if len(expr_syms) > 0 and expr_syms.issubset(Cs): Ces.append(expr) else: if expr.func == exp: expr = expr.expand(mul=True) if expr.func in (Add, Mul): d = sift(expr.args, lambda i : i.free_symbols.issubset(Cs)) if len(d[True]) > 1: x = expr.func(*d[True]) if not x.is_number: Ces.append(x) elif isinstance(expr, Integral): if expr.free_symbols.issubset(Cs) and \ all(len(x) == 3 for x in expr.limits): Ces.append(expr) for i in expr.args: _recursive_walk(i) return _recursive_walk(expr) return Ces def __remove_linear_redundancies(expr, Cs): cnts = {i: expr.count(i) for i in Cs} Cs = [i for i in Cs if cnts[i] > 0] def _linear(expr): if expr.func is Add: xs = [i for i in Cs if expr.count(i)==cnts[i] \ and 0 == expr.diff(i, 2)] d = {} for x in xs: y = expr.diff(x) if y not in d: d[y]=[] d[y].append(x) for y in d: if len(d[y]) > 1: d[y].sort(key=str) for x in d[y][1:]: expr = expr.subs(x, 0) return expr def _recursive_walk(expr): if len(expr.args) != 0: expr = expr.func(*[_recursive_walk(i) for i in expr.args]) expr = _linear(expr) return expr if expr.func is Equality: lhs, rhs = [_recursive_walk(i) for i in expr.args] f = lambda i: isinstance(i, Number) or i in Cs if lhs.func is Symbol and lhs in Cs: rhs, lhs = lhs, rhs if lhs.func in (Add, Symbol) and rhs.func in (Add, Symbol): dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f) drhs = sift([rhs] if isinstance(rhs, AtomicExpr) else rhs.args, f) for i in [True, False]: for hs in [dlhs, drhs]: if i not in hs: hs[i] = [0] # this calculation can be simplified lhs = Add(*dlhs[False]) - Add(*drhs[False]) rhs = Add(*drhs[True]) - Add(*dlhs[True]) elif lhs.func in (Mul, Symbol) and rhs.func in (Mul, Symbol): dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f) if True in dlhs: if False not in dlhs: dlhs[False] = [1] lhs = Mul(*dlhs[False]) rhs = rhs/Mul(*dlhs[True]) return Eq(lhs, rhs) else: return _recursive_walk(expr) @vectorize(0) def constantsimp(expr, constants): r""" Simplifies an expression with arbitrary constants in it. This function is written specifically to work with :py:meth:`~sympy.solvers.ode.dsolve`, and is not intended for general use. Simplification is done by "absorbing" the arbitrary constants into other arbitrary constants, numbers, and symbols that they are not independent of. The symbols must all have the same name with numbers after it, for example, ``C1``, ``C2``, ``C3``. The ``symbolname`` here would be '``C``', the ``startnumber`` would be 1, and the ``endnumber`` would be 3. If the arbitrary constants are independent of the variable ``x``, then the independent symbol would be ``x``. There is no need to specify the dependent function, such as ``f(x)``, because it already has the independent symbol, ``x``, in it. Because terms are "absorbed" into arbitrary constants and because constants are renumbered after simplifying, the arbitrary constants in expr are not necessarily equal to the ones of the same name in the returned result. If two or more arbitrary constants are added, multiplied, or raised to the power of each other, they are first absorbed together into a single arbitrary constant. Then the new constant is combined into other terms if necessary. Absorption of constants is done with limited assistance: 1. 
terms of :py:class:`~sympy.core.add.Add`\s are collected to try join constants so `e^x (C_1 \cos(x) + C_2 \cos(x))` will simplify to `e^x C_1 \cos(x)`; 2. powers with exponents that are :py:class:`~sympy.core.add.Add`\s are expanded so `e^{C_1 + x}` will be simplified to `C_1 e^x`. Use :py:meth:`~sympy.solvers.ode.constant_renumber` to renumber constants after simplification or else arbitrary numbers on constants may appear, e.g. `C_1 + C_3 x`. In rare cases, a single constant can be "simplified" into two constants. Every differential equation solution should have as many arbitrary constants as the order of the differential equation. The result here will be technically correct, but it may, for example, have `C_1` and `C_2` in an expression, when `C_1` is actually equal to `C_2`. Use your discretion in such situations, and also take advantage of the ability to use hints in :py:meth:`~sympy.solvers.ode.dsolve`. Examples ======== >>> from sympy import symbols >>> from sympy.solvers.ode import constantsimp >>> C1, C2, C3, x, y = symbols('C1, C2, C3, x, y') >>> constantsimp(2*C1*x, {C1, C2, C3}) C1*x >>> constantsimp(C1 + 2 + x, {C1, C2, C3}) C1 + x >>> constantsimp(C1*C2 + 2 + C2 + C3*x, {C1, C2, C3}) C1 + C3*x """ # This function works recursively. The idea is that, for Mul, # Add, Pow, and Function, if the class has a constant in it, then # we can simplify it, which we do by recursing down and # simplifying up. Otherwise, we can skip that part of the # expression. Cs = constants orig_expr = expr constant_subexprs = _get_constant_subexpressions(expr, Cs) for xe in constant_subexprs: xes = list(xe.free_symbols) if not xes: continue if all([expr.count(c) == xe.count(c) for c in xes]): xes.sort(key=str) expr = expr.subs(xe, xes[0]) # try to perform common sub-expression elimination of constant terms try: commons, rexpr = cse(expr) commons.reverse() rexpr = rexpr[0] for s in commons: cs = list(s[1].atoms(Symbol)) if len(cs) == 1 and cs[0] in Cs: rexpr = rexpr.subs(s[0], cs[0]) else: rexpr = rexpr.subs(*s) expr = rexpr except Exception: pass expr = __remove_linear_redundancies(expr, Cs) def _conditional_term_factoring(expr): new_expr = terms_gcd(expr, clear=False, deep=True, expand=False) # we do not want to factor exponentials, so handle this separately if new_expr.is_Mul: infac = False asfac = False for m in new_expr.args: if m.func is exp: asfac = True elif m.is_Add: infac = any(fi.func is exp for t in m.args for fi in Mul.make_args(t)) if asfac and infac: new_expr = expr break return new_expr expr = _conditional_term_factoring(expr) # call recursively if more simplification is possible if orig_expr != expr: return constantsimp(expr, Cs) return expr def constant_renumber(expr, symbolname, startnumber, endnumber): r""" Renumber arbitrary constants in ``expr`` to have numbers 1 through `N` where `N` is ``endnumber - startnumber + 1`` at most. In the process, this reorders expression terms in a standard way. This is a simple function that goes through and renumbers any :py:class:`~sympy.core.symbol.Symbol` with a name in the form ``symbolname + num`` where ``num`` is in the range from ``startnumber`` to ``endnumber``. Symbols are renumbered based on ``.sort_key()``, so they should be numbered roughly in the order that they appear in the final, printed expression. Note that this ordering is based in part on hashes, so it can produce different results on different machines. The structure of this function is very similar to that of :py:meth:`~sympy.solvers.ode.constantsimp`. 
Examples ======== >>> from sympy import symbols, Eq, pprint >>> from sympy.solvers.ode import constant_renumber >>> x, C0, C1, C2, C3, C4 = symbols('x,C:5') Only constants in the given range (inclusive) are renumbered; the renumbering always starts from 1: >>> constant_renumber(C1 + C3 + C4, 'C', 1, 3) C1 + C2 + C4 >>> constant_renumber(C0 + C1 + C3 + C4, 'C', 2, 4) C0 + 2*C1 + C2 >>> constant_renumber(C0 + 2*C1 + C2, 'C', 0, 1) C1 + 3*C2 >>> pprint(C2 + C1*x + C3*x**2) 2 C1*x + C2 + C3*x >>> pprint(constant_renumber(C2 + C1*x + C3*x**2, 'C', 1, 3)) 2 C1 + C2*x + C3*x """ if type(expr) in (set, list, tuple): return type(expr)( [constant_renumber(i, symbolname=symbolname, startnumber=startnumber, endnumber=endnumber) for i in expr] ) global newstartnumber newstartnumber = 1 constants_found = [None]*(endnumber + 2) constantsymbols = [Symbol( symbolname + "%d" % t) for t in range(startnumber, endnumber + 1)] # make a mapping to send all constantsymbols to S.One and use # that to make sure that term ordering is not dependent on # the indexed value of C C_1 = [(ci, S.One) for ci in constantsymbols] sort_key=lambda arg: default_sort_key(arg.subs(C_1)) def _constant_renumber(expr): r""" We need to have an internal recursive function so that newstartnumber maintains its values throughout recursive calls. """ global newstartnumber if isinstance(expr, Equality): return Eq( _constant_renumber(expr.lhs), _constant_renumber(expr.rhs)) if type(expr) not in (Mul, Add, Pow) and not expr.is_Function and \ not expr.has(*constantsymbols): # Base case, as above. Hope there aren't constants inside # of some other class, because they won't be renumbered. return expr elif expr.is_Piecewise: return expr elif expr in constantsymbols: if expr not in constants_found: constants_found[newstartnumber] = expr newstartnumber += 1 return expr elif expr.is_Function or expr.is_Pow or isinstance(expr, Tuple): return expr.func( *[_constant_renumber(x) for x in expr.args]) else: sortedargs = list(expr.args) sortedargs.sort(key=sort_key) return expr.func(*[_constant_renumber(x) for x in sortedargs]) expr = _constant_renumber(expr) # Renumbering happens here newconsts = symbols('C1:%d' % newstartnumber) expr = expr.subs(zip(constants_found[1:], newconsts), simultaneous=True) return expr def _handle_Integral(expr, func, order, hint): r""" Converts a solution with Integrals in it into an actual solution. For most hints, this simply runs ``expr.doit()``. """ global y x = func.args[0] f = func.func if hint == "1st_exact": sol = (expr.doit()).subs(y, f(x)) del y elif hint == "1st_exact_Integral": sol = Eq(Subs(expr.lhs, y, f(x)), expr.rhs) del y elif hint == "nth_linear_constant_coeff_homogeneous": sol = expr elif not hint.endswith("_Integral"): sol = expr.doit() else: sol = expr return sol # FIXME: replace the general solution in the docstring with # dsolve(equation, hint='1st_exact_Integral'). You will need to be able # to have assumptions on P and Q that dP/dy = dQ/dx. def ode_1st_exact(eq, func, order, match): r""" Solves 1st order exact ordinary differential equations. A 1st order differential equation is called exact if it is the total differential of a function. That is, the differential equation .. math:: P(x, y) \,\partial{}x + Q(x, y) \,\partial{}y = 0 is exact if there is some function `F(x, y)` such that `P(x, y) = \partial{}F/\partial{}x` and `Q(x, y) = \partial{}F/\partial{}y`. 
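For instance, the equation `2 x y \,\partial{}x + x^2 \,\partial{}y = 0` is
exact: taking `F(x, y) = x^2 y` gives `\partial{}F/\partial{}x = 2 x y` and
`\partial{}F/\partial{}y = x^2`, so the general solution is `x^2 y = C_1`.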
It can be shown that a necessary and sufficient condition for a first order ODE to be exact is that `\partial{}P/\partial{}y = \partial{}Q/\partial{}x`. Then, the solution will be as given below:: >>> from sympy import Function, Eq, Integral, symbols, pprint >>> x, y, t, x0, y0, C1= symbols('x,y,t,x0,y0,C1') >>> P, Q, F= map(Function, ['P', 'Q', 'F']) >>> pprint(Eq(Eq(F(x, y), Integral(P(t, y), (t, x0, x)) + ... Integral(Q(x0, t), (t, y0, y))), C1)) x y / / | | F(x, y) = | P(t, y) dt + | Q(x0, t) dt = C1 | | / / x0 y0 Where the first partials of `P` and `Q` exist and are continuous in a simply connected region. A note: SymPy currently has no way to represent inert substitution on an expression, so the hint ``1st_exact_Integral`` will return an integral with `dy`. This is supposed to represent the function that you are solving for. Examples ======== >>> from sympy import Function, dsolve, cos, sin >>> from sympy.abc import x >>> f = Function('f') >>> dsolve(cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x), ... f(x), hint='1st_exact') Eq(x*cos(f(x)) + f(x)**3/3, C1) References ========== - http://en.wikipedia.org/wiki/Exact_differential_equation - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 73 # indirect doctest """ x = func.args[0] f = func.func r = match # d+e*diff(f(x),x) e = r[r['e']] d = r[r['d']] global y # This is the only way to pass dummy y to _handle_Integral y = r['y'] C1 = get_numbered_constants(eq, num=1) # Refer Joel Moses, "Symbolic Integration - The Stormy Decade", # Communications of the ACM, Volume 14, Number 8, August 1971, pp. 558 # which gives the method to solve an exact differential equation. sol = Integral(d, x) + Integral((e - (Integral(d, x).diff(y))), y) return Eq(sol, C1) def ode_1st_homogeneous_coeff_best(eq, func, order, match): r""" Returns the best solution to an ODE from the two hints ``1st_homogeneous_coeff_subs_dep_div_indep`` and ``1st_homogeneous_coeff_subs_indep_div_dep``. This is as determined by :py:meth:`~sympy.solvers.ode.ode_sol_simplicity`. See the :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep` and :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep` docstrings for more information on these hints. Note that there is no ``ode_1st_homogeneous_coeff_best_Integral`` hint. Examples ======== >>> from sympy import Function, dsolve, pprint >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), ... hint='1st_homogeneous_coeff_best', simplify=False)) / 2 \ | 3*x | log|----- + 1| | 2 | \f (x) / log(f(x)) = log(C1) - -------------- 3 References ========== - http://en.wikipedia.org/wiki/Homogeneous_differential_equation - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 59 # indirect doctest """ # There are two substitutions that solve the equation, u1=y/x and u2=x/y # They produce different integrals, so try them both and see which # one is easier. sol1 = ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match) sol2 = ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match) simplify = match.get('simplify', True) if simplify: # why is odesimp called here? Should it be at the usual spot? 
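# Illustrative note (comment only): after the optional odesimp() pass, the
# two candidates are compared with ode_sol_simplicity() in the min() call
# below, so a result explicitly solved for f(x) (score -2) beats an implicit
# relation, and between two implicit relations the one with the shorter
# string form is returned.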
constants = sol1.free_symbols.difference(eq.free_symbols) sol1 = odesimp( sol1, func, order, constants, "1st_homogeneous_coeff_subs_indep_div_dep") constants = sol2.free_symbols.difference(eq.free_symbols) sol2 = odesimp( sol2, func, order, constants, "1st_homogeneous_coeff_subs_dep_div_indep") return min([sol1, sol2], key=lambda x: ode_sol_simplicity(x, func, trysolving=not simplify)) def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match): r""" Solves a 1st order differential equation with homogeneous coefficients using the substitution `u_1 = \frac{\text{<dependent variable>}}{\text{<independent variable>}}`. This is a differential equation .. math:: P(x, y) + Q(x, y) dy/dx = 0 such that `P` and `Q` are homogeneous and of the same order. A function `F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`. Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`. See also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`. If the coefficients `P` and `Q` in the differential equation above are homogeneous functions of the same order, then it can be shown that the substitution `y = u_1 x` (i.e. `u_1 = y/x`) will turn the differential equation into an equation separable in the variables `x` and `u`. If `h(u_1)` is the function that results from making the substitution `u_1 = f(x)/x` on `P(x, f(x))` and `g(u_2)` is the function that results from the substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) + Q(x, f(x)) f'(x) = 0`, then the general solution is:: >>> from sympy import Function, dsolve, pprint >>> from sympy.abc import x >>> f, g, h = map(Function, ['f', 'g', 'h']) >>> genform = g(f(x)/x) + h(f(x)/x)*f(x).diff(x) >>> pprint(genform) /f(x)\ /f(x)\ d g|----| + h|----|*--(f(x)) \ x / \ x / dx >>> pprint(dsolve(genform, f(x), ... hint='1st_homogeneous_coeff_subs_dep_div_indep_Integral')) f(x) ---- x / | | -h(u1) log(x) = C1 + | ---------------- d(u1) | u1*h(u1) + g(u1) | / Where `u_1 h(u_1) + g(u_1) \ne 0` and `x \ne 0`. See also the docstrings of :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best` and :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`. Examples ======== >>> from sympy import Function, dsolve >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), ... hint='1st_homogeneous_coeff_subs_dep_div_indep', simplify=False)) / 3 \ |3*f(x) f (x)| log|------ + -----| | x 3 | \ x / log(x) = log(C1) - ------------------- 3 References ========== - http://en.wikipedia.org/wiki/Homogeneous_differential_equation - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 59 # indirect doctest """ x = func.args[0] f = func.func u = Dummy('u') u1 = Dummy('u1') # u1 == f(x)/x r = match # d+e*diff(f(x),x) C1 = get_numbered_constants(eq, num=1) xarg = match.get('xarg', 0) yarg = match.get('yarg', 0) int = Integral( (-r[r['e']]/(r[r['d']] + u1*r[r['e']])).subs({x: 1, r['y']: u1}), (u1, None, f(x)/x)) sol = logcombine(Eq(log(x), int + log(C1)), force=True) sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x)))) return sol def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match): r""" Solves a 1st order differential equation with homogeneous coefficients using the substitution `u_2 = \frac{\text{<independent variable>}}{\text{<dependent variable>}}`. This is a differential equation .. math:: P(x, y) + Q(x, y) dy/dx = 0 such that `P` and `Q` are homogeneous and of the same order. 
A function `F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`. Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`. See also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`. If the coefficients `P` and `Q` in the differential equation above are homogeneous functions of the same order, then it can be shown that the substitution `x = u_2 y` (i.e. `u_2 = x/y`) will turn the differential equation into an equation separable in the variables `y` and `u_2`. If `h(u_2)` is the function that results from making the substitution `u_2 = x/f(x)` on `P(x, f(x))` and `g(u_2)` is the function that results from the substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) + Q(x, f(x)) f'(x) = 0`, then the general solution is: >>> from sympy import Function, dsolve, pprint >>> from sympy.abc import x >>> f, g, h = map(Function, ['f', 'g', 'h']) >>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x) >>> pprint(genform) / x \ / x \ d g|----| + h|----|*--(f(x)) \f(x)/ \f(x)/ dx >>> pprint(dsolve(genform, f(x), ... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral')) x ---- f(x) / | | -g(u2) | ---------------- d(u2) | u2*g(u2) + h(u2) | / <BLANKLINE> f(x) = C1*e Where `u_2 g(u_2) + h(u_2) \ne 0` and `f(x) \ne 0`. See also the docstrings of :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best` and :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`. Examples ======== >>> from sympy import Function, pprint, dsolve >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), ... hint='1st_homogeneous_coeff_subs_indep_div_dep', ... simplify=False)) / 2 \ | 3*x | log|----- + 1| | 2 | \f (x) / log(f(x)) = log(C1) - -------------- 3 References ========== - http://en.wikipedia.org/wiki/Homogeneous_differential_equation - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 59 # indirect doctest """ x = func.args[0] f = func.func u = Dummy('u') u2 = Dummy('u2') # u2 == x/f(x) r = match # d+e*diff(f(x),x) C1 = get_numbered_constants(eq, num=1) xarg = match.get('xarg', 0) # If xarg present take xarg, else zero yarg = match.get('yarg', 0) # If yarg present take yarg, else zero int = Integral( simplify( (-r[r['d']]/(r[r['e']] + u2*r[r['d']])).subs({x: u2, r['y']: 1})), (u2, None, x/f(x))) sol = logcombine(Eq(log(f(x)), int + log(C1)), force=True) sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x)))) return sol # XXX: Should this function maybe go somewhere else? def homogeneous_order(eq, *symbols): r""" Returns the order `n` if `g` is homogeneous and ``None`` if it is not homogeneous. Determines if a function is homogeneous and if so of what order. A function `f(x, y, \cdots)` is homogeneous of order `n` if `f(t x, t y, \cdots) = t^n f(x, y, \cdots)`. If the function is of two variables, `F(x, y)`, then `f` being homogeneous of any order is equivalent to being able to rewrite `F(x, y)` as `G(x/y)` or `H(y/x)`. This fact is used to solve 1st order ordinary differential equations whose coefficients are homogeneous of the same order (see the docstrings of :py:meth:`~solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep` and :py:meth:`~solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`). Symbols can be functions, but every argument of the function must be a symbol, and the arguments of the function that appear in the expression must match those given in the list of symbols. 
If a declared function appears with different arguments than given in the list of symbols, ``None`` is returned. Examples ======== >>> from sympy import Function, homogeneous_order, sqrt >>> from sympy.abc import x, y >>> f = Function('f') >>> homogeneous_order(f(x), f(x)) is None True >>> homogeneous_order(f(x,y), f(y, x), x, y) is None True >>> homogeneous_order(f(x), f(x), x) 1 >>> homogeneous_order(x**2*f(x)/sqrt(x**2+f(x)**2), x, f(x)) 2 >>> homogeneous_order(x**2+f(x), x, f(x)) is None True """ if not symbols: raise ValueError("homogeneous_order: no symbols were given.") symset = set(symbols) eq = sympify(eq) # The following are not supported if eq.has(Order, Derivative): return None # These are all constants if (eq.is_Number or eq.is_NumberSymbol or eq.is_number ): return S.Zero # Replace all functions with dummy variables dum = numbered_symbols(prefix='d', cls=Dummy) newsyms = set() for i in [j for j in symset if getattr(j, 'is_Function')]: iargs = set(i.args) if iargs.difference(symset): return None else: dummyvar = next(dum) eq = eq.subs(i, dummyvar) symset.remove(i) newsyms.add(dummyvar) symset.update(newsyms) if not eq.free_symbols & symset: return None # assuming order of a nested function can only be equal to zero if isinstance(eq, Function): return None if homogeneous_order( eq.args[0], *tuple(symset)) != 0 else S.Zero # make the replacement of x with x*t and see if t can be factored out t = Dummy('t', positive=True) # It is sufficient that t > 0 eqs = separatevars(eq.subs([(i, t*i) for i in symset]), [t], dict=True)[t] if eqs is S.One: return S.Zero # there was no term with only t i, d = eqs.as_independent(t, as_Add=False) b, e = d.as_base_exp() if b == t: return e def ode_1st_linear(eq, func, order, match): r""" Solves 1st order linear differential equations. These are differential equations of the form .. math:: dy/dx + P(x) y = Q(x)\text{.} These kinds of differential equations can be solved in a general way. The integrating factor `e^{\int P(x) \,dx}` will turn the equation into a separable equation. The general solution is:: >>> from sympy import Function, dsolve, Eq, pprint, diff, sin >>> from sympy.abc import x >>> f, P, Q = map(Function, ['f', 'P', 'Q']) >>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x)) >>> pprint(genform) d P(x)*f(x) + --(f(x)) = Q(x) dx >>> pprint(dsolve(genform, f(x), hint='1st_linear_Integral')) / / \ | | | | | / | / | | | | | | | | P(x) dx | - | P(x) dx | | | | | | | / | / f(x) = |C1 + | Q(x)*e dx|*e | | | \ / / Examples ======== >>> f = Function('f') >>> pprint(dsolve(Eq(x*diff(f(x), x) - f(x), x**2*sin(x)), ... f(x), '1st_linear')) f(x) = x*(C1 - cos(x)) References ========== - http://en.wikipedia.org/wiki/Linear_differential_equation#First_order_equation - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 92 # indirect doctest """ x = func.args[0] f = func.func r = match # a*diff(f(x),x) + b*f(x) + c C1 = get_numbered_constants(eq, num=1) t = exp(Integral(r[r['b']]/r[r['a']], x)) tt = Integral(t*(-r[r['c']]/r[r['a']]), x) f = match.get('u', f(x)) # take almost-linear u if present, else f(x) return Eq(f, (tt + C1)/t) def ode_Bernoulli(eq, func, order, match): r""" Solves Bernoulli differential equations. These are equations of the form .. math:: dy/dx + P(x) y = Q(x) y^n\text{, }n \ne 1`\text{.} The substitution `w = 1/y^{1-n}` will transform an equation of this form into one that is linear (see the docstring of :py:meth:`~sympy.solvers.ode.ode_1st_linear`). 
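    Concretely, with `w = y^{1 - n}` (equivalently `w = 1/y^{n - 1}`) one has
    `w' = (1 - n) y^{-n} y'`, so dividing the Bernoulli equation by `y^n` and
    multiplying by `1 - n` gives the linear equation
    `w' + (1 - n) P(x) w = (1 - n) Q(x)`.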
The general solution is:: >>> from sympy import Function, dsolve, Eq, pprint >>> from sympy.abc import x, n >>> f, P, Q = map(Function, ['f', 'P', 'Q']) >>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)**n) >>> pprint(genform) d n P(x)*f(x) + --(f(x)) = Q(x)*f (x) dx >>> pprint(dsolve(genform, f(x), hint='Bernoulli_Integral')) #doctest: +SKIP 1 ---- 1 - n // / \ \ || | | | || | / | / | || | | | | | || | (1 - n)* | P(x) dx | (-1 + n)* | P(x) dx| || | | | | | || | / | / | f(x) = ||C1 + (-1 + n)* | -Q(x)*e dx|*e | || | | | \\ / / / Note that the equation is separable when `n = 1` (see the docstring of :py:meth:`~sympy.solvers.ode.ode_separable`). >>> pprint(dsolve(Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)), f(x), ... hint='separable_Integral')) f(x) / | / | 1 | | - dy = C1 + | (-P(x) + Q(x)) dx | y | | / / Examples ======== >>> from sympy import Function, dsolve, Eq, pprint, log >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(Eq(x*f(x).diff(x) + f(x), log(x)*f(x)**2), ... f(x), hint='Bernoulli')) 1 f(x) = ------------------- / log(x) 1\ x*|C1 + ------ + -| \ x x/ References ========== - http://en.wikipedia.org/wiki/Bernoulli_differential_equation - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 95 # indirect doctest """ x = func.args[0] f = func.func r = match # a*diff(f(x),x) + b*f(x) + c*f(x)**n, n != 1 C1 = get_numbered_constants(eq, num=1) t = exp((1 - r[r['n']])*Integral(r[r['b']]/r[r['a']], x)) tt = (r[r['n']] - 1)*Integral(t*r[r['c']]/r[r['a']], x) return Eq(f(x), ((tt + C1)/t)**(1/(1 - r[r['n']]))) def ode_Riccati_special_minus2(eq, func, order, match): r""" The general Riccati equation has the form .. math:: dy/dx = f(x) y^2 + g(x) y + h(x)\text{.} While it does not have a general solution [1], the "special" form, `dy/dx = a y^2 - b x^c`, does have solutions in many cases [2]. This routine returns a solution for `a(dy/dx) = b y^2 + c y/x + d/x^2` that is obtained by using a suitable change of variables to reduce it to the special form and is valid when neither `a` nor `b` are zero and either `c` or `d` is zero. >>> from sympy.abc import x, y, a, b, c, d >>> from sympy.solvers.ode import dsolve, checkodesol >>> from sympy import pprint, Function >>> f = Function('f') >>> y = f(x) >>> genform = a*y.diff(x) - (b*y**2 + c*y/x + d/x**2) >>> sol = dsolve(genform, y) >>> pprint(sol, wrap_line=False) / / __________________ \\ | __________________ | / 2 || | / 2 | \/ 4*b*d - (a + c) *log(x)|| -|a + c - \/ 4*b*d - (a + c) *tan|C1 + ----------------------------|| \ \ 2*a // f(x) = ------------------------------------------------------------------------ 2*b*x >>> checkodesol(genform, sol, order=1)[0] True References ========== 1. http://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Riccati 2. http://eqworld.ipmnet.ru/en/solutions/ode/ode0106.pdf - http://eqworld.ipmnet.ru/en/solutions/ode/ode0123.pdf """ x = func.args[0] f = func.func r = match # a2*diff(f(x),x) + b2*f(x) + c2*f(x)/x + d2/x**2 a2, b2, c2, d2 = [r[r[s]] for s in 'a2 b2 c2 d2'.split()] C1 = get_numbered_constants(eq, num=1) mu = sqrt(4*d2*b2 - (a2 - c2)**2) return Eq(f(x), (a2 - c2 - mu*tan(mu/(2*a2)*log(x) + C1))/(2*b2*x)) def ode_Liouville(eq, func, order, match): r""" Solves 2nd order Liouville differential equations. The general form of a Liouville ODE is .. math:: \frac{d^2 y}{dx^2} + g(y) \left(\! 
\frac{dy}{dx}\!\right)^2 + h(x) \frac{dy}{dx}\text{.} The general solution is: >>> from sympy import Function, dsolve, Eq, pprint, diff >>> from sympy.abc import x >>> f, g, h = map(Function, ['f', 'g', 'h']) >>> genform = Eq(diff(f(x),x,x) + g(f(x))*diff(f(x),x)**2 + ... h(x)*diff(f(x),x), 0) >>> pprint(genform) 2 2 /d \ d d g(f(x))*|--(f(x))| + h(x)*--(f(x)) + ---(f(x)) = 0 \dx / dx 2 dx >>> pprint(dsolve(genform, f(x), hint='Liouville_Integral')) f(x) / / | | | / | / | | | | | - | h(x) dx | | g(y) dy | | | | | / | / C1 + C2* | e dx + | e dy = 0 | | / / Examples ======== >>> from sympy import Function, dsolve, Eq, pprint >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(diff(f(x), x, x) + diff(f(x), x)**2/f(x) + ... diff(f(x), x)/x, f(x), hint='Liouville')) ________________ ________________ [f(x) = -\/ C1 + C2*log(x) , f(x) = \/ C1 + C2*log(x) ] References ========== - Goldstein and Braun, "Advanced Methods for the Solution of Differential Equations", pp. 98 - http://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Liouville # indirect doctest """ # Liouville ODE: # f(x).diff(x, 2) + g(f(x))*(f(x).diff(x, 2))**2 + h(x)*f(x).diff(x) # See Goldstein and Braun, "Advanced Methods for the Solution of # Differential Equations", pg. 98, as well as # http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville x = func.args[0] f = func.func r = match # f(x).diff(x, 2) + g*f(x).diff(x)**2 + h*f(x).diff(x) y = r['y'] C1, C2 = get_numbered_constants(eq, num=2) int = Integral(exp(Integral(r['g'], y)), (y, None, f(x))) sol = Eq(int + C1*Integral(exp(-Integral(r['h'], x)), x) + C2, 0) return sol def ode_2nd_power_series_ordinary(eq, func, order, match): r""" Gives a power series solution to a second order homogeneous differential equation with polynomial coefficients at an ordinary point. A homogenous differential equation is of the form .. math :: P(x)\frac{d^2y}{dx^2} + Q(x)\frac{dy}{dx} + R(x) = 0 For simplicity it is assumed that `P(x)`, `Q(x)` and `R(x)` are polynomials, it is sufficient that `\frac{Q(x)}{P(x)}` and `\frac{R(x)}{P(x)}` exists at `x_{0}`. A recurrence relation is obtained by substituting `y` as `\sum_{n=0}^\infty a_{n}x^{n}`, in the differential equation, and equating the nth term. Using this relation various terms can be generated. Examples ======== >>> from sympy import dsolve, Function, pprint >>> from sympy.abc import x, y >>> f = Function("f") >>> eq = f(x).diff(x, 2) + f(x) >>> pprint(dsolve(eq, hint='2nd_power_series_ordinary')) / 4 2 \ / 2 \ |x x | | x | / 6\ f(x) = C2*|-- - -- + 1| + C1*x*|- -- + 1| + O\x / \24 2 / \ 6 / References ========== - http://tutorial.math.lamar.edu/Classes/DE/SeriesSolutions.aspx - George E. Simmons, "Differential Equations with Applications and Historical Notes", p.p 176 - 184 """ x = func.args[0] f = func.func C0, C1 = get_numbered_constants(eq, num=2) n = Dummy("n", integer=True) s = Wild("s") k = Wild("k", exclude=[x]) x0 = match.get('x0') terms = match.get('terms', 5) p = match[match['a3']] q = match[match['b3']] r = match[match['c3']] seriesdict = {} recurr = Function("r") # Generating the recurrence relation which works this way: # for the second order term the summation begins at n = 2. The coefficients # p is multiplied with an*(n - 1)*(n - 2)*x**n-2 and a substitution is made such that # the exponent of x becomes n. # For example, if p is x, then the second degree recurrence term is # an*(n - 1)*(n - 2)*x**n-1, substituting (n - 1) as n, it transforms to # an+1*n*(n - 1)*x**n. 
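    # In other words, every contribution is re-indexed so that it appears as a
    # coefficient of x**n, which lets the second, first and zeroth order terms
    # be collected into a single recurrence relation below.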
# A similar process is done with the first order and zeroth order term. coefflist = [(recurr(n), r), (n*recurr(n), q), (n*(n - 1)*recurr(n), p)] for index, coeff in enumerate(coefflist): if coeff[1]: f2 = powsimp(expand((coeff[1]*(x - x0)**(n - index)).subs(x, x + x0))) if f2.is_Add: addargs = f2.args else: addargs = [f2] for arg in addargs: powm = arg.match(s*x**k) term = coeff[0]*powm[s] if not powm[k].is_Symbol: term = term.subs(n, n - powm[k].as_independent(n)[0]) startind = powm[k].subs(n, index) # Seeing if the startterm can be reduced further. # If it vanishes for n lesser than startind, it is # equal to summation from n. if startind: for i in reversed(range(startind)): if not term.subs(n, i): seriesdict[term] = i else: seriesdict[term] = i + 1 break else: seriesdict[term] = S(0) # Stripping of terms so that the sum starts with the same number. teq = S(0) suminit = seriesdict.values() rkeys = seriesdict.keys() req = Add(*rkeys) if any(suminit): maxval = max(suminit) for term in seriesdict: val = seriesdict[term] if val != maxval: for i in range(val, maxval): teq += term.subs(n, val) finaldict = {} if teq: fargs = teq.atoms(AppliedUndef) if len(fargs) == 1: finaldict[fargs.pop()] = 0 else: maxf = max(fargs, key = lambda x: x.args[0]) sol = solve(teq, maxf) if isinstance(sol, list): sol = sol[0] finaldict[maxf] = sol # Finding the recurrence relation in terms of the largest term. fargs = req.atoms(AppliedUndef) maxf = max(fargs, key = lambda x: x.args[0]) minf = min(fargs, key = lambda x: x.args[0]) if minf.args[0].is_Symbol: startiter = 0 else: startiter = -minf.args[0].as_independent(n)[0] lhs = maxf rhs = solve(req, maxf) if isinstance(rhs, list): rhs = rhs[0] # Checking how many values are already present tcounter = len([t for t in finaldict.values() if t]) for _ in range(tcounter, terms - 3): # Assuming c0 and c1 to be arbitrary check = rhs.subs(n, startiter) nlhs = lhs.subs(n, startiter) nrhs = check.subs(finaldict) finaldict[nlhs] = nrhs startiter += 1 # Post processing series = C0 + C1*(x - x0) for term in finaldict: if finaldict[term]: fact = term.args[0] series += (finaldict[term].subs([(recurr(0), C0), (recurr(1), C1)])*( x - x0)**fact) series = collect(expand_mul(series), [C0, C1]) + Order(x**terms) return Eq(f(x), series) def ode_2nd_power_series_regular(eq, func, order, match): r""" Gives a power series solution to a second order homogeneous differential equation with polynomial coefficients at a regular point. A second order homogenous differential equation is of the form .. math :: P(x)\frac{d^2y}{dx^2} + Q(x)\frac{dy}{dx} + R(x) = 0 A point is said to regular singular at `x0` if `x - x0\frac{Q(x)}{P(x)}` and `(x - x0)^{2}\frac{R(x)}{P(x)}` are analytic at `x0`. For simplicity `P(x)`, `Q(x)` and `R(x)` are assumed to be polynomials. The algorithm for finding the power series solutions is: 1. Try expressing `(x - x0)P(x)` and `((x - x0)^{2})Q(x)` as power series solutions about x0. Find `p0` and `q0` which are the constants of the power series expansions. 2. Solve the indicial equation `f(m) = m(m - 1) + m*p0 + q0`, to obtain the roots `m1` and `m2` of the indicial equation. 3. If `m1 - m2` is a non integer there exists two series solutions. If `m1 = m2`, there exists only one solution. If `m1 - m2` is an integer, then the existence of one solution is confirmed. The other solution may or may not exist. The power series solution is of the form `x^{m}\sum_{n=0}^\infty a_{n}x^{n}`. The coefficients are determined by the following recurrence relation. 
`a_{n} = -\frac{\sum_{k=0}^{n-1} q_{n-k} + (m + k)p_{n-k}}{f(m + n)}`. For the case in which `m1 - m2` is an integer, it can be seen from the recurrence relation that for the lower root `m`, when `n` equals the difference of both the roots, the denominator becomes zero. So if the numerator is not equal to zero, a second series solution exists. Examples ======== >>> from sympy import dsolve, Function, pprint >>> from sympy.abc import x, y >>> f = Function("f") >>> eq = x*(f(x).diff(x, 2)) + 2*(f(x).diff(x)) + x*f(x) >>> pprint(dsolve(eq)) / 6 4 2 \ | x x x | / 4 2 \ C1*|- --- + -- - -- + 1| | x x | \ 720 24 2 / / 6\ f(x) = C2*|--- - -- + 1| + ------------------------ + O\x / \120 6 / x References ========== - George E. Simmons, "Differential Equations with Applications and Historical Notes", p.p 176 - 184 """ x = func.args[0] f = func.func C0, C1 = get_numbered_constants(eq, num=2) n = Dummy("n") m = Dummy("m") # for solving the indicial equation s = Wild("s") k = Wild("k", exclude=[x]) x0 = match.get('x0') terms = match.get('terms', 5) p = match['p'] q = match['q'] # Generating the indicial equation indicial = [] for term in [p, q]: if not term.has(x): indicial.append(term) else: term = series(term, n=1, x0=x0) if isinstance(term, Order): indicial.append(S(0)) else: for arg in term.args: if not arg.has(x): indicial.append(arg) break p0, q0 = indicial sollist = solve(m*(m - 1) + m*p0 + q0, m) if sollist and isinstance(sollist, list) and all( [sol.is_real for sol in sollist]): serdict1 = {} serdict2 = {} if len(sollist) == 1: # Only one series solution exists in this case. m1 = m2 = sollist.pop() if terms-m1-1 <= 0: return Eq(f(x), Order(terms)) serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0) else: m1 = sollist[0] m2 = sollist[1] if m1 < m2: m1, m2 = m2, m1 # Irrespective of whether m1 - m2 is an integer or not, one # Frobenius series solution exists. serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0) if not (m1 - m2).is_integer: # Second frobenius series solution exists. serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1) else: # Check if second frobenius series solution exists. serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1, check=m1) if serdict1: finalseries1 = C0 for key in serdict1: power = int(key.name[1:]) finalseries1 += serdict1[key]*(x - x0)**power finalseries1 = (x - x0)**m1*finalseries1 finalseries2 = S(0) if serdict2: for key in serdict2: power = int(key.name[1:]) finalseries2 += serdict2[key]*(x - x0)**power finalseries2 += C1 finalseries2 = (x - x0)**m2*finalseries2 return Eq(f(x), collect(finalseries1 + finalseries2, [C0, C1]) + Order(x**terms)) def _frobenius(n, m, p0, q0, p, q, x0, x, c, check=None): r""" Returns a dict with keys as coefficients and values as their values in terms of C0 """ n = int(n) # In cases where m1 - m2 is not an integer m2 = check d = Dummy("d") numsyms = numbered_symbols("C", start=0) numsyms = [next(numsyms) for i in range(n + 1)] C0 = Symbol("C0") serlist = [] for ser in [p, q]: # Order term not present if ser.is_polynomial(x) and Poly(ser, x).degree() <= n: if x0: ser = ser.subs(x, x + x0) dict_ = Poly(ser, x).as_dict() # Order term present else: tseries = series(ser, x=x0, n=n+1) # Removing order dict_ = Poly(list(ordered(tseries.args))[: -1], x).as_dict() # Fill in with zeros, if coefficients are zero. 
for i in range(n + 1): if (i,) not in dict_: dict_[(i,)] = S(0) serlist.append(dict_) pseries = serlist[0] qseries = serlist[1] indicial = d*(d - 1) + d*p0 + q0 frobdict = {} for i in range(1, n + 1): num = c*(m*pseries[(i,)] + qseries[(i,)]) for j in range(1, i): sym = Symbol("C" + str(j)) num += frobdict[sym]*((m + j)*pseries[(i - j,)] + qseries[(i - j,)]) # Checking for cases when m1 - m2 is an integer. If num equals zero # then a second Frobenius series solution cannot be found. If num is not zero # then set constant as zero and proceed. if m2 is not None and i == m2 - m: if num: return False else: frobdict[numsyms[i]] = S(0) else: frobdict[numsyms[i]] = -num/(indicial.subs(d, m+i)) return frobdict def _nth_linear_match(eq, func, order): r""" Matches a differential equation to the linear form: .. math:: a_n(x) y^{(n)} + \cdots + a_1(x)y' + a_0(x) y + B(x) = 0 Returns a dict of order:coeff terms, where order is the order of the derivative on each term, and coeff is the coefficient of that derivative. The key ``-1`` holds the function `B(x)`. Returns ``None`` if the ODE is not linear. This function assumes that ``func`` has already been checked to be good. Examples ======== >>> from sympy import Function, cos, sin >>> from sympy.abc import x >>> from sympy.solvers.ode import _nth_linear_match >>> f = Function('f') >>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) + ... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) - ... sin(x), f(x), 3) {-1: x - sin(x), 0: -1, 1: cos(x) + 2, 2: x, 3: 1} >>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) + ... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) - ... sin(f(x)), f(x), 3) == None True """ x = func.args[0] one_x = {x} terms = {i: S.Zero for i in range(-1, order + 1)} for i in Add.make_args(eq): if not i.has(func): terms[-1] += i else: c, f = i.as_independent(func) if not ((isinstance(f, Derivative) and set(f.variables) == one_x) \ or f == func): return None else: terms[len(f.args[1:])] += c return terms def ode_nth_linear_euler_eq_homogeneous(eq, func, order, match, returns='sol'): r""" Solves an `n`\th order linear homogeneous variable-coefficient Cauchy-Euler equidimensional ordinary differential equation. This is an equation with form `0 = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x) \cdots`. These equations can be solved in a general manner, by substituting solutions of the form `f(x) = x^r`, and deriving a characteristic equation for `r`. When there are repeated roots, we include extra terms of the form `C_{r k} \ln^k(x) x^r`, where `C_{r k}` is an arbitrary integration constant, `r` is a root of the characteristic equation, and `k` ranges over the multiplicity of `r`. In the cases where the roots are complex, solutions of the form `C_1 x^a \sin(b \log(x)) + C_2 x^a \cos(b \log(x))` are returned, based on expansions with Eulers formula. The general solution is the sum of the terms found. If SymPy cannot find exact roots to the characteristic equation, a :py:class:`~sympy.polys.rootoftools.CRootOf` instance will be returned instead. >>> from sympy import Function, dsolve, Eq >>> from sympy.abc import x >>> f = Function('f') >>> dsolve(4*x**2*f(x).diff(x, 2) + f(x), f(x), ... hint='nth_linear_euler_eq_homogeneous') ... # doctest: +NORMALIZE_WHITESPACE Eq(f(x), sqrt(x)*(C1 + C2*log(x))) Note that because this method does not involve integration, there is no ``nth_linear_euler_eq_homogeneous_Integral`` hint. The following is for internal use: - ``returns = 'sol'`` returns the solution to the ODE. 
- ``returns = 'list'`` returns a list of linearly independent solutions, corresponding to the fundamental solution set, for use with non homogeneous solution methods like variation of parameters and undetermined coefficients. Note that, though the solutions should be linearly independent, this function does not explicitly check that. You can do ``assert simplify(wronskian(sollist)) != 0`` to check for linear independence. Also, ``assert len(sollist) == order`` will need to pass. - ``returns = 'both'``, return a dictionary ``{'sol': <solution to ODE>, 'list': <list of linearly independent solutions>}``. Examples ======== >>> from sympy import Function, dsolve, pprint >>> from sympy.abc import x >>> f = Function('f') >>> eq = f(x).diff(x, 2)*x**2 - 4*f(x).diff(x)*x + 6*f(x) >>> pprint(dsolve(eq, f(x), ... hint='nth_linear_euler_eq_homogeneous')) 2 f(x) = x *(C1 + C2*x) References ========== - http://en.wikipedia.org/wiki/Cauchy%E2%80%93Euler_equation - C. Bender & S. Orszag, "Advanced Mathematical Methods for Scientists and Engineers", Springer 1999, pp. 12 # indirect doctest """ global collectterms collectterms = [] x = func.args[0] f = func.func r = match # First, set up characteristic equation. chareq, symbol = S.Zero, Dummy('x') for i in r.keys(): if not isinstance(i, str) and i >= 0: chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand() chareq = Poly(chareq, symbol) chareqroots = [rootof(chareq, k) for k in range(chareq.degree())] # A generator of constants constants = list(get_numbered_constants(eq, num=chareq.degree()*2)) constants.reverse() # Create a dict root: multiplicity or charroots charroots = defaultdict(int) for root in chareqroots: charroots[root] += 1 gsol = S(0) # We need keep track of terms so we can run collect() at the end. # This is necessary for constantsimp to work properly. ln = log for root, multiplicity in charroots.items(): for i in range(multiplicity): if isinstance(root, RootOf): gsol += (x**root) * constants.pop() if multiplicity != 1: raise ValueError("Value should be 1") collectterms = [(0, root, 0)] + collectterms elif root.is_real: gsol += ln(x)**i*(x**root) * constants.pop() collectterms = [(i, root, 0)] + collectterms else: reroot = re(root) imroot = im(root) gsol += ln(x)**i * (x**reroot) * ( constants.pop() * sin(abs(imroot)*ln(x)) + constants.pop() * cos(imroot*ln(x))) # Preserve ordering (multiplicity, real part, imaginary part) # It will be assumed implicitly when constructing # fundamental solution sets. collectterms = [(i, reroot, imroot)] + collectterms if returns == 'sol': return Eq(f(x), gsol) elif returns in ('list' 'both'): # HOW TO TEST THIS CODE? (dsolve does not pass 'returns' through) # Create a list of (hopefully) linearly independent solutions gensols = [] # Keep track of when to use sin or cos for nonzero imroot for i, reroot, imroot in collectterms: if imroot == 0: gensols.append(ln(x)**i*x**reroot) else: sin_form = ln(x)**i*x**reroot*sin(abs(imroot)*ln(x)) if sin_form in gensols: cos_form = ln(x)**i*x**reroot*cos(imroot*ln(x)) gensols.append(cos_form) else: gensols.append(sin_form) if returns == 'list': return gensols else: return {'sol': Eq(f(x), gsol), 'list': gensols} else: raise ValueError('Unknown value for key "returns".') def ode_nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients(eq, func, order, match, returns='sol'): r""" Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional ordinary differential equation using undetermined coefficients. 
This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x) \cdots`. These equations can be solved in a general manner, by substituting solutions of the form `x = exp(t)`, and deriving a characteristic equation of form `g(exp(t)) = b_0 f(t) + b_1 f'(t) + b_2 f''(t) \cdots` which can be then solved by nth_linear_constant_coeff_undetermined_coefficients if g(exp(t)) has finite number of lineary independent derivatives. Functions that fit this requirement are finite sums functions of the form `a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, where `i` is a non-negative integer and `a`, `b`, `c`, and `d` are constants. For example any polynomial in `x`, functions like `x^2 e^{2 x}`, `x \sin(x)`, and `e^x \cos(x)` can all be used. Products of `\sin`'s and `\cos`'s have a finite number of derivatives, because they can be expanded into `\sin(a x)` and `\cos(b x)` terms. However, SymPy currently cannot do that expansion, so you will need to manually rewrite the expression in terms of the above to use this method. So, for example, you will need to manually convert `\sin^2(x)` into `(1 + \cos(2 x))/2` to properly apply the method of undetermined coefficients on it. After replacement of x by exp(t), this method works by creating a trial function from the expression and all of its linear independent derivatives and substituting them into the original ODE. The coefficients for each term will be a system of linear equations, which are be solved for and substituted, giving the solution. If any of the trial functions are linearly dependent on the solution to the homogeneous equation, they are multiplied by sufficient `x` to make them linearly independent. Examples ======== >>> from sympy import dsolve, Function, Derivative, log >>> from sympy.abc import x >>> f = Function('f') >>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - log(x) >>> dsolve(eq, f(x), ... hint='nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients').expand() Eq(f(x), C1*x + C2*x**2 + log(x)/2 + 3/4) """ x = func.args[0] f = func.func r = match chareq, eq, symbol = S.Zero, S.Zero, Dummy('x') for i in r.keys(): if not isinstance(i, str) and i >= 0: chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand() for i in range(1,degree(Poly(chareq, symbol))+1): eq += chareq.coeff(symbol**i)*diff(f(x), x, i) if chareq.as_coeff_add(symbol)[0]: eq += chareq.as_coeff_add(symbol)[0]*f(x) e, re = posify(r[-1].subs(x, exp(x))) eq += e.subs(re) match = _nth_linear_match(eq, f(x), ode_order(eq, f(x))) match['trialset'] = r['trialset'] return ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match).subs(x, log(x)).subs(f(log(x)), f(x)).expand() def ode_nth_linear_euler_eq_nonhomogeneous_variation_of_parameters(eq, func, order, match, returns='sol'): r""" Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional ordinary differential equation using variation of parameters. This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x) \cdots`. This method works by assuming that the particular solution takes the form .. math:: \sum_{x=1}^{n} c_i(x) y_i(x) {a_n} {x^n} \text{,} where `y_i` is the `i`\th solution to the homogeneous equation. The solution is then solved using Wronskian's and Cramer's Rule. The particular solution is given by multiplying eq given below with `a_n x^{n}` .. 
math:: \sum_{x=1}^n \left( \int \frac{W_i(x)}{W(x)} \,dx \right) y_i(x) \text{,} where `W(x)` is the Wronskian of the fundamental system (the system of `n` linearly independent solutions to the homogeneous equation), and `W_i(x)` is the Wronskian of the fundamental system with the `i`\th column replaced with `[0, 0, \cdots, 0, \frac{x^{- n}}{a_n} g{\left (x \right )}]`. This method is general enough to solve any `n`\th order inhomogeneous linear differential equation, but sometimes SymPy cannot simplify the Wronskian well enough to integrate it. If this method hangs, try using the ``nth_linear_constant_coeff_variation_of_parameters_Integral`` hint and simplifying the integrals manually. Also, prefer using ``nth_linear_constant_coeff_undetermined_coefficients`` when it applies, because it doesn't use integration, making it faster and more reliable. Warning, using simplify=False with 'nth_linear_constant_coeff_variation_of_parameters' in :py:meth:`~sympy.solvers.ode.dsolve` may cause it to hang, because it will not attempt to simplify the Wronskian before integrating. It is recommended that you only use simplify=False with 'nth_linear_constant_coeff_variation_of_parameters_Integral' for this method, especially if the solution to the homogeneous equation has trigonometric functions in it. Examples ======== >>> from sympy import Function, dsolve, Derivative >>> from sympy.abc import x >>> f = Function('f') >>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - x**4 >>> dsolve(eq, f(x), ... hint='nth_linear_euler_eq_nonhomogeneous_variation_of_parameters').expand() Eq(f(x), C1*x + C2*x**2 + x**4/6) """ x = func.args[0] f = func.func r = match gensol = ode_nth_linear_euler_eq_homogeneous(eq, func, order, match, returns='both') match.update(gensol) r[-1] = r[-1]/r[ode_order(eq, f(x))] sol = _solve_variation_of_parameters(eq, func, order, match) return Eq(f(x), r['sol'].rhs + (sol.rhs - r['sol'].rhs)*r[ode_order(eq, f(x))]) def ode_almost_linear(eq, func, order, match): r""" Solves an almost-linear differential equation. The general form of an almost linear differential equation is .. math:: f(x) g(y) y + k(x) l(y) + m(x) = 0 \text{where} l'(y) = g(y)\text{.} This can be solved by substituting `l(y) = u(y)`. Making the given substitution reduces it to a linear differential equation of the form `u' + P(x) u + Q(x) = 0`. The general solution is >>> from sympy import Function, dsolve, Eq, pprint >>> from sympy.abc import x, y, n >>> f, g, k, l = map(Function, ['f', 'g', 'k', 'l']) >>> genform = Eq(f(x)*(l(y).diff(y)) + k(x)*l(y) + g(x)) >>> pprint(genform) d f(x)*--(l(y)) + g(x) + k(x)*l(y) = 0 dy >>> pprint(dsolve(genform, hint = 'almost_linear')) / // -y*g(x) \\ | || -------- for k(x) = 0|| | || f(x) || -y*k(x) | || || -------- | || y*k(x) || f(x) l(y) = |C1 + |< ------ ||*e | || f(x) || | ||-g(x)*e || | ||-------------- otherwise || | || k(x) || \ \\ // See Also ======== :meth:`sympy.solvers.ode.ode_1st_linear` Examples ======== >>> from sympy import Function, Derivative, pprint >>> from sympy.solvers.ode import dsolve, classify_ode >>> from sympy.abc import x >>> f = Function('f') >>> d = f(x).diff(x) >>> eq = x*d + x*f(x) + 1 >>> dsolve(eq, f(x), hint='almost_linear') Eq(f(x), (C1 - Ei(x))*exp(-x)) >>> pprint(dsolve(eq, f(x), hint='almost_linear')) -x f(x) = (C1 - Ei(x))*e References ========== - Joel Moses, "Symbolic Integration - The Stormy Decade", Communications of the ACM, Volume 14, Number 8, August 1971, pp. 
558 """ # Since ode_1st_linear has already been implemented, and the # coefficients have been modified to the required form in # classify_ode, just passing eq, func, order and match to # ode_1st_linear will give the required output. return ode_1st_linear(eq, func, order, match) def _linear_coeff_match(expr, func): r""" Helper function to match hint ``linear_coefficients``. Matches the expression to the form `(a_1 x + b_1 f(x) + c_1)/(a_2 x + b_2 f(x) + c_2)` where the following conditions hold: 1. `a_1`, `b_1`, `c_1`, `a_2`, `b_2`, `c_2` are Rationals; 2. `c_1` or `c_2` are not equal to zero; 3. `a_2 b_1 - a_1 b_2` is not equal to zero. Return ``xarg``, ``yarg`` where 1. ``xarg`` = `(b_2 c_1 - b_1 c_2)/(a_2 b_1 - a_1 b_2)` 2. ``yarg`` = `(a_1 c_2 - a_2 c_1)/(a_2 b_1 - a_1 b_2)` Examples ======== >>> from sympy import Function >>> from sympy.abc import x >>> from sympy.solvers.ode import _linear_coeff_match >>> from sympy.functions.elementary.trigonometric import sin >>> f = Function('f') >>> _linear_coeff_match(( ... (-25*f(x) - 8*x + 62)/(4*f(x) + 11*x - 11)), f(x)) (1/9, 22/9) >>> _linear_coeff_match( ... sin((-5*f(x) - 8*x + 6)/(4*f(x) + x - 1)), f(x)) (19/27, 2/27) >>> _linear_coeff_match(sin(f(x)/x), f(x)) """ f = func.func x = func.args[0] def abc(eq): r''' Internal function of _linear_coeff_match that returns Rationals a, b, c if eq is a*x + b*f(x) + c, else None. ''' eq = _mexpand(eq) c = eq.as_independent(x, f(x), as_Add=True)[0] if not c.is_Rational: return a = eq.coeff(x) if not a.is_Rational: return b = eq.coeff(f(x)) if not b.is_Rational: return if eq == a*x + b*f(x) + c: return a, b, c def match(arg): r''' Internal function of _linear_coeff_match that returns Rationals a1, b1, c1, a2, b2, c2 and a2*b1 - a1*b2 of the expression (a1*x + b1*f(x) + c1)/(a2*x + b2*f(x) + c2) if one of c1 or c2 and a2*b1 - a1*b2 is non-zero, else None. ''' n, d = arg.together().as_numer_denom() m = abc(n) if m is not None: a1, b1, c1 = m m = abc(d) if m is not None: a2, b2, c2 = m d = a2*b1 - a1*b2 if (c1 or c2) and d: return a1, b1, c1, a2, b2, c2, d m = [fi.args[0] for fi in expr.atoms(Function) if fi.func != f and len(fi.args) == 1 and not fi.args[0].is_Function] or {expr} m1 = match(m.pop()) if m1 and all(match(mi) == m1 for mi in m): a1, b1, c1, a2, b2, c2, denom = m1 return (b2*c1 - b1*c2)/denom, (a1*c2 - a2*c1)/denom def ode_linear_coefficients(eq, func, order, match): r""" Solves a differential equation with linear coefficients. The general form of a differential equation with linear coefficients is .. math:: y' + F\left(\!\frac{a_1 x + b_1 y + c_1}{a_2 x + b_2 y + c_2}\!\right) = 0\text{,} where `a_1`, `b_1`, `c_1`, `a_2`, `b_2`, `c_2` are constants and `a_1 b_2 - a_2 b_1 \ne 0`. This can be solved by substituting: .. math:: x = x' + \frac{b_2 c_1 - b_1 c_2}{a_2 b_1 - a_1 b_2} y = y' + \frac{a_1 c_2 - a_2 c_1}{a_2 b_1 - a_1 b_2}\text{.} This substitution reduces the equation to a homogeneous differential equation. 
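    With this choice of shift the constant terms cancel, since
    `a_1 x + b_1 y + c_1 = a_1 x' + b_1 y'` and
    `a_2 x + b_2 y + c_2 = a_2 x' + b_2 y'`, so the argument of `F` depends
    only on the ratio `y'/x'` and the shifted equation has homogeneous
    coefficients.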
See Also ======== :meth:`sympy.solvers.ode.ode_1st_homogeneous_coeff_best` :meth:`sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep` :meth:`sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep` Examples ======== >>> from sympy import Function, Derivative, pprint >>> from sympy.solvers.ode import dsolve, classify_ode >>> from sympy.abc import x >>> f = Function('f') >>> df = f(x).diff(x) >>> eq = (x + f(x) + 1)*df + (f(x) - 6*x + 1) >>> pprint(dsolve(eq, hint='linear_coefficients')) ___________ ___________ / 2 / 2 [f(x) = -x - \/ C1 + 7*x - 1, f(x) = -x + \/ C1 + 7*x - 1] References ========== - Joel Moses, "Symbolic Integration - The Stormy Decade", Communications of the ACM, Volume 14, Number 8, August 1971, pp. 558 """ return ode_1st_homogeneous_coeff_best(eq, func, order, match) def ode_separable_reduced(eq, func, order, match): r""" Solves a differential equation that can be reduced to the separable form. The general form of this equation is .. math:: y' + (y/x) H(x^n y) = 0\text{}. This can be solved by substituting `u(y) = x^n y`. The equation then reduces to the separable form `\frac{u'}{u (\mathrm{power} - H(u))} - \frac{1}{x} = 0`. The general solution is: >>> from sympy import Function, dsolve, Eq, pprint >>> from sympy.abc import x, n >>> f, g = map(Function, ['f', 'g']) >>> genform = f(x).diff(x) + (f(x)/x)*g(x**n*f(x)) >>> pprint(genform) / n \ d f(x)*g\x *f(x)/ --(f(x)) + --------------- dx x >>> pprint(dsolve(genform, hint='separable_reduced')) n x *f(x) / | | 1 | ------------ dy = C1 + log(x) | y*(n - g(y)) | / See Also ======== :meth:`sympy.solvers.ode.ode_separable` Examples ======== >>> from sympy import Function, Derivative, pprint >>> from sympy.solvers.ode import dsolve, classify_ode >>> from sympy.abc import x >>> f = Function('f') >>> d = f(x).diff(x) >>> eq = (x - x**2*f(x))*d - f(x) >>> pprint(dsolve(eq, hint='separable_reduced')) ___________ ___________ / 2 / 2 - \/ C1*x + 1 + 1 \/ C1*x + 1 + 1 [f(x) = --------------------, f(x) = ------------------] x x References ========== - Joel Moses, "Symbolic Integration - The Stormy Decade", Communications of the ACM, Volume 14, Number 8, August 1971, pp. 558 """ # Arguments are passed in a way so that they are coherent with the # ode_separable function x = func.args[0] f = func.func y = Dummy('y') u = match['u'].subs(match['t'], y) ycoeff = 1/(y*(match['power'] - u)) m1 = {y: 1, x: -1/x, 'coeff': 1} m2 = {y: ycoeff, x: 1, 'coeff': 1} r = {'m1': m1, 'm2': m2, 'y': y, 'hint': x**match['power']*f(x)} return ode_separable(eq, func, order, r) def ode_1st_power_series(eq, func, order, match): r""" The power series solution is a method which gives the Taylor series expansion to the solution of a differential equation. For a first order differential equation `\frac{dy}{dx} = h(x, y)`, a power series solution exists at a point `x = x_{0}` if `h(x, y)` is analytic at `x_{0}`. The solution is given by .. math:: y(x) = y(x_{0}) + \sum_{n = 1}^{\infty} \frac{F_{n}(x_{0},b)(x - x_{0})^n}{n!}, where `y(x_{0}) = b` is the value of y at the initial value of `x_{0}`. To compute the values of the `F_{n}(x_{0},b)` the following algorithm is followed, until the required number of terms are generated. 1. `F_1 = h(x_{0}, b)` 2. 
`F_{n+1} = \frac{\partial F_{n}}{\partial x} + \frac{\partial F_{n}}{\partial y}F_{1}` Examples ======== >>> from sympy import Function, Derivative, pprint, exp >>> from sympy.solvers.ode import dsolve >>> from sympy.abc import x >>> f = Function('f') >>> eq = exp(x)*(f(x).diff(x)) - f(x) >>> pprint(dsolve(eq, hint='1st_power_series')) 3 4 5 C1*x C1*x C1*x / 6\ f(x) = C1 + C1*x - ----- + ----- + ----- + O\x / 6 24 60 References ========== - Travis W. Walker, Analytic power series technique for solving first-order differential equations, p.p 17, 18 """ x = func.args[0] y = match['y'] f = func.func h = -match[match['d']]/match[match['e']] point = match.get('f0') value = match.get('f0val') terms = match.get('terms') # First term F = h if not h: return Eq(f(x), value) # Initialisation series = value if terms > 1: hc = h.subs({x: point, y: value}) if hc.has(oo) or hc.has(NaN) or hc.has(zoo): # Derivative does not exist, not analytic return Eq(f(x), oo) elif hc: series += hc*(x - point) for factcount in range(2, terms): Fnew = F.diff(x) + F.diff(y)*h Fnewc = Fnew.subs({x: point, y: value}) # Same logic as above if Fnewc.has(oo) or Fnewc.has(NaN) or Fnewc.has(-oo) or Fnewc.has(zoo): return Eq(f(x), oo) series += Fnewc*((x - point)**factcount)/factorial(factcount) F = Fnew series += Order(x**terms) return Eq(f(x), series) def ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match, returns='sol'): r""" Solves an `n`\th order linear homogeneous differential equation with constant coefficients. This is an equation of the form .. math:: a_n f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x) + a_0 f(x) = 0\text{.} These equations can be solved in a general manner, by taking the roots of the characteristic equation `a_n m^n + a_{n-1} m^{n-1} + \cdots + a_1 m + a_0 = 0`. The solution will then be the sum of `C_n x^i e^{r x}` terms, for each where `C_n` is an arbitrary constant, `r` is a root of the characteristic equation and `i` is one of each from 0 to the multiplicity of the root - 1 (for example, a root 3 of multiplicity 2 would create the terms `C_1 e^{3 x} + C_2 x e^{3 x}`). The exponential is usually expanded for complex roots using Euler's equation `e^{I x} = \cos(x) + I \sin(x)`. Complex roots always come in conjugate pairs in polynomials with real coefficients, so the two roots will be represented (after simplifying the constants) as `e^{a x} \left(C_1 \cos(b x) + C_2 \sin(b x)\right)`. If SymPy cannot find exact roots to the characteristic equation, a :py:class:`~sympy.polys.rootoftools.CRootOf` instance will be return instead. >>> from sympy import Function, dsolve, Eq >>> from sympy.abc import x >>> f = Function('f') >>> dsolve(f(x).diff(x, 5) + 10*f(x).diff(x) - 2*f(x), f(x), ... hint='nth_linear_constant_coeff_homogeneous') ... # doctest: +NORMALIZE_WHITESPACE Eq(f(x), C1*exp(x*CRootOf(_x**5 + 10*_x - 2, 0)) + C2*exp(x*CRootOf(_x**5 + 10*_x - 2, 1)) + C3*exp(x*CRootOf(_x**5 + 10*_x - 2, 2)) + C4*exp(x*CRootOf(_x**5 + 10*_x - 2, 3)) + C5*exp(x*CRootOf(_x**5 + 10*_x - 2, 4))) Note that because this method does not involve integration, there is no ``nth_linear_constant_coeff_homogeneous_Integral`` hint. The following is for internal use: - ``returns = 'sol'`` returns the solution to the ODE. - ``returns = 'list'`` returns a list of linearly independent solutions, for use with non homogeneous solution methods like variation of parameters and undetermined coefficients. Note that, though the solutions should be linearly independent, this function does not explicitly check that. 
You can do ``assert simplify(wronskian(sollist)) != 0`` to check for linear independence. Also, ``assert len(sollist) == order`` will need to pass. - ``returns = 'both'``, return a dictionary ``{'sol': <solution to ODE>, 'list': <list of linearly independent solutions>}``. Examples ======== >>> from sympy import Function, dsolve, pprint >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(f(x).diff(x, 4) + 2*f(x).diff(x, 3) - ... 2*f(x).diff(x, 2) - 6*f(x).diff(x) + 5*f(x), f(x), ... hint='nth_linear_constant_coeff_homogeneous')) x -2*x f(x) = (C1 + C2*x)*e + (C3*sin(x) + C4*cos(x))*e References ========== - http://en.wikipedia.org/wiki/Linear_differential_equation section: Nonhomogeneous_equation_with_constant_coefficients - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 211 # indirect doctest """ x = func.args[0] f = func.func r = match # First, set up characteristic equation. chareq, symbol = S.Zero, Dummy('x') for i in r.keys(): if type(i) == str or i < 0: pass else: chareq += r[i]*symbol**i chareq = Poly(chareq, symbol) chareqroots = [rootof(chareq, k) for k in range(chareq.degree())] chareq_is_complex = not all([i.is_real for i in chareq.all_coeffs()]) # A generator of constants constants = list(get_numbered_constants(eq, num=chareq.degree()*2)) # Create a dict root: multiplicity or charroots charroots = defaultdict(int) for root in chareqroots: charroots[root] += 1 gsol = S(0) # We need to keep track of terms so we can run collect() at the end. # This is necessary for constantsimp to work properly. global collectterms collectterms = [] gensols = [] conjugate_roots = [] # used to prevent double-use of conjugate roots for root, multiplicity in charroots.items(): for i in range(multiplicity): if isinstance(root, RootOf): gensols.append(exp(root*x)) if multiplicity != 1: raise ValueError("Value should be 1") # This ordering is important collectterms = [(0, root, 0)] + collectterms else: if chareq_is_complex: gensols.append(x**i*exp(root*x)) collectterms = [(i, root, 0)] + collectterms continue reroot = re(root) imroot = im(root) if imroot.has(atan2) and reroot.has(atan2): # Remove this condition when re and im stop returning # circular atan2 usages. gensols.append(x**i*exp(root*x)) collectterms = [(i, root, 0)] + collectterms else: if root in conjugate_roots: collectterms = [(i, reroot, imroot)] + collectterms continue if imroot == 0: gensols.append(x**i*exp(reroot*x)) collectterms = [(i, reroot, 0)] + collectterms continue conjugate_roots.append(conjugate(root)) gensols.append(x**i*exp(reroot*x) * sin(abs(imroot) * x)) gensols.append(x**i*exp(reroot*x) * cos( imroot * x)) # This ordering is important collectterms = [(i, reroot, imroot)] + collectterms if returns == 'list': return gensols elif returns in ('sol' 'both'): gsol = Add(*[i*j for (i,j) in zip(constants, gensols)]) if returns == 'sol': return Eq(f(x), gsol) else: return {'sol': Eq(f(x), gsol), 'list': gensols} else: raise ValueError('Unknown value for key "returns".') def ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match): r""" Solves an `n`\th order linear differential equation with constant coefficients using the method of undetermined coefficients. This method works on differential equations of the form .. math:: a_n f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x) + a_0 f(x) = P(x)\text{,} where `P(x)` is a function that has a finite number of linearly independent derivatives. 
Functions that fit this requirement are finite sums functions of the form `a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, where `i` is a non-negative integer and `a`, `b`, `c`, and `d` are constants. For example any polynomial in `x`, functions like `x^2 e^{2 x}`, `x \sin(x)`, and `e^x \cos(x)` can all be used. Products of `\sin`'s and `\cos`'s have a finite number of derivatives, because they can be expanded into `\sin(a x)` and `\cos(b x)` terms. However, SymPy currently cannot do that expansion, so you will need to manually rewrite the expression in terms of the above to use this method. So, for example, you will need to manually convert `\sin^2(x)` into `(1 + \cos(2 x))/2` to properly apply the method of undetermined coefficients on it. This method works by creating a trial function from the expression and all of its linear independent derivatives and substituting them into the original ODE. The coefficients for each term will be a system of linear equations, which are be solved for and substituted, giving the solution. If any of the trial functions are linearly dependent on the solution to the homogeneous equation, they are multiplied by sufficient `x` to make them linearly independent. Examples ======== >>> from sympy import Function, dsolve, pprint, exp, cos >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(f(x).diff(x, 2) + 2*f(x).diff(x) + f(x) - ... 4*exp(-x)*x**2 + cos(2*x), f(x), ... hint='nth_linear_constant_coeff_undetermined_coefficients')) / 4\ | x | -x 4*sin(2*x) 3*cos(2*x) f(x) = |C1 + C2*x + --|*e - ---------- + ---------- \ 3 / 25 25 References ========== - http://en.wikipedia.org/wiki/Method_of_undetermined_coefficients - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 221 # indirect doctest """ gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match, returns='both') match.update(gensol) return _solve_undetermined_coefficients(eq, func, order, match) def _solve_undetermined_coefficients(eq, func, order, match): r""" Helper function for the method of undetermined coefficients. See the :py:meth:`~sympy.solvers.ode.ode_nth_linear_constant_coeff_undetermined_coefficients` docstring for more information on this method. The parameter ``match`` should be a dictionary that has the following keys: ``list`` A list of solutions to the homogeneous equation, such as the list returned by ``ode_nth_linear_constant_coeff_homogeneous(returns='list')``. ``sol`` The general solution, such as the solution returned by ``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``. ``trialset`` The set of trial functions as returned by ``_undetermined_coefficients_match()['trialset']``. 
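    In practice these keys are not filled in by hand:
    :py:meth:`~sympy.solvers.ode.ode_nth_linear_constant_coeff_undetermined_coefficients`
    calls the homogeneous solver with ``returns='both'`` and merges the
    resulting ``'sol'`` and ``'list'`` entries into ``match`` before
    delegating to this helper.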
""" x = func.args[0] f = func.func r = match coeffs = numbered_symbols('a', cls=Dummy) coefflist = [] gensols = r['list'] gsol = r['sol'] trialset = r['trialset'] notneedset = set([]) newtrialset = set([]) global collectterms if len(gensols) != order: raise NotImplementedError("Cannot find " + str(order) + " solutions to the homogeneous equation necessary to apply" + " undetermined coefficients to " + str(eq) + " (number of terms != order)") usedsin = set([]) mult = 0 # The multiplicity of the root getmult = True for i, reroot, imroot in collectterms: if getmult: mult = i + 1 getmult = False if i == 0: getmult = True if imroot: # Alternate between sin and cos if (i, reroot) in usedsin: check = x**i*exp(reroot*x)*cos(imroot*x) else: check = x**i*exp(reroot*x)*sin(abs(imroot)*x) usedsin.add((i, reroot)) else: check = x**i*exp(reroot*x) if check in trialset: # If an element of the trial function is already part of the # homogeneous solution, we need to multiply by sufficient x to # make it linearly independent. We also don't need to bother # checking for the coefficients on those elements, since we # already know it will be 0. while True: if check*x**mult in trialset: mult += 1 else: break trialset.add(check*x**mult) notneedset.add(check) newtrialset = trialset - notneedset trialfunc = 0 for i in newtrialset: c = next(coeffs) coefflist.append(c) trialfunc += c*i eqs = sub_func_doit(eq, f(x), trialfunc) coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1)))) eqs = _mexpand(eqs) for i in Add.make_args(eqs): s = separatevars(i, dict=True, symbols=[x]) coeffsdict[s[x]] += s['coeff'] coeffvals = solve(list(coeffsdict.values()), coefflist) if not coeffvals: raise NotImplementedError( "Could not solve `%s` using the " "method of undetermined coefficients " "(unable to solve for coefficients)." % eq) psol = trialfunc.subs(coeffvals) return Eq(f(x), gsol.rhs + psol) def _undetermined_coefficients_match(expr, x): r""" Returns a trial function match if undetermined coefficients can be applied to ``expr``, and ``None`` otherwise. A trial expression can be found for an expression for use with the method of undetermined coefficients if the expression is an additive/multiplicative combination of constants, polynomials in `x` (the independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and `e^{a x}` terms (in other words, it has a finite number of linearly independent derivatives). Note that you may still need to multiply each term returned here by sufficient `x` to make it linearly independent with the solutions to the homogeneous equation. This is intended for internal use by ``undetermined_coefficients`` hints. SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So, for example, you will need to manually convert `\sin^2(x)` into `[1 + \cos(2 x)]/2` to properly apply the method of undetermined coefficients on it. Examples ======== >>> from sympy import log, exp >>> from sympy.solvers.ode import _undetermined_coefficients_match >>> from sympy.abc import x >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x) {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}} >>> _undetermined_coefficients_match(log(x), x) {'test': False} """ a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1) retdict = {} def _test_term(expr, x): r""" Test if ``expr`` fits the proper form for undetermined coefficients. 
""" if expr.is_Add: return all(_test_term(i, x) for i in expr.args) elif expr.is_Mul: if expr.has(sin, cos): foundtrig = False # Make sure that there is only one trig function in the args. # See the docstring. for i in expr.args: if i.has(sin, cos): if foundtrig: return False else: foundtrig = True return all(_test_term(i, x) for i in expr.args) elif expr.is_Function: if expr.func in (sin, cos, exp): if expr.args[0].match(a*x + b): return True else: return False else: return False elif expr.is_Pow and expr.base.is_Symbol and expr.exp.is_Integer and \ expr.exp >= 0: return True elif expr.is_Pow and expr.base.is_number: if expr.exp.match(a*x + b): return True else: return False elif expr.is_Symbol or expr.is_number: return True else: return False def _get_trial_set(expr, x, exprs=set([])): r""" Returns a set of trial terms for undetermined coefficients. The idea behind undetermined coefficients is that the terms expression repeat themselves after a finite number of derivatives, except for the coefficients (they are linearly dependent). So if we collect these, we should have the terms of our trial function. """ def _remove_coefficient(expr, x): r""" Returns the expression without a coefficient. Similar to expr.as_independent(x)[1], except it only works multiplicatively. """ term = S.One if expr.is_Mul: for i in expr.args: if i.has(x): term *= i elif expr.has(x): term = expr return term expr = expand_mul(expr) if expr.is_Add: for term in expr.args: if _remove_coefficient(term, x) in exprs: pass else: exprs.add(_remove_coefficient(term, x)) exprs = exprs.union(_get_trial_set(term, x, exprs)) else: term = _remove_coefficient(expr, x) tmpset = exprs.union({term}) oldset = set([]) while tmpset != oldset: # If you get stuck in this loop, then _test_term is probably # broken oldset = tmpset.copy() expr = expr.diff(x) term = _remove_coefficient(expr, x) if term.is_Add: tmpset = tmpset.union(_get_trial_set(term, x, tmpset)) else: tmpset.add(term) exprs = tmpset return exprs retdict['test'] = _test_term(expr, x) if retdict['test']: # Try to generate a list of trial solutions that will have the # undetermined coefficients. Note that if any of these are not linearly # independent with any of the solutions to the homogeneous equation, # then they will need to be multiplied by sufficient x to make them so. # This function DOES NOT do that (it doesn't even look at the # homogeneous equation). retdict['trialset'] = _get_trial_set(expr, x) return retdict def ode_nth_linear_constant_coeff_variation_of_parameters(eq, func, order, match): r""" Solves an `n`\th order linear differential equation with constant coefficients using the method of variation of parameters. This method works on any differential equations of the form .. math:: f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x) + a_0 f(x) = P(x)\text{.} This method works by assuming that the particular solution takes the form .. math:: \sum_{x=1}^{n} c_i(x) y_i(x)\text{,} where `y_i` is the `i`\th solution to the homogeneous equation. The solution is then solved using Wronskian's and Cramer's Rule. The particular solution is given by .. math:: \sum_{x=1}^n \left( \int \frac{W_i(x)}{W(x)} \,dx \right) y_i(x) \text{,} where `W(x)` is the Wronskian of the fundamental system (the system of `n` linearly independent solutions to the homogeneous equation), and `W_i(x)` is the Wronskian of the fundamental system with the `i`\th column replaced with `[0, 0, \cdots, 0, P(x)]`. 
This method is general enough to solve any `n`\th order inhomogeneous linear differential equation with constant coefficients, but sometimes SymPy cannot simplify the Wronskian well enough to integrate it. If this method hangs, try using the ``nth_linear_constant_coeff_variation_of_parameters_Integral`` hint and simplifying the integrals manually. Also, prefer using ``nth_linear_constant_coeff_undetermined_coefficients`` when it applies, because it doesn't use integration, making it faster and more reliable. Warning, using simplify=False with 'nth_linear_constant_coeff_variation_of_parameters' in :py:meth:`~sympy.solvers.ode.dsolve` may cause it to hang, because it will not attempt to simplify the Wronskian before integrating. It is recommended that you only use simplify=False with 'nth_linear_constant_coeff_variation_of_parameters_Integral' for this method, especially if the solution to the homogeneous equation has trigonometric functions in it. Examples ======== >>> from sympy import Function, dsolve, pprint, exp, log >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(f(x).diff(x, 3) - 3*f(x).diff(x, 2) + ... 3*f(x).diff(x) - f(x) - exp(x)*log(x), f(x), ... hint='nth_linear_constant_coeff_variation_of_parameters')) / 3 \ | 2 x *(6*log(x) - 11)| x f(x) = |C1 + C2*x + C3*x + ------------------|*e \ 36 / References ========== - http://en.wikipedia.org/wiki/Variation_of_parameters - http://planetmath.org/VariationOfParameters - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 233 # indirect doctest """ gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match, returns='both') match.update(gensol) return _solve_variation_of_parameters(eq, func, order, match) def _solve_variation_of_parameters(eq, func, order, match): r""" Helper function for the method of variation of parameters and nonhomogeneous euler eq. See the :py:meth:`~sympy.solvers.ode.ode_nth_linear_constant_coeff_variation_of_parameters` docstring for more information on this method. The parameter ``match`` should be a dictionary that has the following keys: ``list`` A list of solutions to the homogeneous equation, such as the list returned by ``ode_nth_linear_constant_coeff_homogeneous(returns='list')``. ``sol`` The general solution, such as the solution returned by ``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``. """ x = func.args[0] f = func.func r = match psol = 0 gensols = r['list'] gsol = r['sol'] wr = wronskian(gensols, x) if r.get('simplify', True): wr = simplify(wr) # We need much better simplification for # some ODEs. See issue 4662, for example. # To reduce commonly occuring sin(x)**2 + cos(x)**2 to 1 wr = trigsimp(wr, deep=True, recursive=True) if not wr: # The wronskian will be 0 iff the solutions are not linearly # independent. 
raise NotImplementedError("Cannot find " + str(order) + " solutions to the homogeneous equation nessesary to apply " + "variation of parameters to " + str(eq) + " (Wronskian == 0)") if len(gensols) != order: raise NotImplementedError("Cannot find " + str(order) + " solutions to the homogeneous equation nessesary to apply " + "variation of parameters to " + str(eq) + " (number of terms != order)") negoneterm = (-1)**(order) for i in gensols: psol += negoneterm*Integral(wronskian([sol for sol in gensols if sol != i], x)*r[-1]/wr, x)*i/r[order] negoneterm *= -1 if r.get('simplify', True): psol = simplify(psol) psol = trigsimp(psol, deep=True) return Eq(f(x), gsol.rhs + psol) def ode_separable(eq, func, order, match): r""" Solves separable 1st order differential equations. This is any differential equation that can be written as `P(y) \tfrac{dy}{dx} = Q(x)`. The solution can then just be found by rearranging terms and integrating: `\int P(y) \,dy = \int Q(x) \,dx`. This hint uses :py:meth:`sympy.simplify.simplify.separatevars` as its back end, so if a separable equation is not caught by this solver, it is most likely the fault of that function. :py:meth:`~sympy.simplify.simplify.separatevars` is smart enough to do most expansion and factoring necessary to convert a separable equation `F(x, y)` into the proper form `P(x)\cdot{}Q(y)`. The general solution is:: >>> from sympy import Function, dsolve, Eq, pprint >>> from sympy.abc import x >>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f']) >>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x))) >>> pprint(genform) d a(x)*b(f(x))*--(f(x)) = c(x)*d(f(x)) dx >>> pprint(dsolve(genform, f(x), hint='separable_Integral')) f(x) / / | | | b(y) | c(x) | ---- dy = C1 + | ---- dx | d(y) | a(x) | | / / Examples ======== >>> from sympy import Function, dsolve, Eq >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x), ... hint='separable', simplify=False)) / 2 \ 2 log\3*f (x) - 1/ x ---------------- = C1 + -- 6 2 References ========== - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 52 # indirect doctest """ x = func.args[0] f = func.func C1 = get_numbered_constants(eq, num=1) r = match # {'m1':m1, 'm2':m2, 'y':y} u = r.get('hint', f(x)) # get u from separable_reduced else get f(x) return Eq(Integral(r['m2']['coeff']*r['m2'][r['y']]/r['m1'][r['y']], (r['y'], None, u)), Integral(-r['m1']['coeff']*r['m1'][x]/ r['m2'][x], x) + C1) def checkinfsol(eq, infinitesimals, func=None, order=None): r""" This function is used to check if the given infinitesimals are the actual infinitesimals of the given first order differential equation. This method is specific to the Lie Group Solver of ODEs. As of now, it simply checks, by substituting the infinitesimals in the partial differential equation. .. math:: \frac{\partial \eta}{\partial x} + \left(\frac{\partial \eta}{\partial y} - \frac{\partial \xi}{\partial x}\right)*h - \frac{\partial \xi}{\partial y}*h^{2} - \xi\frac{\partial h}{\partial x} - \eta\frac{\partial h}{\partial y} = 0 where `\eta`, and `\xi` are the infinitesimals and `h(x,y) = \frac{dy}{dx}` The infinitesimals should be given in the form of a list of dicts ``[{xi(x, y): inf, eta(x, y): inf}]``, corresponding to the output of the function infinitesimals. It returns a list of values of the form ``[(True/False, sol)]`` where ``sol`` is the value obtained after substituting the infinitesimals in the PDE. If it is ``True``, then ``sol`` would be 0. 
""" if isinstance(eq, Equality): eq = eq.lhs - eq.rhs if not func: eq, func = _preprocess(eq) variables = func.args if len(variables) != 1: raise ValueError("ODE's have only one independent variable") else: x = variables[0] if not order: order = ode_order(eq, func) if order != 1: raise NotImplementedError("Lie groups solver has been implemented " "only for first order differential equations") else: df = func.diff(x) a = Wild('a', exclude = [df]) b = Wild('b', exclude = [df]) match = collect(expand(eq), df).match(a*df + b) if match: h = -simplify(match[b]/match[a]) else: try: sol = solve(eq, df) except NotImplementedError: raise NotImplementedError("Infinitesimals for the " "first order ODE could not be found") else: h = sol[0] # Find infinitesimals for one solution y = Dummy('y') h = h.subs(func, y) xi = Function('xi')(x, y) eta = Function('eta')(x, y) dxi = Function('xi')(x, func) deta = Function('eta')(x, func) pde = (eta.diff(x) + (eta.diff(y) - xi.diff(x))*h - (xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y))) soltup = [] for sol in infinitesimals: tsol = {xi: S(sol[dxi]).subs(func, y), eta: S(sol[deta]).subs(func, y)} sol = simplify(pde.subs(tsol).doit()) if sol: soltup.append((False, sol.subs(y, func))) else: soltup.append((True, 0)) return soltup def ode_lie_group(eq, func, order, match): r""" This hint implements the Lie group method of solving first order differential equations. The aim is to convert the given differential equation from the given coordinate given system into another coordinate system where it becomes invariant under the one-parameter Lie group of translations. The converted ODE is quadrature and can be solved easily. It makes use of the :py:meth:`sympy.solvers.ode.infinitesimals` function which returns the infinitesimals of the transformation. The coordinates `r` and `s` can be found by solving the following Partial Differential Equations. .. math :: \xi\frac{\partial r}{\partial x} + \eta\frac{\partial r}{\partial y} = 0 .. math :: \xi\frac{\partial s}{\partial x} + \eta\frac{\partial s}{\partial y} = 1 The differential equation becomes separable in the new coordinate system .. math :: \frac{ds}{dr} = \frac{\frac{\partial s}{\partial x} + h(x, y)\frac{\partial s}{\partial y}}{ \frac{\partial r}{\partial x} + h(x, y)\frac{\partial r}{\partial y}} After finding the solution by integration, it is then converted back to the original coordinate system by subsituting `r` and `s` in terms of `x` and `y` again. Examples ======== >>> from sympy import Function, dsolve, Eq, exp, pprint >>> from sympy.abc import x >>> f = Function('f') >>> pprint(dsolve(f(x).diff(x) + 2*x*f(x) - x*exp(-x**2), f(x), ... hint='lie_group')) / 2\ 2 | x | -x f(x) = |C1 + --|*e \ 2 / References ========== - Solving differential equations by Symmetry Groups, John Starrett, pp. 1 - pp. 
14 """ heuristics = lie_heuristics inf = {} f = func.func x = func.args[0] df = func.diff(x) xi = Function("xi") eta = Function("eta") a = Wild('a', exclude = [df]) b = Wild('b', exclude = [df]) xis = match.pop('xi') etas = match.pop('eta') if match: h = -simplify(match[match['d']]/match[match['e']]) y = match['y'] else: try: sol = solve(eq, df) except NotImplementedError: raise NotImplementedError("Unable to solve the differential equation " + str(eq) + " by the lie group method") else: y = Dummy("y") h = sol[0].subs(func, y) if xis is not None and etas is not None: inf = [{xi(x, f(x)): S(xis), eta(x, f(x)): S(etas)}] if not checkinfsol(eq, inf, func=f(x), order=1)[0][0]: raise ValueError("The given infinitesimals xi and eta" " are not the infinitesimals to the given equation") else: heuristics = ["user_defined"] match = {'h': h, 'y': y} # This is done so that if: # a] solve raises a NotImplementedError. # b] any heuristic raises a ValueError # another heuristic can be used. tempsol = [] # Used by solve below for heuristic in heuristics: try: if not inf: inf = infinitesimals(eq, hint=heuristic, func=func, order=1, match=match) except ValueError: continue else: for infsim in inf: xiinf = (infsim[xi(x, func)]).subs(func, y) etainf = (infsim[eta(x, func)]).subs(func, y) # This condition creates recursion while using pdsolve. # Since the first step while solving a PDE of form # a*(f(x, y).diff(x)) + b*(f(x, y).diff(y)) + c = 0 # is to solve the ODE dy/dx = b/a if simplify(etainf/xiinf) == h: continue rpde = f(x, y).diff(x)*xiinf + f(x, y).diff(y)*etainf r = pdsolve(rpde, func=f(x, y)).rhs s = pdsolve(rpde - 1, func=f(x, y)).rhs newcoord = [_lie_group_remove(coord) for coord in [r, s]] r = Dummy("r") s = Dummy("s") C1 = Symbol("C1") rcoord = newcoord[0] scoord = newcoord[-1] try: sol = solve([r - rcoord, s - scoord], x, y, dict=True) except NotImplementedError: continue else: sol = sol[0] xsub = sol[x] ysub = sol[y] num = simplify(scoord.diff(x) + scoord.diff(y)*h) denom = simplify(rcoord.diff(x) + rcoord.diff(y)*h) if num and denom: diffeq = simplify((num/denom).subs([(x, xsub), (y, ysub)])) sep = separatevars(diffeq, symbols=[r, s], dict=True) if sep: # Trying to separate, r and s coordinates deq = integrate((1/sep[s]), s) + C1 - integrate(sep['coeff']*sep[r], r) # Substituting and reverting back to original coordinates deq = deq.subs([(r, rcoord), (s, scoord)]) try: sdeq = solve(deq, y) except NotImplementedError: tempsol.append(deq) else: if len(sdeq) == 1: return Eq(f(x), sdeq.pop()) else: return [Eq(f(x), sol) for sol in sdeq] elif denom: # (ds/dr) is zero which means s is constant return Eq(f(x), solve(scoord - C1, y)[0]) elif num: # (dr/ds) is zero which means r is constant return Eq(f(x), solve(rcoord - C1, y)[0]) # If nothing works, return solution as it is, without solving for y if tempsol: if len(tempsol) == 1: return Eq(tempsol.pop().subs(y, f(x)), 0) else: return [Eq(sol.subs(y, f(x)), 0) for sol in tempsol] raise NotImplementedError("The given ODE " + str(eq) + " cannot be solved by" + " the lie group method") def _lie_group_remove(coords): r""" This function is strictly meant for internal use by the Lie group ODE solving method. It replaces arbitrary functions returned by pdsolve with either 0 or 1 or the args of the arbitrary function. The algorithm used is: 1] If coords is an instance of an Undefined Function, then the args are returned 2] If the arbitrary function is present in an Add object, it is replaced by zero. 
3] If the arbitrary function is present in an Mul object, it is replaced by one. 4] If coords has no Undefined Function, it is returned as it is. Examples ======== >>> from sympy.solvers.ode import _lie_group_remove >>> from sympy import Function >>> from sympy.abc import x, y >>> F = Function("F") >>> eq = x**2*y >>> _lie_group_remove(eq) x**2*y >>> eq = F(x**2*y) >>> _lie_group_remove(eq) x**2*y >>> eq = y**2*x + F(x**3) >>> _lie_group_remove(eq) x*y**2 >>> eq = (F(x**3) + y)*x**4 >>> _lie_group_remove(eq) x**4*y """ if isinstance(coords, AppliedUndef): return coords.args[0] elif coords.is_Add: subfunc = coords.atoms(AppliedUndef) if subfunc: for func in subfunc: coords = coords.subs(func, 0) return coords elif coords.is_Pow: base, expr = coords.as_base_exp() base = _lie_group_remove(base) expr = _lie_group_remove(expr) return base**expr elif coords.is_Mul: mulargs = [] coordargs = coords.args for arg in coordargs: if not isinstance(coords, AppliedUndef): mulargs.append(_lie_group_remove(arg)) return Mul(*mulargs) return coords def infinitesimals(eq, func=None, order=None, hint='default', match=None): r""" The infinitesimal functions of an ordinary differential equation, `\xi(x,y)` and `\eta(x,y)`, are the infinitesimals of the Lie group of point transformations for which the differential equation is invariant. So, the ODE `y'=f(x,y)` would admit a Lie group `x^*=X(x,y;\varepsilon)=x+\varepsilon\xi(x,y)`, `y^*=Y(x,y;\varepsilon)=y+\varepsilon\eta(x,y)` such that `(y^*)'=f(x^*, y^*)`. A change of coordinates, to `r(x,y)` and `s(x,y)`, can be performed so this Lie group becomes the translation group, `r^*=r` and `s^*=s+\varepsilon`. They are tangents to the coordinate curves of the new system. Consider the transformation `(x, y) \to (X, Y)` such that the differential equation remains invariant. `\xi` and `\eta` are the tangents to the transformed coordinates `X` and `Y`, at `\varepsilon=0`. .. math:: \left(\frac{\partial X(x,y;\varepsilon)}{\partial\varepsilon }\right)|_{\varepsilon=0} = \xi, \left(\frac{\partial Y(x,y;\varepsilon)}{\partial\varepsilon }\right)|_{\varepsilon=0} = \eta, The infinitesimals can be found by solving the following PDE: >>> from sympy import Function, diff, Eq, pprint >>> from sympy.abc import x, y >>> xi, eta, h = map(Function, ['xi', 'eta', 'h']) >>> h = h(x, y) # dy/dx = h >>> eta = eta(x, y) >>> xi = xi(x, y) >>> genform = Eq(eta.diff(x) + (eta.diff(y) - xi.diff(x))*h ... - (xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y)), 0) >>> pprint(genform) /d d \ d 2 d |--(eta(x, y)) - --(xi(x, y))|*h(x, y) - eta(x, y)*--(h(x, y)) - h (x, y)*--(x \dy dx / dy dy <BLANKLINE> d d i(x, y)) - xi(x, y)*--(h(x, y)) + --(eta(x, y)) = 0 dx dx Solving the above mentioned PDE is not trivial, and can be solved only by making intelligent assumptions for `\xi` and `\eta` (heuristics). Once an infinitesimal is found, the attempt to find more heuristics stops. This is done to optimise the speed of solving the differential equation. If a list of all the infinitesimals is needed, ``hint`` should be flagged as ``all``, which gives the complete list of infinitesimals. If the infinitesimals for a particular heuristic needs to be found, it can be passed as a flag to ``hint``. 
Examples ======== >>> from sympy import Function, diff >>> from sympy.solvers.ode import infinitesimals >>> from sympy.abc import x >>> f = Function('f') >>> eq = f(x).diff(x) - x**2*f(x) >>> infinitesimals(eq) [{eta(x, f(x)): exp(x**3/3), xi(x, f(x)): 0}] References ========== - Solving differential equations by Symmetry Groups, John Starrett, pp. 1 - pp. 14 """ if isinstance(eq, Equality): eq = eq.lhs - eq.rhs if not func: eq, func = _preprocess(eq) variables = func.args if len(variables) != 1: raise ValueError("ODE's have only one independent variable") else: x = variables[0] if not order: order = ode_order(eq, func) if order != 1: raise NotImplementedError("Infinitesimals for only " "first order ODE's have been implemented") else: df = func.diff(x) # Matching differential equation of the form a*df + b a = Wild('a', exclude = [df]) b = Wild('b', exclude = [df]) if match: # Used by lie_group hint h = match['h'] y = match['y'] else: match = collect(expand(eq), df).match(a*df + b) if match: h = -simplify(match[b]/match[a]) else: try: sol = solve(eq, df) except NotImplementedError: raise NotImplementedError("Infinitesimals for the " "first order ODE could not be found") else: h = sol[0] # Find infinitesimals for one solution y = Dummy("y") h = h.subs(func, y) u = Dummy("u") hx = h.diff(x) hy = h.diff(y) hinv = ((1/h).subs([(x, u), (y, x)])).subs(u, y) # Inverse ODE match = {'h': h, 'func': func, 'hx': hx, 'hy': hy, 'y': y, 'hinv': hinv} if hint == 'all': xieta = [] for heuristic in lie_heuristics: function = globals()['lie_heuristic_' + heuristic] inflist = function(match, comp=True) if inflist: xieta.extend([inf for inf in inflist if inf not in xieta]) if xieta: return xieta else: raise NotImplementedError("Infinitesimals could not be found for " "the given ODE") elif hint == 'default': for heuristic in lie_heuristics: function = globals()['lie_heuristic_' + heuristic] xieta = function(match, comp=False) if xieta: return xieta raise NotImplementedError("Infinitesimals could not be found for" " the given ODE") elif hint not in lie_heuristics: raise ValueError("Heuristic not recognized: " + hint) else: function = globals()['lie_heuristic_' + hint] xieta = function(match, comp=True) if xieta: return xieta else: raise ValueError("Infinitesimals could not be found using the" " given heuristic") def lie_heuristic_abaco1_simple(match, comp=False): r""" The first heuristic uses the following four sets of assumptions on `\xi` and `\eta` .. math:: \xi = 0, \eta = f(x) .. math:: \xi = 0, \eta = f(y) .. math:: \xi = f(x), \eta = 0 .. math:: \xi = f(y), \eta = 0 The success of this heuristic is determined by algebraic factorisation. For the first assumption `\xi = 0` and `\eta` to be a function of `x`, the PDE .. math:: \frac{\partial \eta}{\partial x} + (\frac{\partial \eta}{\partial y} - \frac{\partial \xi}{\partial x})*h - \frac{\partial \xi}{\partial y}*h^{2} - \xi*\frac{\partial h}{\partial x} - \eta*\frac{\partial h}{\partial y} = 0 reduces to `f'(x) - f\frac{\partial h}{\partial y} = 0` If `\frac{\partial h}{\partial y}` is a function of `x`, then this can usually be integrated easily. A similar idea is applied to the other 3 assumptions as well. References ========== - E.S Cheb-Terrab, L.G.S Duarte and L.A,C.P da Mota, Computer Algebra Solving of First Order ODEs Using Symmetry Methods, pp. 
8 """ xieta = [] y = match['y'] h = match['h'] func = match['func'] x = func.args[0] hx = match['hx'] hy = match['hy'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) hysym = hy.free_symbols if y not in hysym: try: fx = exp(integrate(hy, x)) except NotImplementedError: pass else: inf = {xi: S(0), eta: fx} if not comp: return [inf] if comp and inf not in xieta: xieta.append(inf) factor = hy/h facsym = factor.free_symbols if x not in facsym: try: fy = exp(integrate(factor, y)) except NotImplementedError: pass else: inf = {xi: S(0), eta: fy.subs(y, func)} if not comp: return [inf] if comp and inf not in xieta: xieta.append(inf) factor = -hx/h facsym = factor.free_symbols if y not in facsym: try: fx = exp(integrate(factor, x)) except NotImplementedError: pass else: inf = {xi: fx, eta: S(0)} if not comp: return [inf] if comp and inf not in xieta: xieta.append(inf) factor = -hx/(h**2) facsym = factor.free_symbols if x not in facsym: try: fy = exp(integrate(factor, y)) except NotImplementedError: pass else: inf = {xi: fy.subs(y, func), eta: S(0)} if not comp: return [inf] if comp and inf not in xieta: xieta.append(inf) if xieta: return xieta def lie_heuristic_abaco1_product(match, comp=False): r""" The second heuristic uses the following two assumptions on `\xi` and `\eta` .. math:: \eta = 0, \xi = f(x)*g(y) .. math:: \eta = f(x)*g(y), \xi = 0 The first assumption of this heuristic holds good if `\frac{1}{h^{2}}\frac{\partial^2}{\partial x \partial y}\log(h)` is separable in `x` and `y`, then the separated factors containing `x` is `f(x)`, and `g(y)` is obtained by .. math:: e^{\int f\frac{\partial}{\partial x}\left(\frac{1}{f*h}\right)\,dy} provided `f\frac{\partial}{\partial x}\left(\frac{1}{f*h}\right)` is a function of `y` only. The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties of the first assumption satisifes. After obtaining `f(x)` and `g(y)`, the coordinates are again interchanged, to get `\eta` as `f(x)*g(y)` References ========== - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order ODE Patterns, pp. 7 - pp. 8 """ xieta = [] y = match['y'] h = match['h'] hinv = match['hinv'] func = match['func'] x = func.args[0] xi = Function('xi')(x, func) eta = Function('eta')(x, func) inf = separatevars(((log(h).diff(y)).diff(x))/h**2, dict=True, symbols=[x, y]) if inf and inf['coeff']: fx = inf[x] gy = simplify(fx*((1/(fx*h)).diff(x))) gysyms = gy.free_symbols if x not in gysyms: gy = exp(integrate(gy, y)) inf = {eta: S(0), xi: (fx*gy).subs(y, func)} if not comp: return [inf] if comp and inf not in xieta: xieta.append(inf) u1 = Dummy("u1") inf = separatevars(((log(hinv).diff(y)).diff(x))/hinv**2, dict=True, symbols=[x, y]) if inf and inf['coeff']: fx = inf[x] gy = simplify(fx*((1/(fx*hinv)).diff(x))) gysyms = gy.free_symbols if x not in gysyms: gy = exp(integrate(gy, y)) etaval = fx*gy etaval = (etaval.subs([(x, u1), (y, x)])).subs(u1, y) inf = {eta: etaval.subs(y, func), xi: S(0)} if not comp: return [inf] if comp and inf not in xieta: xieta.append(inf) if xieta: return xieta def lie_heuristic_bivariate(match, comp=False): r""" The third heuristic assumes the infinitesimals `\xi` and `\eta` to be bi-variate polynomials in `x` and `y`. The assumption made here for the logic below is that `h` is a rational function in `x` and `y` though that may not be necessary for the infinitesimals to be bivariate polynomials. 
The coefficients of the infinitesimals are found out by substituting them in the PDE and grouping similar terms that are polynomials and since they form a linear system, solve and check for non trivial solutions. The degree of the assumed bivariates are increased till a certain maximum value. References ========== - Lie Groups and Differential Equations pp. 327 - pp. 329 """ h = match['h'] hx = match['hx'] hy = match['hy'] func = match['func'] x = func.args[0] y = match['y'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) if h.is_rational_function(): # The maximum degree that the infinitesimals can take is # calculated by this technique. etax, etay, etad, xix, xiy, xid = symbols("etax etay etad xix xiy xid") ipde = etax + (etay - xix)*h - xiy*h**2 - xid*hx - etad*hy num, denom = cancel(ipde).as_numer_denom() deg = Poly(num, x, y).total_degree() deta = Function('deta')(x, y) dxi = Function('dxi')(x, y) ipde = (deta.diff(x) + (deta.diff(y) - dxi.diff(x))*h - (dxi.diff(y))*h**2 - dxi*hx - deta*hy) xieq = Symbol("xi0") etaeq = Symbol("eta0") for i in range(deg + 1): if i: xieq += Add(*[ Symbol("xi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power) for power in range(i + 1)]) etaeq += Add(*[ Symbol("eta_" + str(power) + "_" + str(i - power))*x**power*y**(i - power) for power in range(i + 1)]) pden, denom = (ipde.subs({dxi: xieq, deta: etaeq}).doit()).as_numer_denom() pden = expand(pden) # If the individual terms are monomials, the coefficients # are grouped if pden.is_polynomial(x, y) and pden.is_Add: polyy = Poly(pden, x, y).as_dict() if polyy: symset = xieq.free_symbols.union(etaeq.free_symbols) - {x, y} soldict = solve(polyy.values(), *symset) if isinstance(soldict, list): soldict = soldict[0] if any(x for x in soldict.values()): xired = xieq.subs(soldict) etared = etaeq.subs(soldict) # Scaling is done by substituting one for the parameters # This can be any number except zero. dict_ = dict((sym, 1) for sym in symset) inf = {eta: etared.subs(dict_).subs(y, func), xi: xired.subs(dict_).subs(y, func)} return [inf] def lie_heuristic_chi(match, comp=False): r""" The aim of the fourth heuristic is to find the function `\chi(x, y)` that satisifies the PDE `\frac{d\chi}{dx} + h\frac{d\chi}{dx} - \frac{\partial h}{\partial y}\chi = 0`. This assumes `\chi` to be a bivariate polynomial in `x` and `y`. By intution, `h` should be a rational function in `x` and `y`. The method used here is to substitute a general binomial for `\chi` up to a certain maximum degree is reached. The coefficients of the polynomials, are calculated by by collecting terms of the same order in `x` and `y`. After finding `\chi`, the next step is to use `\eta = \xi*h + \chi`, to determine `\xi` and `\eta`. This can be done by dividing `\chi` by `h` which would give `-\xi` as the quotient and `\eta` as the remainder. References ========== - E.S Cheb-Terrab, L.G.S Duarte and L.A,C.P da Mota, Computer Algebra Solving of First Order ODEs Using Symmetry Methods, pp. 
8 """ h = match['h'] hx = match['hx'] hy = match['hy'] func = match['func'] x = func.args[0] y = match['y'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) if h.is_rational_function(): schi, schix, schiy = symbols("schi, schix, schiy") cpde = schix + h*schiy - hy*schi num, denom = cancel(cpde).as_numer_denom() deg = Poly(num, x, y).total_degree() chi = Function('chi')(x, y) chix = chi.diff(x) chiy = chi.diff(y) cpde = chix + h*chiy - hy*chi chieq = Symbol("chi") for i in range(1, deg + 1): chieq += Add(*[ Symbol("chi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power) for power in range(i + 1)]) cnum, cden = cancel(cpde.subs({chi : chieq}).doit()).as_numer_denom() cnum = expand(cnum) if cnum.is_polynomial(x, y) and cnum.is_Add: cpoly = Poly(cnum, x, y).as_dict() if cpoly: solsyms = chieq.free_symbols - {x, y} soldict = solve(cpoly.values(), *solsyms) if isinstance(soldict, list): soldict = soldict[0] if any(x for x in soldict.values()): chieq = chieq.subs(soldict) dict_ = dict((sym, 1) for sym in solsyms) chieq = chieq.subs(dict_) # After finding chi, the main aim is to find out # eta, xi by the equation eta = xi*h + chi # One method to set xi, would be rearranging it to # (eta/h) - xi = (chi/h). This would mean dividing # chi by h would give -xi as the quotient and eta # as the remainder. Thanks to Sean Vig for suggesting # this method. xic, etac = div(chieq, h) inf = {eta: etac.subs(y, func), xi: -xic.subs(y, func)} return [inf] def lie_heuristic_function_sum(match, comp=False): r""" This heuristic uses the following two assumptions on `\xi` and `\eta` .. math:: \eta = 0, \xi = f(x) + g(y) .. math:: \eta = f(x) + g(y), \xi = 0 The first assumption of this heuristic holds good if .. math:: \frac{\partial}{\partial y}[(h\frac{\partial^{2}}{ \partial x^{2}}(h^{-1}))^{-1}] is separable in `x` and `y`, 1. The separated factors containing `y` is `\frac{\partial g}{\partial y}`. From this `g(y)` can be determined. 2. The separated factors containing `x` is `f''(x)`. 3. `h\frac{\partial^{2}}{\partial x^{2}}(h^{-1})` equals `\frac{f''(x)}{f(x) + g(y)}`. From this `f(x)` can be determined. The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties of the first assumption satisifes. After obtaining `f(x)` and `g(y)`, the coordinates are again interchanged, to get `\eta` as `f(x) + g(y)`. For both assumptions, the constant factors are separated among `g(y)` and `f''(x)`, such that `f''(x)` obtained from 3] is the same as that obtained from 2]. If not possible, then this heuristic fails. References ========== - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order ODE Patterns, pp. 7 - pp. 
8 """ xieta = [] h = match['h'] hx = match['hx'] hy = match['hy'] func = match['func'] hinv = match['hinv'] x = func.args[0] y = match['y'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) for odefac in [h, hinv]: factor = odefac*((1/odefac).diff(x, 2)) sep = separatevars((1/factor).diff(y), dict=True, symbols=[x, y]) if sep and sep['coeff'] and sep[x].has(x) and sep[y].has(y): k = Dummy("k") try: gy = k*integrate(sep[y], y) except NotImplementedError: pass else: fdd = 1/(k*sep[x]*sep['coeff']) fx = simplify(fdd/factor - gy) check = simplify(fx.diff(x, 2) - fdd) if fx: if not check: fx = fx.subs(k, 1) gy = (gy/k) else: sol = solve(check, k) if sol: sol = sol[0] fx = fx.subs(k, sol) gy = (gy/k)*sol else: continue if odefac == hinv: # Inverse ODE fx = fx.subs(x, y) gy = gy.subs(y, x) etaval = factor_terms(fx + gy) if etaval.is_Mul: etaval = Mul(*[arg for arg in etaval.args if arg.has(x, y)]) if odefac == hinv: # Inverse ODE inf = {eta: etaval.subs(y, func), xi : S(0)} else: inf = {xi: etaval.subs(y, func), eta : S(0)} if not comp: return [inf] else: xieta.append(inf) if xieta: return xieta def lie_heuristic_abaco2_similar(match, comp=False): r""" This heuristic uses the following two assumptions on `\xi` and `\eta` .. math:: \eta = g(x), \xi = f(x) .. math:: \eta = f(y), \xi = g(y) For the first assumption, 1. First `\frac{\frac{\partial h}{\partial y}}{\frac{\partial^{2} h}{ \partial yy}}` is calculated. Let us say this value is A 2. If this is constant, then `h` is matched to the form `A(x) + B(x)e^{ \frac{y}{C}}` then, `\frac{e^{\int \frac{A(x)}{C} \,dx}}{B(x)}` gives `f(x)` and `A(x)*f(x)` gives `g(x)` 3. Otherwise `\frac{\frac{\partial A}{\partial X}}{\frac{\partial A}{ \partial Y}} = \gamma` is calculated. If a] `\gamma` is a function of `x` alone b] `\frac{\gamma\frac{\partial h}{\partial y} - \gamma'(x) - \frac{ \partial h}{\partial x}}{h + \gamma} = G` is a function of `x` alone. then, `e^{\int G \,dx}` gives `f(x)` and `-\gamma*f(x)` gives `g(x)` The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties of the first assumption satisifes. After obtaining `f(x)` and `g(x)`, the coordinates are again interchanged, to get `\xi` as `f(x^*)` and `\eta` as `g(y^*)` References ========== - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order ODE Patterns, pp. 10 - pp. 
12 """ xieta = [] h = match['h'] hx = match['hx'] hy = match['hy'] func = match['func'] hinv = match['hinv'] x = func.args[0] y = match['y'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) factor = cancel(h.diff(y)/h.diff(y, 2)) factorx = factor.diff(x) factory = factor.diff(y) if not factor.has(x) and not factor.has(y): A = Wild('A', exclude=[y]) B = Wild('B', exclude=[y]) C = Wild('C', exclude=[x, y]) match = h.match(A + B*exp(y/C)) try: tau = exp(-integrate(match[A]/match[C]), x)/match[B] except NotImplementedError: pass else: gx = match[A]*tau return [{xi: tau, eta: gx}] else: gamma = cancel(factorx/factory) if not gamma.has(y): tauint = cancel((gamma*hy - gamma.diff(x) - hx)/(h + gamma)) if not tauint.has(y): try: tau = exp(integrate(tauint, x)) except NotImplementedError: pass else: gx = -tau*gamma return [{xi: tau, eta: gx}] factor = cancel(hinv.diff(y)/hinv.diff(y, 2)) factorx = factor.diff(x) factory = factor.diff(y) if not factor.has(x) and not factor.has(y): A = Wild('A', exclude=[y]) B = Wild('B', exclude=[y]) C = Wild('C', exclude=[x, y]) match = h.match(A + B*exp(y/C)) try: tau = exp(-integrate(match[A]/match[C]), x)/match[B] except NotImplementedError: pass else: gx = match[A]*tau return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}] else: gamma = cancel(factorx/factory) if not gamma.has(y): tauint = cancel((gamma*hinv.diff(y) - gamma.diff(x) - hinv.diff(x))/( hinv + gamma)) if not tauint.has(y): try: tau = exp(integrate(tauint, x)) except NotImplementedError: pass else: gx = -tau*gamma return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}] def lie_heuristic_abaco2_unique_unknown(match, comp=False): r""" This heuristic assumes the presence of unknown functions or known functions with non-integer powers. 1. A list of all functions and non-integer powers containing x and y 2. Loop over each element `f` in the list, find `\frac{\frac{\partial f}{\partial x}}{ \frac{\partial f}{\partial x}} = R` If it is separable in `x` and `y`, let `X` be the factors containing `x`. Then a] Check if `\xi = X` and `\eta = -\frac{X}{R}` satisfy the PDE. If yes, then return `\xi` and `\eta` b] Check if `\xi = \frac{-R}{X}` and `\eta = -\frac{1}{X}` satisfy the PDE. If yes, then return `\xi` and `\eta` If not, then check if a] :math:`\xi = -R,\eta = 1` b] :math:`\xi = 1, \eta = -\frac{1}{R}` are solutions. References ========== - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order ODE Patterns, pp. 10 - pp. 
12 """ xieta = [] h = match['h'] hx = match['hx'] hy = match['hy'] func = match['func'] hinv = match['hinv'] x = func.args[0] y = match['y'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) funclist = [] for atom in h.atoms(Pow): base, exp = atom.as_base_exp() if base.has(x) and base.has(y): if not exp.is_Integer: funclist.append(atom) for function in h.atoms(AppliedUndef): syms = function.free_symbols if x in syms and y in syms: funclist.append(function) for f in funclist: frac = cancel(f.diff(y)/f.diff(x)) sep = separatevars(frac, dict=True, symbols=[x, y]) if sep and sep['coeff']: xitry1 = sep[x] etatry1 = -1/(sep[y]*sep['coeff']) pde1 = etatry1.diff(y)*h - xitry1.diff(x)*h - xitry1*hx - etatry1*hy if not simplify(pde1): return [{xi: xitry1, eta: etatry1.subs(y, func)}] xitry2 = 1/etatry1 etatry2 = 1/xitry1 pde2 = etatry2.diff(x) - (xitry2.diff(y))*h**2 - xitry2*hx - etatry2*hy if not simplify(expand(pde2)): return [{xi: xitry2.subs(y, func), eta: etatry2}] else: etatry = -1/frac pde = etatry.diff(x) + etatry.diff(y)*h - hx - etatry*hy if not simplify(pde): return [{xi: S(1), eta: etatry.subs(y, func)}] xitry = -frac pde = -xitry.diff(x)*h -xitry.diff(y)*h**2 - xitry*hx -hy if not simplify(expand(pde)): return [{xi: xitry.subs(y, func), eta: S(1)}] def lie_heuristic_abaco2_unique_general(match, comp=False): r""" This heuristic finds if infinitesimals of the form `\eta = f(x)`, `\xi = g(y)` without making any assumptions on `h`. The complete sequence of steps is given in the paper mentioned below. References ========== - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order ODE Patterns, pp. 10 - pp. 12 """ xieta = [] h = match['h'] hx = match['hx'] hy = match['hy'] func = match['func'] hinv = match['hinv'] x = func.args[0] y = match['y'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) C = S(0) A = hx.diff(y) B = hy.diff(y) + hy**2 C = hx.diff(x) - hx**2 if not (A and B and C): return Ax = A.diff(x) Ay = A.diff(y) Axy = Ax.diff(y) Axx = Ax.diff(x) Ayy = Ay.diff(y) D = simplify(2*Axy + hx*Ay - Ax*hy + (hx*hy + 2*A)*A)*A - 3*Ax*Ay if not D: E1 = simplify(3*Ax**2 + ((hx**2 + 2*C)*A - 2*Axx)*A) if E1: E2 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2) if not E2: E3 = simplify( E1*((28*Ax + 4*hx*A)*A**3 - E1*(hy*A + Ay)) - E1.diff(x)*8*A**4) if not E3: etaval = cancel((4*A**3*(Ax - hx*A) + E1*(hy*A - Ay))/(S(2)*A*E1)) if x not in etaval: try: etaval = exp(integrate(etaval, y)) except NotImplementedError: pass else: xival = -4*A**3*etaval/E1 if y not in xival: return [{xi: xival, eta: etaval.subs(y, func)}] else: E1 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2) if E1: E2 = simplify( 4*A**3*D - D**2 + E1*((2*Axx - (hx**2 + 2*C)*A)*A - 3*Ax**2)) if not E2: E3 = simplify( -(A*D)*E1.diff(y) + ((E1.diff(x) - hy*D)*A + 3*Ay*D + (A*hx - 3*Ax)*E1)*E1) if not E3: etaval = cancel(((A*hx - Ax)*E1 - (Ay + A*hy)*D)/(S(2)*A*D)) if x not in etaval: try: etaval = exp(integrate(etaval, y)) except NotImplementedError: pass else: xival = -E1*etaval/D if y not in xival: return [{xi: xival, eta: etaval.subs(y, func)}] def lie_heuristic_linear(match, comp=False): r""" This heuristic assumes 1. `\xi = ax + by + c` and 2. `\eta = fx + gy + h` After substituting the following assumptions in the determining PDE, it reduces to .. math:: f + (g - a)h - bh^{2} - (ax + by + c)\frac{\partial h}{\partial x} - (fx + gy + c)\frac{\partial h}{\partial y} Solving the reduced PDE obtained, using the method of characteristics, becomes impractical. 
The method followed is grouping similar terms and solving the system of linear equations obtained. The difference between the bivariate heuristic is that `h` need not be a rational function in this case. References ========== - E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order ODE Patterns, pp. 10 - pp. 12 """ xieta = [] h = match['h'] hx = match['hx'] hy = match['hy'] func = match['func'] hinv = match['hinv'] x = func.args[0] y = match['y'] xi = Function('xi')(x, func) eta = Function('eta')(x, func) coeffdict = {} symbols = numbered_symbols("c", cls=Dummy) symlist = [next(symbols) for i in islice(symbols, 6)] C0, C1, C2, C3, C4, C5 = symlist pde = C3 + (C4 - C0)*h -(C0*x + C1*y + C2)*hx - (C3*x + C4*y + C5)*hy - C1*h**2 pde, denom = pde.as_numer_denom() pde = powsimp(expand(pde)) if pde.is_Add: terms = pde.args for term in terms: if term.is_Mul: rem = Mul(*[m for m in term.args if not m.has(x, y)]) xypart = term/rem if xypart not in coeffdict: coeffdict[xypart] = rem else: coeffdict[xypart] += rem else: if term not in coeffdict: coeffdict[term] = S(1) else: coeffdict[term] += S(1) sollist = coeffdict.values() soldict = solve(sollist, symlist) if soldict: if isinstance(soldict, list): soldict = soldict[0] subval = soldict.values() if any(t for t in subval): onedict = dict(zip(symlist, [1]*6)) xival = C0*x + C1*func + C2 etaval = C3*x + C4*func + C5 xival = xival.subs(soldict) etaval = etaval.subs(soldict) xival = xival.subs(onedict) etaval = etaval.subs(onedict) return [{xi: xival, eta: etaval}] def sysode_linear_2eq_order1(match_): x = match_['func'][0].func y = match_['func'][1].func func = match_['func'] fc = match_['func_coeff'] eq = match_['eq'] C1, C2, C3, C4 = get_numbered_constants(eq, num=4) r = dict() t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] for i in range(2): eqs = 0 for terms in Add.make_args(eq[i]): eqs += terms/fc[i,func[i],1] eq[i] = eqs # for equations Eq(a1*diff(x(t),t), a*x(t) + b*y(t) + k1) # and Eq(a2*diff(x(t),t), c*x(t) + d*y(t) + k2) r['a'] = -fc[0,x(t),0]/fc[0,x(t),1] r['c'] = -fc[1,x(t),0]/fc[1,y(t),1] r['b'] = -fc[0,y(t),0]/fc[0,x(t),1] r['d'] = -fc[1,y(t),0]/fc[1,y(t),1] forcing = [S(0),S(0)] for i in range(2): for j in Add.make_args(eq[i]): if not j.has(x(t), y(t)): forcing[i] += j if not (forcing[0].has(t) or forcing[1].has(t)): r['k1'] = forcing[0] r['k2'] = forcing[1] else: raise NotImplementedError("Only homogeneous problems are supported" + " (and constant inhomogeneity)") if match_['type_of_equation'] == 'type1': sol = _linear_2eq_order1_type1(x, y, t, r, eq) if match_['type_of_equation'] == 'type2': gsol = _linear_2eq_order1_type1(x, y, t, r, eq) psol = _linear_2eq_order1_type2(x, y, t, r, eq) sol = [Eq(x(t), gsol[0].rhs+psol[0]), Eq(y(t), gsol[1].rhs+psol[1])] if match_['type_of_equation'] == 'type3': sol = _linear_2eq_order1_type3(x, y, t, r, eq) if match_['type_of_equation'] == 'type4': sol = _linear_2eq_order1_type4(x, y, t, r, eq) if match_['type_of_equation'] == 'type5': sol = _linear_2eq_order1_type5(x, y, t, r, eq) if match_['type_of_equation'] == 'type6': sol = _linear_2eq_order1_type6(x, y, t, r, eq) if match_['type_of_equation'] == 'type7': sol = _linear_2eq_order1_type7(x, y, t, r, eq) return sol def _linear_2eq_order1_type1(x, y, t, r, eq): r""" It is classified under system of two linear homogeneous first-order constant-coefficient ordinary differential equations. The equations which come under this type are .. math:: x' = ax + by, .. math:: y' = cx + dy The characteristics equation is written as .. 
math:: \lambda^{2} - (a+d) \lambda + ad - bc = 0 and its discriminant is `D = (a-d)^{2} + 4bc`. There are several cases: 1. Case when `ad - bc \neq 0`. The origin of coordinates, `x = y = 0`, is the only stationary point; it is - a node if `D = 0` - a node if `D > 0` and `ad - bc > 0` - a saddle if `D > 0` and `ad - bc < 0` - a focus if `D < 0` and `a + d \neq 0` - a centre if `D < 0` and `a + d = 0`. 1.1. If `D > 0`. The characteristic equation has two distinct real roots `\lambda_1` and `\lambda_2`. The general solution of the system in question is expressed as .. math:: x = C_1 b e^{\lambda_1 t} + C_2 b e^{\lambda_2 t} .. math:: y = C_1 (\lambda_1 - a) e^{\lambda_1 t} + C_2 (\lambda_2 - a) e^{\lambda_2 t} where `C_1` and `C_2` are arbitrary constants. 1.2. If `D < 0`. The characteristic equation has two complex conjugate roots, `\lambda_1 = \sigma + i \beta` and `\lambda_2 = \sigma - i \beta`. The general solution of the system is given by .. math:: x = b e^{\sigma t} (C_1 \sin(\beta t) + C_2 \cos(\beta t)) .. math:: y = e^{\sigma t} ([(\sigma - a) C_1 - \beta C_2] \sin(\beta t) + [\beta C_1 + (\sigma - a) C_2] \cos(\beta t)) 1.3. If `D = 0` and `a \neq d`. The characteristic equation has two equal roots, `\lambda_1 = \lambda_2`. The general solution of the system is written as .. math:: x = 2b (C_1 + \frac{C_2}{a-d} + C_2 t) e^{\frac{a+d}{2} t} .. math:: y = [(d - a) C_1 + C_2 + (d - a) C_2 t] e^{\frac{a+d}{2} t} 1.4. If `D = 0` and `a = d \neq 0` and `b = 0` .. math:: x = C_1 e^{a t} , y = (c C_1 t + C_2) e^{a t} 1.5. If `D = 0` and `a = d \neq 0` and `c = 0` .. math:: x = (b C_1 t + C_2) e^{a t} , y = C_1 e^{a t} 2. Case when `ad - bc = 0` and `a^{2} + b^{2} > 0`. The whole straight line `ax + by = 0` consists of singular points. The original system of differential equations can be rewritten as .. math:: x' = ax + by , y' = k (ax + by) 2.1 If `a + bk \neq 0`, the solution will be .. math:: x = b C_1 + C_2 e^{(a + bk) t} , y = -a C_1 + k C_2 e^{(a + bk) t} 2.2 If `a + bk = 0`, the solution will be .. math:: x = C_1 (bk t - 1) + b C_2 t , y = k^{2} b C_1 t + (b k^{2} t + 1) C_2 """ # FIXME: at least some of these can fail to give two linearly # independent solutions e.g., because they make assumptions about # zero/nonzero of certain coefficients. I've "fixed" one and # raised NotImplementedError in another. I think this should probably # just be re-written in terms of eigenvectors...
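# Illustrative sketch of the eigenvector-based rewrite suggested in the FIXME
# above (an assumption about a possible alternative, not what the code below
# does): for a constant system X' = M*X, the columns of exp(M*t) always give
# two linearly independent solutions, with no case analysis on the
# coefficients.  The matrix used here is only an example; the helper name is
# hypothetical.
def _linear_2eq_order1_matrix_exp_sketch():
    from sympy import Matrix, symbols
    t, C1, C2 = symbols('t C1 C2')
    M = Matrix([[1, 2], [3, 2]])               # example [[a, b], [c, d]]
    gensol = (M*t).exp() * Matrix([C1, C2])    # column vector [x(t), y(t)]
    return gensol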
l = Dummy('l') C1, C2, C3, C4 = get_numbered_constants(eq, num=4) l1 = rootof(l**2 - (r['a']+r['d'])*l + r['a']*r['d'] - r['b']*r['c'], l, 0) l2 = rootof(l**2 - (r['a']+r['d'])*l + r['a']*r['d'] - r['b']*r['c'], l, 1) D = (r['a'] - r['d'])**2 + 4*r['b']*r['c'] if (r['a']*r['d'] - r['b']*r['c']) != 0: if D > 0: if r['b'].is_zero: # tempting to use this in all cases, but does not guarantee linearly independent eigenvectors gsol1 = C1*(l1 - r['d'] + r['b'])*exp(l1*t) + C2*(l2 - r['d'] + r['b'])*exp(l2*t) gsol2 = C1*(l1 - r['a'] + r['c'])*exp(l1*t) + C2*(l2 - r['a'] + r['c'])*exp(l2*t) else: gsol1 = C1*r['b']*exp(l1*t) + C2*r['b']*exp(l2*t) gsol2 = C1*(l1 - r['a'])*exp(l1*t) + C2*(l2 - r['a'])*exp(l2*t) if D < 0: sigma = re(l1) if im(l1).is_positive: beta = im(l1) else: beta = im(l2) if r['b'].is_zero: raise NotImplementedError('b == 0 case not implemented') gsol1 = r['b']*exp(sigma*t)*(C1*sin(beta*t)+C2*cos(beta*t)) gsol2 = exp(sigma*t)*(((C1*(sigma-r['a'])-C2*beta)*sin(beta*t)+(C1*beta+(sigma-r['a'])*C2)*cos(beta*t))) if D == 0: if r['a']!=r['d']: gsol1 = 2*r['b']*(C1 + C2/(r['a']-r['d'])+C2*t)*exp((r['a']+r['d'])*t/2) gsol2 = ((r['d']-r['a'])*C1+C2+(r['d']-r['a'])*C2*t)*exp((r['a']+r['d'])*t/2) if r['a']==r['d'] and r['a']!=0 and r['b']==0: gsol1 = C1*exp(r['a']*t) gsol2 = (r['c']*C1*t+C2)*exp(r['a']*t) if r['a']==r['d'] and r['a']!=0 and r['c']==0: gsol1 = (r['b']*C1*t+C2)*exp(r['a']*t) gsol2 = C1*exp(r['a']*t) elif (r['a']*r['d'] - r['b']*r['c']) == 0 and (r['a']**2+r['b']**2) > 0: k = r['c']/r['a'] if r['a']+r['b']*k != 0: gsol1 = r['b']*C1 + C2*exp((r['a']+r['b']*k)*t) gsol2 = -r['a']*C1 + k*C2*exp((r['a']+r['b']*k)*t) else: gsol1 = C1*(r['b']*k*t-1)+r['b']*C2*t gsol2 = k**2*r['b']*C1*t+(r['b']*k**2*t+1)*C2 return [Eq(x(t), gsol1), Eq(y(t), gsol2)] def _linear_2eq_order1_type2(x, y, t, r, eq): r""" The equations of this type are .. math:: x' = ax + by + k1 , y' = cx + dy + k2 The general solution of this system is given by sum of its particular solution and the general solution of the corresponding homogeneous system is obtained from type1. 1. When `ad - bc \neq 0`. The particular solution will be `x = x_0` and `y = y_0` where `x_0` and `y_0` are determined by solving linear system of equations .. math:: a x_0 + b y_0 + k1 = 0 , c x_0 + d y_0 + k2 = 0 2. When `ad - bc = 0` and `a^{2} + b^{2} > 0`. In this case, the system of equation becomes .. math:: x' = ax + by + k_1 , y' = k (ax + by) + k_2 2.1 If `\sigma = a + bk \neq 0`, particular solution is given by .. math:: x = b \sigma^{-1} (c_1 k - c_2) t - \sigma^{-2} (a c_1 + b c_2) .. math:: y = kx + (c_2 - c_1 k) t 2.2 If `\sigma = a + bk = 0`, particular solution is given by .. math:: x = \frac{1}{2} b (c_2 - c_1 k) t^{2} + c_1 t .. math:: y = kx + (c_2 - c_1 k) t """ r['k1'] = -r['k1']; r['k2'] = -r['k2'] if (r['a']*r['d'] - r['b']*r['c']) != 0: x0, y0 = symbols('x0, y0', cls=Dummy) sol = solve((r['a']*x0+r['b']*y0+r['k1'], r['c']*x0+r['d']*y0+r['k2']), x0, y0) psol = [sol[x0], sol[y0]] elif (r['a']*r['d'] - r['b']*r['c']) == 0 and (r['a']**2+r['b']**2) > 0: k = r['c']/r['a'] sigma = r['a'] + r['b']*k if sigma != 0: sol1 = r['b']*sigma**-1*(r['k1']*k-r['k2'])*t - sigma**-2*(r['a']*r['k1']+r['b']*r['k2']) sol2 = k*sol1 + (r['k2']-r['k1']*k)*t else: # FIXME: a previous typo fix shows this is not covered by tests sol1 = r['b']*(r['k2']-r['k1']*k)*t**2 + r['k1']*t sol2 = k*sol1 + (r['k2']-r['k1']*k)*t psol = [sol1, sol2] return psol def _linear_2eq_order1_type3(x, y, t, r, eq): r""" The equations of this type of ode are .. 
math:: x' = f(t) x + g(t) y .. math:: y' = g(t) x + f(t) y The solution of such equations is given by .. math:: x = e^{F} (C_1 e^{G} + C_2 e^{-G}) , y = e^{F} (C_1 e^{G} - C_2 e^{-G}) where `C_1` and `C_2` are arbitary constants, and .. math:: F = \int f(t) \,dt , G = \int g(t) \,dt """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) F = Integral(r['a'], t) G = Integral(r['b'], t) sol1 = exp(F)*(C1*exp(G) + C2*exp(-G)) sol2 = exp(F)*(C1*exp(G) - C2*exp(-G)) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order1_type4(x, y, t, r, eq): r""" The equations of this type of ode are . .. math:: x' = f(t) x + g(t) y .. math:: y' = -g(t) x + f(t) y The solution is given by .. math:: x = F (C_1 \cos(G) + C_2 \sin(G)), y = F (-C_1 \sin(G) + C_2 \cos(G)) where `C_1` and `C_2` are arbitary constants, and .. math:: F = \int f(t) \,dt , G = \int g(t) \,dt """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) if r['b'] == -r['c']: F = exp(Integral(r['a'], t)) G = Integral(r['b'], t) sol1 = F*(C1*cos(G) + C2*sin(G)) sol2 = F*(-C1*sin(G) + C2*cos(G)) elif r['d'] == -r['a']: F = exp(Integral(r['c'], t)) G = Integral(r['d'], t) sol1 = F*(-C1*sin(G) + C2*cos(G)) sol2 = F*(C1*cos(G) + C2*sin(G)) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order1_type5(x, y, t, r, eq): r""" The equations of this type of ode are . .. math:: x' = f(t) x + g(t) y .. math:: y' = a g(t) x + [f(t) + b g(t)] y The transformation of .. math:: x = e^{\int f(t) \,dt} u , y = e^{\int f(t) \,dt} v , T = \int g(t) \,dt leads to a system of constant coefficient linear differential equations .. math:: u'(T) = v , v'(T) = au + bv """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) u, v = symbols('u, v', function=True) T = Symbol('T') if not cancel(r['c']/r['b']).has(t): p = cancel(r['c']/r['b']) q = cancel((r['d']-r['a'])/r['b']) eq = (Eq(diff(u(T),T), v(T)), Eq(diff(v(T),T), p*u(T)+q*v(T))) sol = dsolve(eq) sol1 = exp(Integral(r['a'], t))*sol[0].rhs.subs(T, Integral(r['b'],t)) sol2 = exp(Integral(r['a'], t))*sol[1].rhs.subs(T, Integral(r['b'],t)) if not cancel(r['a']/r['d']).has(t): p = cancel(r['a']/r['d']) q = cancel((r['b']-r['c'])/r['d']) sol = dsolve(Eq(diff(u(T),T), v(T)), Eq(diff(v(T),T), p*u(T)+q*v(T))) sol1 = exp(Integral(r['c'], t))*sol[1].rhs.subs(T, Integral(r['d'],t)) sol2 = exp(Integral(r['c'], t))*sol[0].rhs.subs(T, Integral(r['d'],t)) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order1_type6(x, y, t, r, eq): r""" The equations of this type of ode are . .. math:: x' = f(t) x + g(t) y .. math:: y' = a [f(t) + a h(t)] x + a [g(t) - h(t)] y This is solved by first multiplying the first equation by `-a` and adding it to the second equation to obtain .. math:: y' - a x' = -a h(t) (y - a x) Setting `U = y - ax` and integrating the equation we arrive at .. math:: y - ax = C_1 e^{-a \int h(t) \,dt} and on substituing the value of y in first equation give rise to first order ODEs. After solving for `x`, we can obtain `y` by substituting the value of `x` in second equation. 
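# ---------------------------------------------------------------------------
# Illustrative verification sketch (not part of the original module) of the
# reduction described above: with U = y - a*x, the combination y' - a*x'
# collapses to -a*h(t)*U, which is the separable equation that is then
# integrated.  The helper name is hypothetical.
def _linear_2eq_order1_type6_reduction_sketch():
    from sympy import Function, symbols, expand
    t, a = symbols('t a')
    f, g, h, x, y = (Function(n) for n in 'fghxy')
    xprime = f(t)*x(t) + g(t)*y(t)
    yprime = a*(f(t) + a*h(t))*x(t) + a*(g(t) - h(t))*y(t)
    U = y(t) - a*x(t)
    # The difference is identically zero, confirming U' = -a*h(t)*U.
    return expand((yprime - a*xprime) - (-a*h(t)*U))   # -> 0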
""" C1, C2, C3, C4 = get_numbered_constants(eq, num=4) p = 0 q = 0 p1 = cancel(r['c']/cancel(r['c']/r['d']).as_numer_denom()[0]) p2 = cancel(r['a']/cancel(r['a']/r['b']).as_numer_denom()[0]) for n, i in enumerate([p1, p2]): for j in Mul.make_args(collect_const(i)): if not j.has(t): q = j if q!=0 and n==0: if ((r['c']/j - r['a'])/(r['b'] - r['d']/j)) == j: p = 1 s = j break if q!=0 and n==1: if ((r['a']/j - r['c'])/(r['d'] - r['b']/j)) == j: p = 2 s = j break if p == 1: equ = diff(x(t),t) - r['a']*x(t) - r['b']*(s*x(t) + C1*exp(-s*Integral(r['b'] - r['d']/s, t))) hint1 = classify_ode(equ)[1] sol1 = dsolve(equ, hint=hint1+'_Integral').rhs sol2 = s*sol1 + C1*exp(-s*Integral(r['b'] - r['d']/s, t)) elif p ==2: equ = diff(y(t),t) - r['c']*y(t) - r['d']*s*y(t) + C1*exp(-s*Integral(r['d'] - r['b']/s, t)) hint1 = classify_ode(equ)[1] sol2 = dsolve(equ, hint=hint1+'_Integral').rhs sol1 = s*sol2 + C1*exp(-s*Integral(r['d'] - r['b']/s, t)) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order1_type7(x, y, t, r, eq): r""" The equations of this type of ode are . .. math:: x' = f(t) x + g(t) y .. math:: y' = h(t) x + p(t) y Differentiating the first equation and substituting the value of `y` from second equation will give a second-order linear equation .. math:: g x'' - (fg + gp + g') x' + (fgp - g^{2} h + f g' - f' g) x = 0 This above equation can be easily integrated if following conditions are satisfied. 1. `fgp - g^{2} h + f g' - f' g = 0` 2. `fgp - g^{2} h + f g' - f' g = ag, fg + gp + g' = bg` If first condition is satisfied then it is solved by current dsolve solver and in second case it becomes a constant cofficient differential equation which is also solved by current solver. Otherwise if the above condition fails then, a particular solution is assumed as `x = x_0(t)` and `y = y_0(t)` Then the general solution is expressed as .. math:: x = C_1 x_0(t) + C_2 x_0(t) \int \frac{g(t) F(t) P(t)}{x_0^{2}(t)} \,dt .. math:: y = C_1 y_0(t) + C_2 [\frac{F(t) P(t)}{x_0(t)} + y_0(t) \int \frac{g(t) F(t) P(t)}{x_0^{2}(t)} \,dt] where C1 and C2 are arbitary constants and .. 
math:: F(t) = e^{\int f(t) \,dt} , P(t) = e^{\int p(t) \,dt} """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) e1 = r['a']*r['b']*r['c'] - r['b']**2*r['c'] + r['a']*diff(r['b'],t) - diff(r['a'],t)*r['b'] e2 = r['a']*r['c']*r['d'] - r['b']*r['c']**2 + diff(r['c'],t)*r['d'] - r['c']*diff(r['d'],t) m1 = r['a']*r['b'] + r['b']*r['d'] + diff(r['b'],t) m2 = r['a']*r['c'] + r['c']*r['d'] + diff(r['c'],t) if e1 == 0: sol1 = dsolve(r['b']*diff(x(t),t,t) - m1*diff(x(t),t)).rhs sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs elif e2 == 0: sol2 = dsolve(r['c']*diff(y(t),t,t) - m2*diff(y(t),t)).rhs sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs elif not (e1/r['b']).has(t) and not (m1/r['b']).has(t): sol1 = dsolve(diff(x(t),t,t) - (m1/r['b'])*diff(x(t),t) - (e1/r['b'])*x(t)).rhs sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs elif not (e2/r['c']).has(t) and not (m2/r['c']).has(t): sol2 = dsolve(diff(y(t),t,t) - (m2/r['c'])*diff(y(t),t) - (e2/r['c'])*y(t)).rhs sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs else: x0, y0 = symbols('x0, y0') #x0 and y0 being particular solutions F = exp(Integral(r['a'],t)) P = exp(Integral(r['d'],t)) sol1 = C1*x0 + C2*x0*Integral(r['b']*F*P/x0**2, t) sol2 = C1*y0 + C2(F*P/x0 + y0*Integral(r['b']*F*P/x0**2, t)) return [Eq(x(t), sol1), Eq(y(t), sol2)] def sysode_linear_2eq_order2(match_): x = match_['func'][0].func y = match_['func'][1].func func = match_['func'] fc = match_['func_coeff'] eq = match_['eq'] C1, C2, C3, C4 = get_numbered_constants(eq, num=4) r = dict() t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] for i in range(2): eqs = [] for terms in Add.make_args(eq[i]): eqs.append(terms/fc[i,func[i],2]) eq[i] = Add(*eqs) # for equations Eq(diff(x(t),t,t), a1*diff(x(t),t)+b1*diff(y(t),t)+c1*x(t)+d1*y(t)+e1) # and Eq(a2*diff(y(t),t,t), a2*diff(x(t),t)+b2*diff(y(t),t)+c2*x(t)+d2*y(t)+e2) r['a1'] = -fc[0,x(t),1]/fc[0,x(t),2] ; r['a2'] = -fc[1,x(t),1]/fc[1,y(t),2] r['b1'] = -fc[0,y(t),1]/fc[0,x(t),2] ; r['b2'] = -fc[1,y(t),1]/fc[1,y(t),2] r['c1'] = -fc[0,x(t),0]/fc[0,x(t),2] ; r['c2'] = -fc[1,x(t),0]/fc[1,y(t),2] r['d1'] = -fc[0,y(t),0]/fc[0,x(t),2] ; r['d2'] = -fc[1,y(t),0]/fc[1,y(t),2] const = [S(0), S(0)] for i in range(2): for j in Add.make_args(eq[i]): if not (j.has(x(t)) or j.has(y(t))): const[i] += j r['e1'] = -const[0] r['e2'] = -const[1] if match_['type_of_equation'] == 'type1': sol = _linear_2eq_order2_type1(x, y, t, r, eq) elif match_['type_of_equation'] == 'type2': gsol = _linear_2eq_order2_type1(x, y, t, r, eq) psol = _linear_2eq_order2_type2(x, y, t, r, eq) sol = [Eq(x(t), gsol[0].rhs+psol[0]), Eq(y(t), gsol[1].rhs+psol[1])] elif match_['type_of_equation'] == 'type3': sol = _linear_2eq_order2_type3(x, y, t, r, eq) elif match_['type_of_equation'] == 'type4': sol = _linear_2eq_order2_type4(x, y, t, r, eq) elif match_['type_of_equation'] == 'type5': sol = _linear_2eq_order2_type5(x, y, t, r, eq) elif match_['type_of_equation'] == 'type6': sol = _linear_2eq_order2_type6(x, y, t, r, eq) elif match_['type_of_equation'] == 'type7': sol = _linear_2eq_order2_type7(x, y, t, r, eq) elif match_['type_of_equation'] == 'type8': sol = _linear_2eq_order2_type8(x, y, t, r, eq) elif match_['type_of_equation'] == 'type9': sol = _linear_2eq_order2_type9(x, y, t, r, eq) elif match_['type_of_equation'] == 'type10': sol = _linear_2eq_order2_type10(x, y, t, r, eq) elif match_['type_of_equation'] == 'type11': sol = _linear_2eq_order2_type11(x, y, t, r, eq) return sol def _linear_2eq_order2_type1(x, y, t, r, eq): r""" 
System of two constant-coefficient second-order linear homogeneous differential equations .. math:: x'' = ax + by .. math:: y'' = cx + dy The characteristic equation for the above system is .. math:: \lambda^4 - (a + d) \lambda^2 + ad - bc = 0 with discriminant `D = (a - d)^2 + 4bc`. 1. When `ad - bc \neq 0` 1.1. If `D \neq 0`, the characteristic equation has four distinct roots, `\lambda_1, \lambda_2, \lambda_3, \lambda_4`. The general solution of the system is .. math:: x = C_1 b e^{\lambda_1 t} + C_2 b e^{\lambda_2 t} + C_3 b e^{\lambda_3 t} + C_4 b e^{\lambda_4 t} .. math:: y = C_1 (\lambda_1^{2} - a) e^{\lambda_1 t} + C_2 (\lambda_2^{2} - a) e^{\lambda_2 t} + C_3 (\lambda_3^{2} - a) e^{\lambda_3 t} + C_4 (\lambda_4^{2} - a) e^{\lambda_4 t} where `C_1,..., C_4` are arbitrary constants. 1.2. If `D = 0` and `a \neq d`: .. math:: x = 2 C_1 (bt + \frac{2bk}{a - d}) e^{\frac{kt}{2}} + 2 C_2 (bt + \frac{2bk}{a - d}) e^{\frac{-kt}{2}} + 2b C_3 t e^{\frac{kt}{2}} + 2b C_4 t e^{\frac{-kt}{2}} .. math:: y = C_1 (d - a) t e^{\frac{kt}{2}} + C_2 (d - a) t e^{\frac{-kt}{2}} + C_3 [(d - a) t + 2k] e^{\frac{kt}{2}} + C_4 [(d - a) t - 2k] e^{\frac{-kt}{2}} where `C_1,..., C_4` are arbitrary constants and `k = \sqrt{2 (a + d)}` 1.3. If `D = 0` and `a = d \neq 0` and `b = 0`: .. math:: x = 2 \sqrt{a} C_1 e^{\sqrt{a} t} + 2 \sqrt{a} C_2 e^{-\sqrt{a} t} .. math:: y = c C_1 t e^{\sqrt{a} t} - c C_2 t e^{-\sqrt{a} t} + C_3 e^{\sqrt{a} t} + C_4 e^{-\sqrt{a} t} 1.4. If `D = 0` and `a = d \neq 0` and `c = 0`: .. math:: x = b C_1 t e^{\sqrt{a} t} - b C_2 t e^{-\sqrt{a} t} + C_3 e^{\sqrt{a} t} + C_4 e^{-\sqrt{a} t} .. math:: y = 2 \sqrt{a} C_1 e^{\sqrt{a} t} + 2 \sqrt{a} C_2 e^{-\sqrt{a} t} 2. When `ad - bc = 0` and `a^2 + b^2 > 0`, the original system becomes .. math:: x'' = ax + by .. math:: y'' = k (ax + by) 2.1. If `a + bk \neq 0`: .. math:: x = C_1 e^{t \sqrt{a + bk}} + C_2 e^{-t \sqrt{a + bk}} + C_3 bt + C_4 b .. math:: y = C_1 k e^{t \sqrt{a + bk}} + C_2 k e^{-t \sqrt{a + bk}} - C_3 at - C_4 a 2.2. If `a + bk = 0`: .. math:: x = C_1 b t^3 + C_2 b t^2 + C_3 t + C_4 ..
math:: y = kx + 6 C_1 t + 2 C_2 """ r['a'] = r['c1'] r['b'] = r['d1'] r['c'] = r['c2'] r['d'] = r['d2'] l = Symbol('l') C1, C2, C3, C4 = get_numbered_constants(eq, num=4) chara_eq = l**4 - (r['a']+r['d'])*l**2 + r['a']*r['d'] - r['b']*r['c'] l1 = rootof(chara_eq, 0) l2 = rootof(chara_eq, 1) l3 = rootof(chara_eq, 2) l4 = rootof(chara_eq, 3) D = (r['a'] - r['d'])**2 + 4*r['b']*r['c'] if (r['a']*r['d'] - r['b']*r['c']) != 0: if D != 0: gsol1 = C1*r['b']*exp(l1*t) + C2*r['b']*exp(l2*t) + C3*r['b']*exp(l3*t) \ + C4*r['b']*exp(l4*t) gsol2 = C1*(l1**2-r['a'])*exp(l1*t) + C2*(l2**2-r['a'])*exp(l2*t) + \ C3*(l3**2-r['a'])*exp(l3*t) + C4*(l4**2-r['a'])*exp(l4*t) else: if r['a'] != r['d']: k = sqrt(2*(r['a']+r['d'])) mid = r['b']*t+2*r['b']*k/(r['a']-r['d']) gsol1 = 2*C1*mid*exp(k*t/2) + 2*C2*mid*exp(-k*t/2) + \ 2*r['b']*C3*t*exp(k*t/2) + 2*r['b']*C4*t*exp(-k*t/2) gsol2 = C1*(r['d']-r['a'])*t*exp(k*t/2) + C2*(r['d']-r['a'])*t*exp(-k*t/2) + \ C3*((r['d']-r['a'])*t+2*k)*exp(k*t/2) + C4*((r['d']-r['a'])*t-2*k)*exp(-k*t/2) elif r['a'] == r['d'] != 0 and r['b'] == 0: sa = sqrt(r['a']) gsol1 = 2*sa*C1*exp(sa*t) + 2*sa*C2*exp(-sa*t) gsol2 = r['c']*C1*t*exp(sa*t)-r['c']*C2*t*exp(-sa*t)+C3*exp(sa*t)+C4*exp(-sa*t) elif r['a'] == r['d'] != 0 and r['c'] == 0: sa = sqrt(r['a']) gsol1 = r['b']*C1*t*exp(sa*t)-r['b']*C2*t*exp(-sa*t)+C3*exp(sa*t)+C4*exp(-sa*t) gsol2 = 2*sa*C1*exp(sa*t) + 2*sa*C2*exp(-sa*t) elif (r['a']*r['d'] - r['b']*r['c']) == 0 and (r['a']**2 + r['b']**2) > 0: k = r['c']/r['a'] if r['a'] + r['b']*k != 0: mid = sqrt(r['a'] + r['b']*k) gsol1 = C1*exp(mid*t) + C2*exp(-mid*t) + C3*r['b']*t + C4*r['b'] gsol2 = C1*k*exp(mid*t) + C2*k*exp(-mid*t) - C3*r['a']*t - C4*r['a'] else: gsol1 = C1*r['b']*t**3 + C2*r['b']*t**2 + C3*t + C4 gsol2 = k*gsol1 + 6*C1*t + 2*C2 return [Eq(x(t), gsol1), Eq(y(t), gsol2)] def _linear_2eq_order2_type2(x, y, t, r, eq): r""" The equations in this type are .. math:: x'' = a_1 x + b_1 y + c_1 .. math:: y'' = a_2 x + b_2 y + c_2 The general solution of this system is given by the sum of its particular solution and the general solution of the homogeneous system. The general solution is given by the linear system of 2 equation of order 2 and type 1 1. If `a_1 b_2 - a_2 b_1 \neq 0`. A particular solution will be `x = x_0` and `y = y_0` where the constants `x_0` and `y_0` are determined by solving the linear algebraic system .. math:: a_1 x_0 + b_1 y_0 + c_1 = 0, a_2 x_0 + b_2 y_0 + c_2 = 0 2. If `a_1 b_2 - a_2 b_1 = 0` and `a_1^2 + b_1^2 > 0`. In this case, the system in question becomes .. math:: x'' = ax + by + c_1, y'' = k (ax + by) + c_2 2.1. If `\sigma = a + bk \neq 0`, the particular solution will be .. math:: x = \frac{1}{2} b \sigma^{-1} (c_1 k - c_2) t^2 - \sigma^{-2} (a c_1 + b c_2) .. math:: y = kx + \frac{1}{2} (c_2 - c_1 k) t^2 2.2. If `\sigma = a + bk = 0`, the particular solution will be .. math:: x = \frac{1}{24} b (c_2 - c_1 k) t^4 + \frac{1}{2} c_1 t^2 .. 
math:: y = kx + \frac{1}{2} (c_2 - c_1 k) t^2 """ x0, y0 = symbols('x0, y0') if r['c1']*r['d2'] - r['c2']*r['d1'] != 0: sol = solve((r['c1']*x0+r['d1']*y0+r['e1'], r['c2']*x0+r['d2']*y0+r['e2']), x0, y0) psol = [sol[x0], sol[y0]] elif r['c1']*r['d2'] - r['c2']*r['d1'] == 0 and (r['c1']**2 + r['d1']**2) > 0: k = r['c2']/r['c1'] sig = r['c1'] + r['d1']*k if sig != 0: psol1 = r['d1']*sig**-1*(r['e1']*k-r['e2'])*t**2/2 - \ sig**-2*(r['c1']*r['e1']+r['d1']*r['e2']) psol2 = k*psol1 + (r['e2'] - r['e1']*k)*t**2/2 psol = [psol1, psol2] else: psol1 = r['d1']*(r['e2']-r['e1']*k)*t**4/24 + r['e1']*t**2/2 psol2 = k*psol1 + (r['e2']-r['e1']*k)*t**2/2 psol = [psol1, psol2] return psol def _linear_2eq_order2_type3(x, y, t, r, eq): r""" These type of equation is used for describing the horizontal motion of a pendulum taking into account the Earth rotation. The solution is given with `a^2 + 4b > 0`: .. math:: x = C_1 \cos(\alpha t) + C_2 \sin(\alpha t) + C_3 \cos(\beta t) + C_4 \sin(\beta t) .. math:: y = -C_1 \sin(\alpha t) + C_2 \cos(\alpha t) - C_3 \sin(\beta t) + C_4 \cos(\beta t) where `C_1,...,C_4` and .. math:: \alpha = \frac{1}{2} a + \frac{1}{2} \sqrt{a^2 + 4b}, \beta = \frac{1}{2} a - \frac{1}{2} \sqrt{a^2 + 4b} """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) if r['b1']**2 - 4*r['c1'] > 0: r['a'] = r['b1'] ; r['b'] = -r['c1'] alpha = r['a']/2 + sqrt(r['a']**2 + 4*r['b'])/2 beta = r['a']/2 - sqrt(r['a']**2 + 4*r['b'])/2 sol1 = C1*cos(alpha*t) + C2*sin(alpha*t) + C3*cos(beta*t) + C4*sin(beta*t) sol2 = -C1*sin(alpha*t) + C2*cos(alpha*t) - C3*sin(beta*t) + C4*cos(beta*t) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type4(x, y, t, r, eq): r""" These equations are found in the theory of oscillations .. math:: x'' + a_1 x' + b_1 y' + c_1 x + d_1 y = k_1 e^{i \omega t} .. math:: y'' + a_2 x' + b_2 y' + c_2 x + d_2 y = k_2 e^{i \omega t} The general solution of this linear nonhomogeneous system of constant-coefficient differential equations is given by the sum of its particular solution and the general solution of the corresponding homogeneous system (with `k_1 = k_2 = 0`) 1. A particular solution is obtained by the method of undetermined coefficients: .. math:: x = A_* e^{i \omega t}, y = B_* e^{i \omega t} On substituting these expressions into the original system of differential equations, one arrive at a linear nonhomogeneous system of algebraic equations for the coefficients `A` and `B`. 2. The general solution of the homogeneous system of differential equations is determined by a linear combination of linearly independent particular solutions determined by the method of undetermined coefficients in the form of exponentials: .. math:: x = A e^{\lambda t}, y = B e^{\lambda t} On substituting these expressions into the original system and colleting the coefficients of the unknown `A` and `B`, one obtains .. math:: (\lambda^{2} + a_1 \lambda + c_1) A + (b_1 \lambda + d_1) B = 0 .. math:: (a_2 \lambda + c_2) A + (\lambda^{2} + b_2 \lambda + d_2) B = 0 The determinant of this system must vanish for nontrivial solutions A, B to exist. This requirement results in the following characteristic equation for `\lambda` .. math:: (\lambda^2 + a_1 \lambda + c_1) (\lambda^2 + b_2 \lambda + d_2) - (b_1 \lambda + d_1) (a_2 \lambda + c_2) = 0 If all roots `k_1,...,k_4` of this equation are distict, the general solution of the original system of the differential equations has the form .. 
math:: x = C_1 (b_1 \lambda_1 + d_1) e^{\lambda_1 t} - C_2 (b_1 \lambda_2 + d_1) e^{\lambda_2 t} - C_3 (b_1 \lambda_3 + d_1) e^{\lambda_3 t} - C_4 (b_1 \lambda_4 + d_1) e^{\lambda_4 t} .. math:: y = C_1 (\lambda_1^{2} + a_1 \lambda_1 + c_1) e^{\lambda_1 t} + C_2 (\lambda_2^{2} + a_1 \lambda_2 + c_1) e^{\lambda_2 t} + C_3 (\lambda_3^{2} + a_1 \lambda_3 + c_1) e^{\lambda_3 t} + C_4 (\lambda_4^{2} + a_1 \lambda_4 + c_1) e^{\lambda_4 t} """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) k = Symbol('k') Ra, Ca, Rb, Cb = symbols('Ra, Ca, Rb, Cb') a1 = r['a1'] ; a2 = r['a2'] b1 = r['b1'] ; b2 = r['b2'] c1 = r['c1'] ; c2 = r['c2'] d1 = r['d1'] ; d2 = r['d2'] k1 = r['e1'].expand().as_independent(t)[0] k2 = r['e2'].expand().as_independent(t)[0] ew1 = r['e1'].expand().as_independent(t)[1] ew2 = powdenest(ew1).as_base_exp()[1] ew3 = collect(ew2, t).coeff(t) w = cancel(ew3/I) # The particular solution is assumed to be (Ra+I*Ca)*exp(I*w*t) and # (Rb+I*Cb)*exp(I*w*t) for x(t) and y(t) respectively peq1 = (-w**2+c1)*Ra - a1*w*Ca + d1*Rb - b1*w*Cb - k1 peq2 = a1*w*Ra + (-w**2+c1)*Ca + b1*w*Rb + d1*Cb peq3 = c2*Ra - a2*w*Ca + (-w**2+d2)*Rb - b2*w*Cb - k2 peq4 = a2*w*Ra + c2*Ca + b2*w*Rb + (-w**2+d2)*Cb # FIXME: solve for what in what? Ra, Rb, etc I guess # but then psol not used for anything? psol = solve([peq1, peq2, peq3, peq4]) chareq = (k**2+a1*k+c1)*(k**2+b2*k+d2) - (b1*k+d1)*(a2*k+c2) [k1, k2, k3, k4] = roots_quartic(Poly(chareq)) sol1 = -C1*(b1*k1+d1)*exp(k1*t) - C2*(b1*k2+d1)*exp(k2*t) - \ C3*(b1*k3+d1)*exp(k3*t) - C4*(b1*k4+d1)*exp(k4*t) + (Ra+I*Ca)*exp(I*w*t) a1_ = (a1-1) sol2 = C1*(k1**2+a1_*k1+c1)*exp(k1*t) + C2*(k2**2+a1_*k2+c1)*exp(k2*t) + \ C3*(k3**2+a1_*k3+c1)*exp(k3*t) + C4*(k4**2+a1_*k4+c1)*exp(k4*t) + (Rb+I*Cb)*exp(I*w*t) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type5(x, y, t, r, eq): r""" The equation which come under this catagory are .. math:: x'' = a (t y' - y) .. math:: y'' = b (t x' - x) The transformation .. math:: u = t x' - x, b = t y' - y leads to the first-order system .. math:: u' = atv, v' = btu The general solution of this system is given by If `ab > 0`: .. math:: u = C_1 a e^{\frac{1}{2} \sqrt{ab} t^2} + C_2 a e^{-\frac{1}{2} \sqrt{ab} t^2} .. math:: v = C_1 \sqrt{ab} e^{\frac{1}{2} \sqrt{ab} t^2} - C_2 \sqrt{ab} e^{-\frac{1}{2} \sqrt{ab} t^2} If `ab < 0`: .. math:: u = C_1 a \cos(\frac{1}{2} \sqrt{\left|ab\right|} t^2) + C_2 a \sin(-\frac{1}{2} \sqrt{\left|ab\right|} t^2) .. math:: v = C_1 \sqrt{\left|ab\right|} \sin(\frac{1}{2} \sqrt{\left|ab\right|} t^2) + C_2 \sqrt{\left|ab\right|} \cos(-\frac{1}{2} \sqrt{\left|ab\right|} t^2) where `C_1` and `C_2` are arbitary constants. On substituting the value of `u` and `v` in above equations and integrating the resulting expressions, the general solution will become .. math:: x = C_3 t + t \int \frac{u}{t^2} \,dt, y = C_4 t + t \int \frac{u}{t^2} \,dt where `C_3` and `C_4` are arbitrary constants. """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) r['a'] = -r['d1'] ; r['b'] = -r['c2'] mul = sqrt(abs(r['a']*r['b'])) if r['a']*r['b'] > 0: u = C1*r['a']*exp(mul*t**2/2) + C2*r['a']*exp(-mul*t**2/2) v = C1*mul*exp(mul*t**2/2) - C2*mul*exp(-mul*t**2/2) else: u = C1*r['a']*cos(mul*t**2/2) + C2*r['a']*sin(mul*t**2/2) v = -C1*mul*sin(mul*t**2/2) + C2*mul*cos(mul*t**2/2) sol1 = C3*t + t*Integral(u/t**2, t) sol2 = C4*t + t*Integral(v/t**2, t) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type6(x, y, t, r, eq): r""" The equations are .. math:: x'' = f(t) (a_1 x + b_1 y) .. 
math:: y'' = f(t) (a_2 x + b_2 y) If `k_1` and `k_2` are roots of the quadratic equation .. math:: k^2 - (a_1 + b_2) k + a_1 b_2 - a_2 b_1 = 0 Then by multiplying appropriate constants and adding together original equations we obtain two independent equations: .. math:: z_1'' = k_1 f(t) z_1, z_1 = a_2 x + (k_1 - a_1) y .. math:: z_2'' = k_2 f(t) z_2, z_2 = a_2 x + (k_2 - a_1) y Solving the equations will give the values of `x` and `y` after obtaining the value of `z_1` and `z_2` by solving the differential equation and substuting the result. """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) k = Symbol('k') z = Function('z') num, den = cancel( (r['c1']*x(t) + r['d1']*y(t))/ (r['c2']*x(t) + r['d2']*y(t))).as_numer_denom() f = r['c1']/num.coeff(x(t)) a1 = num.coeff(x(t)) b1 = num.coeff(y(t)) a2 = den.coeff(x(t)) b2 = den.coeff(y(t)) chareq = k**2 - (a1 + b2)*k + a1*b2 - a2*b1 k1, k2 = [rootof(chareq, k) for k in range(Poly(chareq).degree())] z1 = dsolve(diff(z(t),t,t) - k1*f*z(t)).rhs z2 = dsolve(diff(z(t),t,t) - k2*f*z(t)).rhs sol1 = (k1*z2 - k2*z1 + a1*(z1 - z2))/(a2*(k1-k2)) sol2 = (z1 - z2)/(k1 - k2) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type7(x, y, t, r, eq): r""" The equations are given as .. math:: x'' = f(t) (a_1 x' + b_1 y') .. math:: y'' = f(t) (a_2 x' + b_2 y') If `k_1` and 'k_2` are roots of the quadratic equation .. math:: k^2 - (a_1 + b_2) k + a_1 b_2 - a_2 b_1 = 0 Then the system can be reduced by adding together the two equations multiplied by appropriate constants give following two independent equations: .. math:: z_1'' = k_1 f(t) z_1', z_1 = a_2 x + (k_1 - a_1) y .. math:: z_2'' = k_2 f(t) z_2', z_2 = a_2 x + (k_2 - a_1) y Integrating these and returning to the original variables, one arrives at a linear algebraic system for the unknowns `x` and `y`: .. math:: a_2 x + (k_1 - a_1) y = C_1 \int e^{k_1 F(t)} \,dt + C_2 .. math:: a_2 x + (k_2 - a_1) y = C_3 \int e^{k_2 F(t)} \,dt + C_4 where `C_1,...,C_4` are arbitrary constants and `F(t) = \int f(t) \,dt` """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) k = Symbol('k') num, den = cancel( (r['a1']*x(t) + r['b1']*y(t))/ (r['a2']*x(t) + r['b2']*y(t))).as_numer_denom() f = r['a1']/num.coeff(x(t)) a1 = num.coeff(x(t)) b1 = num.coeff(y(t)) a2 = den.coeff(x(t)) b2 = den.coeff(y(t)) chareq = k**2 - (a1 + b2)*k + a1*b2 - a2*b1 [k1, k2] = [rootof(chareq, k) for k in range(Poly(chareq).degree())] F = Integral(f, t) z1 = C1*Integral(exp(k1*F), t) + C2 z2 = C3*Integral(exp(k2*F), t) + C4 sol1 = (k1*z2 - k2*z1 + a1*(z1 - z2))/(a2*(k1-k2)) sol2 = (z1 - z2)/(k1 - k2) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type8(x, y, t, r, eq): r""" The equation of this catagory are .. math:: x'' = a f(t) (t y' - y) .. math:: y'' = b f(t) (t x' - x) The transformation .. math:: u = t x' - x, v = t y' - y leads to the system of first-order equations .. math:: u' = a t f(t) v, v' = b t f(t) u The general solution of this system has the form If `ab > 0`: .. math:: u = C_1 a e^{\sqrt{ab} \int t f(t) \,dt} + C_2 a e^{-\sqrt{ab} \int t f(t) \,dt} .. math:: v = C_1 \sqrt{ab} e^{\sqrt{ab} \int t f(t) \,dt} - C_2 \sqrt{ab} e^{-\sqrt{ab} \int t f(t) \,dt} If `ab < 0`: .. math:: u = C_1 a \cos(\sqrt{\left|ab\right|} \int t f(t) \,dt) + C_2 a \sin(-\sqrt{\left|ab\right|} \int t f(t) \,dt) .. math:: v = C_1 \sqrt{\left|ab\right|} \sin(\sqrt{\left|ab\right|} \int t f(t) \,dt) + C_2 \sqrt{\left|ab\right|} \cos(-\sqrt{\left|ab\right|} \int t f(t) \,dt) where `C_1` and `C_2` are arbitary constants. 
On substituting the value of `u` and `v` in above equations and integrating the resulting expressions, the general solution will become .. math:: x = C_3 t + t \int \frac{u}{t^2} \,dt, y = C_4 t + t \int \frac{u}{t^2} \,dt where `C_3` and `C_4` are arbitrary constants. """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) num, den = cancel(r['d1']/r['c2']).as_numer_denom() f = -r['d1']/num a = num b = den mul = sqrt(abs(a*b)) Igral = Integral(t*f, t) if a*b > 0: u = C1*a*exp(mul*Igral) + C2*a*exp(-mul*Igral) v = C1*mul*exp(mul*Igral) - C2*mul*exp(-mul*Igral) else: u = C1*a*cos(mul*Igral) + C2*a*sin(mul*Igral) v = -C1*mul*sin(mul*Igral) + C2*mul*cos(mul*Igral) sol1 = C3*t + t*Integral(u/t**2, t) sol2 = C4*t + t*Integral(v/t**2, t) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type9(x, y, t, r, eq): r""" .. math:: t^2 x'' + a_1 t x' + b_1 t y' + c_1 x + d_1 y = 0 .. math:: t^2 y'' + a_2 t x' + b_2 t y' + c_2 x + d_2 y = 0 These system of equations are euler type. The substitution of `t = \sigma e^{\tau} (\sigma \neq 0)` leads to the system of constant coefficient linear differential equations .. math:: x'' + (a_1 - 1) x' + b_1 y' + c_1 x + d_1 y = 0 .. math:: y'' + a_2 x' + (b_2 - 1) y' + c_2 x + d_2 y = 0 The general solution of the homogeneous system of differential equations is determined by a linear combination of linearly independent particular solutions determined by the method of undetermined coefficients in the form of exponentials .. math:: x = A e^{\lambda t}, y = B e^{\lambda t} On substituting these expressions into the original system and colleting the coefficients of the unknown `A` and `B`, one obtains .. math:: (\lambda^{2} + (a_1 - 1) \lambda + c_1) A + (b_1 \lambda + d_1) B = 0 .. math:: (a_2 \lambda + c_2) A + (\lambda^{2} + (b_2 - 1) \lambda + d_2) B = 0 The determinant of this system must vanish for nontrivial solutions A, B to exist. This requirement results in the following characteristic equation for `\lambda` .. math:: (\lambda^2 + (a_1 - 1) \lambda + c_1) (\lambda^2 + (b_2 - 1) \lambda + d_2) - (b_1 \lambda + d_1) (a_2 \lambda + c_2) = 0 If all roots `k_1,...,k_4` of this equation are distict, the general solution of the original system of the differential equations has the form .. math:: x = C_1 (b_1 \lambda_1 + d_1) e^{\lambda_1 t} - C_2 (b_1 \lambda_2 + d_1) e^{\lambda_2 t} - C_3 (b_1 \lambda_3 + d_1) e^{\lambda_3 t} - C_4 (b_1 \lambda_4 + d_1) e^{\lambda_4 t} .. math:: y = C_1 (\lambda_1^{2} + (a_1 - 1) \lambda_1 + c_1) e^{\lambda_1 t} + C_2 (\lambda_2^{2} + (a_1 - 1) \lambda_2 + c_1) e^{\lambda_2 t} + C_3 (\lambda_3^{2} + (a_1 - 1) \lambda_3 + c_1) e^{\lambda_3 t} + C_4 (\lambda_4^{2} + (a_1 - 1) \lambda_4 + c_1) e^{\lambda_4 t} """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) k = Symbol('k') a1 = -r['a1']*t; a2 = -r['a2']*t b1 = -r['b1']*t; b2 = -r['b2']*t c1 = -r['c1']*t**2; c2 = -r['c2']*t**2 d1 = -r['d1']*t**2; d2 = -r['d2']*t**2 eq = (k**2+(a1-1)*k+c1)*(k**2+(b2-1)*k+d2)-(b1*k+d1)*(a2*k+c2) [k1, k2, k3, k4] = roots_quartic(Poly(eq)) sol1 = -C1*(b1*k1+d1)*exp(k1*log(t)) - C2*(b1*k2+d1)*exp(k2*log(t)) - \ C3*(b1*k3+d1)*exp(k3*log(t)) - C4*(b1*k4+d1)*exp(k4*log(t)) a1_ = (a1-1) sol2 = C1*(k1**2+a1_*k1+c1)*exp(k1*log(t)) + C2*(k2**2+a1_*k2+c1)*exp(k2*log(t)) \ + C3*(k3**2+a1_*k3+c1)*exp(k3*log(t)) + C4*(k4**2+a1_*k4+c1)*exp(k4*log(t)) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type10(x, y, t, r, eq): r""" The equation of this catagory are .. math:: (\alpha t^2 + \beta t + \gamma)^{2} x'' = ax + by .. 
math:: (\alpha t^2 + \beta t + \gamma)^{2} y'' = cx + dy The transformation .. math:: \tau = \int \frac{1}{\alpha t^2 + \beta t + \gamma} \,dt , u = \frac{x}{\sqrt{\left|\alpha t^2 + \beta t + \gamma\right|}} , v = \frac{y}{\sqrt{\left|\alpha t^2 + \beta t + \gamma\right|}} leads to a constant coefficient linear system of equations .. math:: u'' = (a - \alpha \gamma + \frac{1}{4} \beta^{2}) u + b v .. math:: v'' = c u + (d - \alpha \gamma + \frac{1}{4} \beta^{2}) v These system of equations obtained can be solved by type1 of System of two constant-coefficient second-order linear homogeneous differential equations. """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) u, v = symbols('u, v', function=True) T = Symbol('T') p = Wild('p', exclude=[t, t**2]) q = Wild('q', exclude=[t, t**2]) s = Wild('s', exclude=[t, t**2]) n = Wild('n', exclude=[t, t**2]) num, den = r['c1'].as_numer_denom() dic = den.match((n*(p*t**2+q*t+s)**2).expand()) eqz = dic[p]*t**2 + dic[q]*t + dic[s] a = num/dic[n] b = cancel(r['d1']*eqz**2) c = cancel(r['c2']*eqz**2) d = cancel(r['d2']*eqz**2) [msol1, msol2] = dsolve([Eq(diff(u(t), t, t), (a - dic[p]*dic[s] + dic[q]**2/4)*u(t) \ + b*v(t)), Eq(diff(v(t),t,t), c*u(t) + (d - dic[p]*dic[s] + dic[q]**2/4)*v(t))]) sol1 = (msol1.rhs*sqrt(abs(eqz))).subs(t, Integral(1/eqz, t)) sol2 = (msol2.rhs*sqrt(abs(eqz))).subs(t, Integral(1/eqz, t)) return [Eq(x(t), sol1), Eq(y(t), sol2)] def _linear_2eq_order2_type11(x, y, t, r, eq): r""" The equations which comes under this type are .. math:: x'' = f(t) (t x' - x) + g(t) (t y' - y) .. math:: y'' = h(t) (t x' - x) + p(t) (t y' - y) The transformation .. math:: u = t x' - x, v = t y' - y leads to the linear system of first-order equations .. math:: u' = t f(t) u + t g(t) v, v' = t h(t) u + t p(t) v On substituting the value of `u` and `v` in transformed equation gives value of `x` and `y` as .. math:: x = C_3 t + t \int \frac{u}{t^2} \,dt , y = C_4 t + t \int \frac{v}{t^2} \,dt. where `C_3` and `C_4` are arbitrary constants. 
""" C1, C2, C3, C4 = get_numbered_constants(eq, num=4) u, v = symbols('u, v', function=True) f = -r['c1'] ; g = -r['d1'] h = -r['c2'] ; p = -r['d2'] [msol1, msol2] = dsolve([Eq(diff(u(t),t), t*f*u(t) + t*g*v(t)), Eq(diff(v(t),t), t*h*u(t) + t*p*v(t))]) sol1 = C3*t + t*Integral(msol1.rhs/t**2, t) sol2 = C4*t + t*Integral(msol2.rhs/t**2, t) return [Eq(x(t), sol1), Eq(y(t), sol2)] def sysode_linear_3eq_order1(match_): x = match_['func'][0].func y = match_['func'][1].func z = match_['func'][2].func func = match_['func'] fc = match_['func_coeff'] eq = match_['eq'] C1, C2, C3, C4 = get_numbered_constants(eq, num=4) r = dict() t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] for i in range(3): eqs = 0 for terms in Add.make_args(eq[i]): eqs += terms/fc[i,func[i],1] eq[i] = eqs # for equations: # Eq(g1*diff(x(t),t), a1*x(t)+b1*y(t)+c1*z(t)+d1), # Eq(g2*diff(y(t),t), a2*x(t)+b2*y(t)+c2*z(t)+d2), and # Eq(g3*diff(z(t),t), a3*x(t)+b3*y(t)+c3*z(t)+d3) r['a1'] = fc[0,x(t),0]/fc[0,x(t),1]; r['a2'] = fc[1,x(t),0]/fc[1,y(t),1]; r['a3'] = fc[2,x(t),0]/fc[2,z(t),1] r['b1'] = fc[0,y(t),0]/fc[0,x(t),1]; r['b2'] = fc[1,y(t),0]/fc[1,y(t),1]; r['b3'] = fc[2,y(t),0]/fc[2,z(t),1] r['c1'] = fc[0,z(t),0]/fc[0,x(t),1]; r['c2'] = fc[1,z(t),0]/fc[1,y(t),1]; r['c3'] = fc[2,z(t),0]/fc[2,z(t),1] for i in range(3): for j in Add.make_args(eq[i]): if not j.has(x(t), y(t), z(t)): raise NotImplementedError("Only homogeneous problems are supported, non-homogenous are not supported currently.") if match_['type_of_equation'] == 'type1': sol = _linear_3eq_order1_type1(x, y, z, t, r, eq) if match_['type_of_equation'] == 'type2': sol = _linear_3eq_order1_type2(x, y, z, t, r, eq) if match_['type_of_equation'] == 'type3': sol = _linear_3eq_order1_type3(x, y, z, t, r, eq) if match_['type_of_equation'] == 'type4': sol = _linear_3eq_order1_type4(x, y, z, t, r, eq) if match_['type_of_equation'] == 'type6': sol = _linear_neq_order1_type1(match_) return sol def _linear_3eq_order1_type1(x, y, z, t, r, eq): r""" .. math:: x' = ax .. math:: y' = bx + cy .. math:: z' = dx + ky + pz Solution of such equations are forward substitution. Solving first equations gives the value of `x`, substituting it in second and third equation and solving second equation gives `y` and similarly substituting `y` in third equation give `z`. .. math:: x = C_1 e^{at} .. math:: y = \frac{b C_1}{a - c} e^{at} + C_2 e^{ct} .. math:: z = \frac{C_1}{a - p} (d + \frac{bk}{a - c}) e^{at} + \frac{k C_2}{c - p} e^{ct} + C_3 e^{pt} where `C_1, C_2` and `C_3` are arbitrary constants. """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) a = -r['a1']; b = -r['a2']; c = -r['b2'] d = -r['a3']; k = -r['b3']; p = -r['c3'] sol1 = C1*exp(a*t) sol2 = b*C1*exp(a*t)/(a-c) + C2*exp(c*t) sol3 = C1*(d+b*k/(a-c))*exp(a*t)/(a-p) + k*C2*exp(c*t)/(c-p) + C3*exp(p*t) return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def _linear_3eq_order1_type2(x, y, z, t, r, eq): r""" The equations of this type are .. math:: x' = cy - bz .. math:: y' = az - cx .. math:: z' = bx - ay 1. First integral: .. math:: ax + by + cz = A \qquad - (1) .. math:: x^2 + y^2 + z^2 = B^2 \qquad - (2) where `A` and `B` are arbitrary constants. It follows from these integrals that the integral lines are circles formed by the intersection of the planes `(1)` and sphere `(2)` 2. Solution: .. math:: x = a C_0 + k C_1 \cos(kt) + (c C_2 - b C_3) \sin(kt) .. math:: y = b C_0 + k C_2 \cos(kt) + (a C_2 - c C_3) \sin(kt) .. 
math:: z = c C_0 + k C_3 \cos(kt) + (b C_2 - a C_3) \sin(kt) where `k = \sqrt{a^2 + b^2 + c^2}` and the four constants of integration, `C_1,...,C_4` are constrained by a single relation, .. math:: a C_1 + b C_2 + c C_3 = 0 """ C0, C1, C2, C3 = get_numbered_constants(eq, num=4, start=0) a = -r['c2']; b = -r['a3']; c = -r['b1'] k = sqrt(a**2 + b**2 + c**2) C3 = (-a*C1 - b*C2)/c sol1 = a*C0 + k*C1*cos(k*t) + (c*C2-b*C3)*sin(k*t) sol2 = b*C0 + k*C2*cos(k*t) + (a*C3-c*C1)*sin(k*t) sol3 = c*C0 + k*C3*cos(k*t) + (b*C1-a*C2)*sin(k*t) return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def _linear_3eq_order1_type3(x, y, z, t, r, eq): r""" Equations of this system of ODEs .. math:: a x' = bc (y - z) .. math:: b y' = ac (z - x) .. math:: c z' = ab (x - y) 1. First integral: .. math:: a^2 x + b^2 y + c^2 z = A where A is an arbitary constant. It follows that the integral lines are plane curves. 2. Solution: .. math:: x = C_0 + k C_1 \cos(kt) + a^{-1} bc (C_2 - C_3) \sin(kt) .. math:: y = C_0 + k C_2 \cos(kt) + a b^{-1} c (C_3 - C_1) \sin(kt) .. math:: z = C_0 + k C_3 \cos(kt) + ab c^{-1} (C_1 - C_2) \sin(kt) where `k = \sqrt{a^2 + b^2 + c^2}` and the four constants of integration, `C_1,...,C_4` are constrained by a single relation .. math:: a^2 C_1 + b^2 C_2 + c^2 C_3 = 0 """ C0, C1, C2, C3 = get_numbered_constants(eq, num=4, start=0) c = sqrt(r['b1']*r['c2']) b = sqrt(r['b1']*r['a3']) a = sqrt(r['c2']*r['a3']) C3 = (-a**2*C1-b**2*C2)/c**2 k = sqrt(a**2 + b**2 + c**2) sol1 = C0 + k*C1*cos(k*t) + a**-1*b*c*(C2-C3)*sin(k*t) sol2 = C0 + k*C2*cos(k*t) + a*b**-1*c*(C3-C1)*sin(k*t) sol3 = C0 + k*C3*cos(k*t) + a*b*c**-1*(C1-C2)*sin(k*t) return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def _linear_3eq_order1_type4(x, y, z, t, r, eq): r""" Equations: .. math:: x' = (a_1 f(t) + g(t)) x + a_2 f(t) y + a_3 f(t) z .. math:: y' = b_1 f(t) x + (b_2 f(t) + g(t)) y + b_3 f(t) z .. math:: z' = c_1 f(t) x + c_2 f(t) y + (c_3 f(t) + g(t)) z The transformation .. math:: x = e^{\int g(t) \,dt} u, y = e^{\int g(t) \,dt} v, z = e^{\int g(t) \,dt} w, \tau = \int f(t) \,dt leads to the system of constant coefficient linear differential equations .. math:: u' = a_1 u + a_2 v + a_3 w .. math:: v' = b_1 u + b_2 v + b_3 w .. math:: w' = c_1 u + c_2 v + c_3 w These system of equations are solved by homogeneous linear system of constant coefficients of `n` equations of first order. Then substituting the value of `u, v` and `w` in transformed equation gives value of `x, y` and `z`. """ u, v, w = symbols('u, v, w', function=True) a2, a3 = cancel(r['b1']/r['c1']).as_numer_denom() f = cancel(r['b1']/a2) b1 = cancel(r['a2']/f); b3 = cancel(r['c2']/f) c1 = cancel(r['a3']/f); c2 = cancel(r['b3']/f) a1, g = div(r['a1'],f) b2 = div(r['b2'],f)[0] c3 = div(r['c3'],f)[0] trans_eq = (diff(u(t),t)-a1*u(t)-a2*v(t)-a3*w(t), diff(v(t),t)-b1*u(t)-\ b2*v(t)-b3*w(t), diff(w(t),t)-c1*u(t)-c2*v(t)-c3*w(t)) sol = dsolve(trans_eq) sol1 = exp(Integral(g,t))*((sol[0].rhs).subs(t, Integral(f,t))) sol2 = exp(Integral(g,t))*((sol[1].rhs).subs(t, Integral(f,t))) sol3 = exp(Integral(g,t))*((sol[2].rhs).subs(t, Integral(f,t))) return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def sysode_linear_neq_order1(match_): sol = _linear_neq_order1_type1(match_) def _linear_neq_order1_type1(match_): r""" System of n first-order constant-coefficient linear nonhomogeneous differential equation .. math:: y'_k = a_{k1} y_1 + a_{k2} y_2 +...+ a_{kn} y_n; k = 1,2,...,n or that can be written as `\vec{y'} = A . 
\vec{y}` where `\vec{y}` is the column vector of the `y_k` for `k = 1,2,...,n` and `A` is an `n \times n` matrix. Since these equations are equivalent to a homogeneous linear system of first-order equations, the general solution contains `n` linearly independent parts built from exponential functions. Assume `y = \vec{v} e^{rt}` is a solution of the system, where `\vec{v}` is a vector of coefficients of `y_1,...,y_n`. Substituting `y` and `y' = r \vec{v} e^{r t}` into the equation `\vec{y'} = A . \vec{y}`, we get .. math:: r \vec{v} e^{rt} = A \vec{v} e^{rt} .. math:: r \vec{v} = A \vec{v} where `r` comes out to be an eigenvalue of `A` and the vector `\vec{v}` is an eigenvector of `A` corresponding to `r`. There are three possibilities for the eigenvalues of `A` - `n` distinct real eigenvalues - complex conjugate eigenvalues - eigenvalues with multiplicity `k` 1. When all eigenvalues `r_1,..,r_n` are distinct with `n` different eigenvectors `v_1,...v_n` then the solution is given by .. math:: \vec{y} = C_1 e^{r_1 t} \vec{v_1} + C_2 e^{r_2 t} \vec{v_2} +...+ C_n e^{r_n t} \vec{v_n} where `C_1,C_2,...,C_n` are arbitrary constants. 2. When some eigenvalues are complex, then in order to make the solution real we take a linear combination: if `r = a + bi` has an eigenvector `\vec{v} = \vec{w_1} + i \vec{w_2}` then to obtain real-valued solutions to the system, replace the complex-valued solution `e^{rx} \vec{v}` with the real-valued solution `e^{ax} (\vec{w_1} \cos(bx) - \vec{w_2} \sin(bx))` and for `r = a - bi` replace the solution `e^{-r x} \vec{v}` with `e^{ax} (\vec{w_1} \sin(bx) + \vec{w_2} \cos(bx))` 3. If some eigenvalues are repeated, then we get fewer than `n` linearly independent eigenvectors; we miss some of the solutions and need to construct the missing ones. We do this via generalized eigenvectors, vectors which are not eigenvectors but are close enough that we can use them to write down the remaining solutions. For an eigenvalue `r` with eigenvector `\vec{w}` we obtain `\vec{w_2},...,\vec{w_k}` using .. math:: (A - r I) . \vec{w_2} = \vec{w} .. math:: (A - r I) . \vec{w_3} = \vec{w_2} .. math:: \vdots .. math:: (A - r I) . \vec{w_k} = \vec{w_{k-1}} Then the solutions to the system for the eigenspace are `e^{rt} [\vec{w}], e^{rt} [t \vec{w} + \vec{w_2}], e^{rt} [\frac{t^2}{2} \vec{w} + t \vec{w_2} + \vec{w_3}], ...,e^{rt} [\frac{t^{k-1}}{(k-1)!} \vec{w} + \frac{t^{k-2}}{(k-2)!} \vec{w_2} +...+ t \vec{w_{k-1}} + \vec{w_k}]` So, if `\vec{y_1},...,\vec{y_n}` are `n` solutions obtained from the three categories above, then the general solution to the system `\vec{y'} = A . \vec{y}` is ..
math:: \vec{y} = C_1 \vec{y_1} + C_2 \vec{y_2} + \cdots + C_n \vec{y_n} """ eq = match_['eq'] func = match_['func'] fc = match_['func_coeff'] n = len(eq) t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] constants = numbered_symbols(prefix='C', cls=Symbol, start=1) M = Matrix(n,n,lambda i,j:-fc[i,func[j],0]) evector = M.eigenvects(simplify=True) def is_complex(mat, root): return Matrix(n, 1, lambda i,j: re(mat[i])*cos(im(root)*t) - im(mat[i])*sin(im(root)*t)) def is_complex_conjugate(mat, root): return Matrix(n, 1, lambda i,j: re(mat[i])*sin(abs(im(root))*t) + im(mat[i])*cos(im(root)*t)*abs(im(root))/im(root)) conjugate_root = [] e_vector = zeros(n,1) for evects in evector: if evects[0] not in conjugate_root: # If number of column of an eigenvector is not equal to the multiplicity # of its eigenvalue then the legt eigenvectors are calculated if len(evects[2])!=evects[1]: var_mat = Matrix(n, 1, lambda i,j: Symbol('x'+str(i))) Mnew = (M - evects[0]*eye(evects[2][-1].rows))*var_mat w = [0 for i in range(evects[1])] w[0] = evects[2][-1] for r in range(1, evects[1]): w_ = Mnew - w[r-1] sol_dict = solve(list(w_), var_mat[1:]) sol_dict[var_mat[0]] = var_mat[0] for key, value in sol_dict.items(): sol_dict[key] = value.subs(var_mat[0],1) w[r] = Matrix(n, 1, lambda i,j: sol_dict[var_mat[i]]) evects[2].append(w[r]) for i in range(evects[1]): C = next(constants) for j in range(i+1): if evects[0].has(I): evects[2][j] = simplify(evects[2][j]) e_vector += C*is_complex(evects[2][j], evects[0])*t**(i-j)*exp(re(evects[0])*t)/factorial(i-j) C = next(constants) e_vector += C*is_complex_conjugate(evects[2][j], evects[0])*t**(i-j)*exp(re(evects[0])*t)/factorial(i-j) else: e_vector += C*evects[2][j]*t**(i-j)*exp(evects[0]*t)/factorial(i-j) if evects[0].has(I): conjugate_root.append(conjugate(evects[0])) sol = [] for i in range(len(eq)): sol.append(Eq(func[i],e_vector[i])) return sol def sysode_nonlinear_2eq_order1(match_): func = match_['func'] eq = match_['eq'] fc = match_['func_coeff'] t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] if match_['type_of_equation'] == 'type5': sol = _nonlinear_2eq_order1_type5(func, t, eq) return sol x = func[0].func y = func[1].func for i in range(2): eqs = 0 for terms in Add.make_args(eq[i]): eqs += terms/fc[i,func[i],1] eq[i] = eqs if match_['type_of_equation'] == 'type1': sol = _nonlinear_2eq_order1_type1(x, y, t, eq) elif match_['type_of_equation'] == 'type2': sol = _nonlinear_2eq_order1_type2(x, y, t, eq) elif match_['type_of_equation'] == 'type3': sol = _nonlinear_2eq_order1_type3(x, y, t, eq) elif match_['type_of_equation'] == 'type4': sol = _nonlinear_2eq_order1_type4(x, y, t, eq) return sol def _nonlinear_2eq_order1_type1(x, y, t, eq): r""" Equations: .. math:: x' = x^n F(x,y) .. math:: y' = g(y) F(x,y) Solution: .. math:: x = \varphi(y), \int \frac{1}{g(y) F(\varphi(y),y)} \,dy = t + C_2 where if `n \neq 1` .. math:: \varphi = [C_1 + (1-n) \int \frac{1}{g(y)} \,dy]^{\frac{1}{1-n}} if `n = 1` .. math:: \varphi = C_1 e^{\int \frac{1}{g(y)} \,dy} where `C_1` and `C_2` are arbitrary constants. 
""" C1, C2 = get_numbered_constants(eq, num=2) n = Wild('n', exclude=[x(t),y(t)]) f = Wild('f') u, v, phi = symbols('u, v, phi', function=True) r = eq[0].match(diff(x(t),t) - x(t)**n*f) g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v) F = r[f].subs(x(t),u).subs(y(t),v) n = r[n] if n!=1: phi = (C1 + (1-n)*Integral(1/g, v))**(1/(1-n)) else: phi = C1*exp(Integral(1/g, v)) phi = phi.doit() sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v) sol = [] for sols in sol2: sol.append(Eq(x(t),phi.subs(v, sols))) sol.append(Eq(y(t), sols)) return sol def _nonlinear_2eq_order1_type2(x, y, t, eq): r""" Equations: .. math:: x' = e^{\lambda x} F(x,y) .. math:: y' = g(y) F(x,y) Solution: .. math:: x = \varphi(y), \int \frac{1}{g(y) F(\varphi(y),y)} \,dy = t + C_2 where if `\lambda \neq 0` .. math:: \varphi = -\frac{1}{\lambda} log(C_1 - \lambda \int \frac{1}{g(y)} \,dy) if `\lambda = 0` .. math:: \varphi = C_1 + \int \frac{1}{g(y)} \,dy where `C_1` and `C_2` are arbitrary constants. """ C1, C2 = get_numbered_constants(eq, num=2) n = Wild('n', exclude=[x(t),y(t)]) f = Wild('f') u, v, phi = symbols('u, v, phi', function=True) r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f) g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v) F = r[f].subs(x(t),u).subs(y(t),v) n = r[n] if n: phi = -1/n*log(C1 - n*Integral(1/g, v)) else: phi = C1 + Integral(1/g, v) phi = phi.doit() sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v) sol = [] for sols in sol2: sol.append(Eq(x(t),phi.subs(v, sols))) sol.append(Eq(y(t), sols)) return sol def _nonlinear_2eq_order1_type3(x, y, t, eq): r""" Autonomous system of general form .. math:: x' = F(x,y) .. math:: y' = G(x,y) Assuming `y = y(x, C_1)` where `C_1` is an arbitrary constant is the general solution of the first-order equation .. math:: F(x,y) y'_x = G(x,y) Then the general solution of the original system of equations has the form .. math:: \int \frac{1}{F(x,y(x,C_1))} \,dx = t + C_1 """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) u, v = symbols('u, v', function=True) f = Wild('f') g = Wild('g') r1 = eq[0].match(diff(x(t),t) - f) r2 = eq[1].match(diff(y(t),t) - g) F = r1[f].subs(x(t),u).subs(y(t),v) G = r2[g].subs(x(t),u).subs(y(t),v) sol2r = dsolve(Eq(diff(v(u),u), G.subs(v,v(u))/F.subs(v,v(u)))) for sol2s in sol2r: sol1 = solve(Integral(1/F.subs(v, sol2s.rhs), u).doit() - t - C2, u) sol = [] for sols in sol1: sol.append(Eq(x(t), sols)) sol.append(Eq(y(t), (sol2s.rhs).subs(u, sols))) return sol def _nonlinear_2eq_order1_type4(x, y, t, eq): r""" Equation: .. math:: x' = f_1(x) g_1(y) \phi(x,y,t) .. math:: y' = f_2(x) g_2(y) \phi(x,y,t) First integral: .. math:: \int \frac{f_2(x)}{f_1(x)} \,dx - \int \frac{g_1(y)}{g_2(y)} \,dy = C where `C` is an arbitrary constant. On solving the first integral for `x` (resp., `y` ) and on substituting the resulting expression into either equation of the original solution, one arrives at a firs-order equation for determining `y` (resp., `x` ). 
""" C1, C2 = get_numbered_constants(eq, num=2) u, v = symbols('u, v') f = Wild('f') g = Wild('g') f1 = Wild('f1', exclude=[v,t]) f2 = Wild('f2', exclude=[v,t]) g1 = Wild('g1', exclude=[u,t]) g2 = Wild('g2', exclude=[u,t]) r1 = eq[0].match(diff(x(t),t) - f) r2 = eq[1].match(diff(y(t),t) - g) num, den = ( (r1[f].subs(x(t),u).subs(y(t),v))/ (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom() R1 = num.match(f1*g1) R2 = den.match(f2*g2) phi = (r1[f].subs(x(t),u).subs(y(t),v))/num F1 = R1[f1]; F2 = R2[f2] G1 = R1[g1]; G2 = R2[g2] sol1r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, u) sol2r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, v) sol = [] for sols in sol1r: sol.append(Eq(y(t), dsolve(diff(v(t),t) - F2.subs(u,sols).subs(v,v(t))*G2.subs(v,v(t))*phi.subs(u,sols).subs(v,v(t))).rhs)) for sols in sol2r: sol.append(Eq(x(t), dsolve(diff(u(t),t) - F1.subs(u,u(t))*G1.subs(v,sols).subs(u,u(t))*phi.subs(v,sols).subs(u,u(t))).rhs)) return set(sol) def _nonlinear_2eq_order1_type5(func, t, eq): r""" Clairaut system of ODEs .. math:: x = t x' + F(x',y') .. math:: y = t y' + G(x',y') The following are solutions of the system `(i)` straight lines: .. math:: x = C_1 t + F(C_1, C_2), y = C_2 t + G(C_1, C_2) where `C_1` and `C_2` are arbitrary constants; `(ii)` envelopes of the above lines; `(iii)` continuously differentiable lines made up from segments of the lines `(i)` and `(ii)`. """ C1, C2 = get_numbered_constants(eq, num=2) f = Wild('f') g = Wild('g') def check_type(x, y): r1 = eq[0].match(t*diff(x(t),t) - x(t) + f) r2 = eq[1].match(t*diff(y(t),t) - y(t) + g) if not (r1 and r2): r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t) r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t) if not (r1 and r2): r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f) r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g) if not (r1 and r2): r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t) r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t) return [r1, r2] for func_ in func: if isinstance(func_, list): x = func[0][0].func y = func[0][1].func [r1, r2] = check_type(x, y) if not (r1 and r2): [r1, r2] = check_type(y, x) x, y = y, x x1 = diff(x(t),t); y1 = diff(y(t),t) return {Eq(x(t), C1*t + r1[f].subs(x1,C1).subs(y1,C2)), Eq(y(t), C2*t + r2[g].subs(x1,C1).subs(y1,C2))} def sysode_nonlinear_3eq_order1(match_): x = match_['func'][0].func y = match_['func'][1].func z = match_['func'][2].func eq = match_['eq'] fc = match_['func_coeff'] func = match_['func'] t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] if match_['type_of_equation'] == 'type1': sol = _nonlinear_3eq_order1_type1(x, y, z, t, eq) if match_['type_of_equation'] == 'type2': sol = _nonlinear_3eq_order1_type2(x, y, z, t, eq) if match_['type_of_equation'] == 'type3': sol = _nonlinear_3eq_order1_type3(x, y, z, t, eq) if match_['type_of_equation'] == 'type4': sol = _nonlinear_3eq_order1_type4(x, y, z, t, eq) if match_['type_of_equation'] == 'type5': sol = _nonlinear_3eq_order1_type5(x, y, z, t, eq) return sol def _nonlinear_3eq_order1_type1(x, y, z, t, eq): r""" Equations: .. math:: a x' = (b - c) y z, \enspace b y' = (c - a) z x, \enspace c z' = (a - b) x y First Integrals: .. math:: a x^{2} + b y^{2} + c z^{2} = C_1 .. math:: a^{2} x^{2} + b^{2} y^{2} + c^{2} z^{2} = C_2 where `C_1` and `C_2` are arbitrary constants. On solving the integrals for `y` and `z` and on substituting the resulting expressions into the first equation of the system, we arrives at a separable first-order equation on `x`. 
Similarly doing that for other two equations, we will arrive at first order equation on `y` and `z` too. References ========== -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0401.pdf """ C1, C2 = get_numbered_constants(eq, num=2) u, v, w = symbols('u, v, w') p = Wild('p', exclude=[x(t), y(t), z(t), t]) q = Wild('q', exclude=[x(t), y(t), z(t), t]) s = Wild('s', exclude=[x(t), y(t), z(t), t]) r = (diff(x(t),t) - eq[0]).match(p*y(t)*z(t)) r.update((diff(y(t),t) - eq[1]).match(q*z(t)*x(t))) r.update((diff(z(t),t) - eq[2]).match(s*x(t)*y(t))) n1, d1 = r[p].as_numer_denom() n2, d2 = r[q].as_numer_denom() n3, d3 = r[s].as_numer_denom() val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, d3*u-d3*v-n3*w],[u,v]) vals = [val[v], val[u]] c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1]) b = vals[0].subs(w,c) a = vals[1].subs(w,c) y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b))) z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c))) z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c))) x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a))) x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a))) y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b))) try: sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x).rhs except: sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x, hint='separable_Integral') try: sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y).rhs except: sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y, hint='separable_Integral') try: sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z).rhs except: sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z, hint='separable_Integral') return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def _nonlinear_3eq_order1_type2(x, y, z, t, eq): r""" Equations: .. math:: a x' = (b - c) y z f(x, y, z, t) .. math:: b y' = (c - a) z x f(x, y, z, t) .. math:: c z' = (a - b) x y f(x, y, z, t) First Integrals: .. math:: a x^{2} + b y^{2} + c z^{2} = C_1 .. math:: a^{2} x^{2} + b^{2} y^{2} + c^{2} z^{2} = C_2 where `C_1` and `C_2` are arbitrary constants. On solving the integrals for `y` and `z` and on substituting the resulting expressions into the first equation of the system, we arrives at a first-order differential equations on `x`. Similarly doing that for other two equations we will arrive at first order equation on `y` and `z`. 
References ========== -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0402.pdf """ C1, C2 = get_numbered_constants(eq, num=2) u, v, w = symbols('u, v, w') p = Wild('p', exclude=[x(t), y(t), z(t), t]) q = Wild('q', exclude=[x(t), y(t), z(t), t]) s = Wild('s', exclude=[x(t), y(t), z(t), t]) f = Wild('f') r1 = (diff(x(t),t) - eq[0]).match(y(t)*z(t)*f) r = collect_const(r1[f]).match(p*f) r.update(((diff(y(t),t) - eq[1])/r[f]).match(q*z(t)*x(t))) r.update(((diff(z(t),t) - eq[2])/r[f]).match(s*x(t)*y(t))) n1, d1 = r[p].as_numer_denom() n2, d2 = r[q].as_numer_denom() n3, d3 = r[s].as_numer_denom() val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, -d3*u+d3*v+n3*w],[u,v]) vals = [val[v], val[u]] c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1]) a = vals[0].subs(w,c) b = vals[1].subs(w,c) y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b))) z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c))) z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c))) x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a))) x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a))) y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b))) try: sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x*r[f]).rhs except: sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x*r[f], hint='separable_Integral') try: sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y*r[f]).rhs except: sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y*r[f], hint='separable_Integral') try: sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z*r[f]).rhs except: sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z*r[f], hint='separable_Integral') return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def _nonlinear_3eq_order1_type3(x, y, z, t, eq): r""" Equations: .. math:: x' = c F_2 - b F_3, \enspace y' = a F_3 - c F_1, \enspace z' = b F_1 - a F_2 where `F_n = F_n(x, y, z, t)`. 1. First Integral: .. math:: a x + b y + c z = C_1, where C is an arbitrary constant. 2. If we assume function `F_n` to be independent of `t`,i.e, `F_n` = `F_n (x, y, z)` Then, on eliminating `t` and `z` from the first two equation of the system, one arrives at the first-order equation .. 
math:: \frac{dy}{dx} = \frac{a F_3 (x, y, z) - c F_1 (x, y, z)}{c F_2 (x, y, z) - b F_3 (x, y, z)} where `z = \frac{1}{c} (C_1 - a x - b y)` References ========== -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0404.pdf """ C1 = get_numbered_constants(eq, num=1) u, v, w = symbols('u, v, w') p = Wild('p', exclude=[x(t), y(t), z(t), t]) q = Wild('q', exclude=[x(t), y(t), z(t), t]) s = Wild('s', exclude=[x(t), y(t), z(t), t]) F1, F2, F3 = symbols('F1, F2, F3', cls=Wild) r1 = (diff(x(t),t) - eq[0]).match(F2-F3) r = collect_const(r1[F2]).match(s*F2) r.update(collect_const(r1[F3]).match(q*F3)) if eq[1].has(r[F2]) and not eq[1].has(r[F3]): r[F2], r[F3] = r[F3], r[F2] r[s], r[q] = -r[q], -r[s] r.update((diff(y(t),t) - eq[1]).match(p*r[F3] - r[s]*F1)) a = r[p]; b = r[q]; c = r[s] F1 = r[F1].subs(x(t),u).subs(y(t),v).subs(z(t),w) F2 = r[F2].subs(x(t),u).subs(y(t),v).subs(z(t),w) F3 = r[F3].subs(x(t),u).subs(y(t),v).subs(z(t),w) z_xy = (C1-a*u-b*v)/c y_zx = (C1-a*u-c*w)/b x_yz = (C1-b*v-c*w)/a y_x = dsolve(diff(v(u),u) - ((a*F3-c*F1)/(c*F2-b*F3)).subs(w,z_xy).subs(v,v(u))).rhs z_x = dsolve(diff(w(u),u) - ((b*F1-a*F2)/(c*F2-b*F3)).subs(v,y_zx).subs(w,w(u))).rhs z_y = dsolve(diff(w(v),v) - ((b*F1-a*F2)/(a*F3-c*F1)).subs(u,x_yz).subs(w,w(v))).rhs x_y = dsolve(diff(u(v),v) - ((c*F2-b*F3)/(a*F3-c*F1)).subs(w,z_xy).subs(u,u(v))).rhs y_z = dsolve(diff(v(w),w) - ((a*F3-c*F1)/(b*F1-a*F2)).subs(u,x_yz).subs(v,v(w))).rhs x_z = dsolve(diff(u(w),w) - ((c*F2-b*F3)/(b*F1-a*F2)).subs(v,y_zx).subs(u,u(w))).rhs sol1 = dsolve(diff(u(t),t) - (c*F2 - b*F3).subs(v,y_x).subs(w,z_x).subs(u,u(t))).rhs sol2 = dsolve(diff(v(t),t) - (a*F3 - c*F1).subs(u,x_y).subs(w,z_y).subs(v,v(t))).rhs sol3 = dsolve(diff(w(t),t) - (b*F1 - a*F2).subs(u,x_z).subs(v,y_z).subs(w,w(t))).rhs return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def _nonlinear_3eq_order1_type4(x, y, z, t, eq): r""" Equations: .. math:: x' = c z F_2 - b y F_3, \enspace y' = a x F_3 - c z F_1, \enspace z' = b y F_1 - a x F_2 where `F_n = F_n (x, y, z, t)` 1. First integral: .. math:: a x^{2} + b y^{2} + c z^{2} = C_1 where `C` is an arbitrary constant. 2. Assuming the function `F_n` is independent of `t`: `F_n = F_n (x, y, z)`. Then on eliminating `t` and `z` from the first two equations of the system, one arrives at the first-order equation .. 
math:: \frac{dy}{dx} = \frac{a x F_3 (x, y, z) - c z F_1 (x, y, z)} {c z F_2 (x, y, z) - b y F_3 (x, y, z)} where `z = \pm \sqrt{\frac{1}{c} (C_1 - a x^{2} - b y^{2})}` References ========== -http://eqworld.ipmnet.ru/en/solutions/sysode/sode0405.pdf """ C1 = get_numbered_constants(eq, num=1) u, v, w = symbols('u, v, w') p = Wild('p', exclude=[x(t), y(t), z(t), t]) q = Wild('q', exclude=[x(t), y(t), z(t), t]) s = Wild('s', exclude=[x(t), y(t), z(t), t]) F1, F2, F3 = symbols('F1, F2, F3', cls=Wild) r1 = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3) r = collect_const(r1[F2]).match(s*F2) r.update(collect_const(r1[F3]).match(q*F3)) if eq[1].has(r[F2]) and not eq[1].has(r[F3]): r[F2], r[F3] = r[F3], r[F2] r[s], r[q] = -r[q], -r[s] r.update((diff(y(t),t) - eq[1]).match(p*x(t)*r[F3] - r[s]*z(t)*F1)) a = r[p]; b = r[q]; c = r[s] F1 = r[F1].subs(x(t),u).subs(y(t),v).subs(z(t),w) F2 = r[F2].subs(x(t),u).subs(y(t),v).subs(z(t),w) F3 = r[F3].subs(x(t),u).subs(y(t),v).subs(z(t),w) x_yz = sqrt((C1 - b*v**2 - c*w**2)/a) y_zx = sqrt((C1 - c*w**2 - a*u**2)/b) z_xy = sqrt((C1 - a*u**2 - b*v**2)/c) y_x = dsolve(diff(v(u),u) - ((a*u*F3-c*w*F1)/(c*w*F2-b*v*F3)).subs(w,z_xy).subs(v,v(u))).rhs z_x = dsolve(diff(w(u),u) - ((b*v*F1-a*u*F2)/(c*w*F2-b*v*F3)).subs(v,y_zx).subs(w,w(u))).rhs z_y = dsolve(diff(w(v),v) - ((b*v*F1-a*u*F2)/(a*u*F3-c*w*F1)).subs(u,x_yz).subs(w,w(v))).rhs x_y = dsolve(diff(u(v),v) - ((c*w*F2-b*v*F3)/(a*u*F3-c*w*F1)).subs(w,z_xy).subs(u,u(v))).rhs y_z = dsolve(diff(v(w),w) - ((a*u*F3-c*w*F1)/(b*v*F1-a*u*F2)).subs(u,x_yz).subs(v,v(w))).rhs x_z = dsolve(diff(u(w),w) - ((c*w*F2-b*v*F3)/(b*v*F1-a*u*F2)).subs(v,y_zx).subs(u,u(w))).rhs sol1 = dsolve(diff(u(t),t) - (c*w*F2 - b*v*F3).subs(v,y_x).subs(w,z_x).subs(u,u(t))).rhs sol2 = dsolve(diff(v(t),t) - (a*u*F3 - c*w*F1).subs(u,x_y).subs(w,z_y).subs(v,v(t))).rhs sol3 = dsolve(diff(w(t),t) - (b*v*F1 - a*u*F2).subs(u,x_z).subs(v,y_z).subs(w,w(t))).rhs return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)] def _nonlinear_3eq_order1_type5(x, y, t, eq): r""" .. math:: x' = x (c F_2 - b F_3), \enspace y' = y (a F_3 - c F_1), \enspace z' = z (b F_1 - a F_2) where `F_n = F_n (x, y, z, t)` and are arbitrary functions. First Integral: .. math:: \left|x\right|^{a} \left|y\right|^{b} \left|z\right|^{c} = C_1 where `C` is an arbitrary constant. If the function `F_n` is independent of `t`, then, by eliminating `t` and `z` from the first two equations of the system, one arrives at a first-order equation. 
References ========== - http://eqworld.ipmnet.ru/en/solutions/sysode/sode0406.pdf """ C1 = get_numbered_constants(eq, num=1) u, v, w = symbols('u, v, w') p = Wild('p', exclude=[x(t), y(t), z(t), t]) q = Wild('q', exclude=[x(t), y(t), z(t), t]) s = Wild('s', exclude=[x(t), y(t), z(t), t]) F1, F2, F3 = symbols('F1, F2, F3', cls=Wild) r1 = eq[0].match(diff(x(t),t) - x(t)*(F2 - F3)) r = collect_const(r1[F2]).match(s*F2) r.update(collect_const(r1[F3]).match(q*F3)) if eq[1].has(r[F2]) and not eq[1].has(r[F3]): r[F2], r[F3] = r[F3], r[F2] r[s], r[q] = -r[q], -r[s] r.update((diff(y(t),t) - eq[1]).match(y(t)*(p*r[F3] - r[s]*F1))) a = r[p]; b = r[q]; c = r[s] F1 = r[F1].subs(x(t),u).subs(y(t),v).subs(z(t),w) F2 = r[F2].subs(x(t),u).subs(y(t),v).subs(z(t),w) F3 = r[F3].subs(x(t),u).subs(y(t),v).subs(z(t),w) x_yz = (C1*v**-b*w**-c)**(1/a) y_zx = (C1*w**-c*u**-a)**(1/b) z_xy = (C1*u**-a*v**-b)**(1/c) y_x = dsolve(diff(v(u),u) - ((v*(a*F3-c*F1))/(u*(c*F2-b*F3))).subs(w,z_xy).subs(v,v(u))).rhs z_x = dsolve(diff(w(u),u) - ((w*(b*F1-a*F2))/(u*(c*F2-b*F3))).subs(v,y_zx).subs(w,w(u))).rhs z_y = dsolve(diff(w(v),v) - ((w*(b*F1-a*F2))/(v*(a*F3-c*F1))).subs(u,x_yz).subs(w,w(v))).rhs x_y = dsolve(diff(u(v),v) - ((u*(c*F2-b*F3))/(v*(a*F3-c*F1))).subs(w,z_xy).subs(u,u(v))).rhs y_z = dsolve(diff(v(w),w) - ((v*(a*F3-c*F1))/(w*(b*F1-a*F2))).subs(u,x_yz).subs(v,v(w))).rhs x_z = dsolve(diff(u(w),w) - ((u*(c*F2-b*F3))/(w*(b*F1-a*F2))).subs(v,y_zx).subs(u,u(w))).rhs sol1 = dsolve(diff(u(t),t) - (u*(c*F2-b*F3)).subs(v,y_x).subs(w,z_x).subs(u,u(t))).rhs sol2 = dsolve(diff(v(t),t) - (v*(a*F3-c*F1)).subs(u,x_y).subs(w,z_y).subs(v,v(t))).rhs sol3 = dsolve(diff(w(t),t) - (w*(b*F1-a*F2)).subs(u,x_z).subs(v,y_z).subs(w,w(t))).rhs return [Eq(x(t), sol1), Eq(y(t), sol2), Eq(z(t), sol3)]
bsd-3-clause
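A minimal usage sketch for the system solvers documented in the file above. The `_linear_2eq_order2_*` helpers are internal; the public entry point is `dsolve`, so this assumes a SymPy version that ships these `sysode_*` routines, and the coefficients are arbitrary illustrative values (constant coefficients with `ad - bc != 0` and `D != 0`, i.e. case 1.1 of `_linear_2eq_order2_type1`).

from sympy import Eq, Function, dsolve, symbols

t = symbols('t')
x, y = symbols('x y', cls=Function)

# x'' = 2*x + y, y'' = x + 2*y
system = [Eq(x(t).diff(t, 2), 2*x(t) + y(t)),
          Eq(y(t).diff(t, 2), x(t) + 2*y(t))]

# dsolve classifies the pair and dispatches to the matching sysode_* routine,
# returning [Eq(x(t), ...), Eq(y(t), ...)] with four arbitrary constants.
print(dsolve(system))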
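And a small sketch of the eigenvalue decomposition that `_linear_neq_order1_type1` builds on for `y' = A.y`, restricted to the simplest case of distinct real eigenvalues; repeated or complex eigenvalues need the generalized-eigenvector handling described in that docstring. The matrix here is an arbitrary example, not taken from the source.

from sympy import Matrix, exp, numbered_symbols, symbols

t = symbols('t')
A = Matrix([[1, 2],
            [2, 1]])  # y1' = y1 + 2*y2, y2' = 2*y1 + y2

constants = numbered_symbols(prefix='C', start=1)
general = Matrix([0, 0])
for eigenval, multiplicity, vects in A.eigenvects():
    for vect in vects:
        # one arbitrary constant per linearly independent eigenvector
        general += next(constants) * vect * exp(eigenval*t)

print(general)  # column vector [y1(t), y2(t)] of the general solution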
2013Commons/HUE-SHARK
build/env/lib/python2.7/site-packages/Django-1.2.3-py2.7.egg/django/utils/datetime_safe.py
496
2685
# Python's datetime strftime doesn't handle dates before 1900. # These classes override date and datetime to support the formatting of a date # through its full "proleptic Gregorian" date range. # # Based on code submitted to comp.lang.python by Andrew Dalke # # >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A") # '1850/08/02 was a Friday' from datetime import date as real_date, datetime as real_datetime import re import time class date(real_date): def strftime(self, fmt): return strftime(self, fmt) class datetime(real_datetime): def strftime(self, fmt): return strftime(self, fmt) def combine(self, date, time): return datetime(date.year, date.month, date.day, time.hour, time.minute, time.microsecond, time.tzinfo) def date(self): return date(self.year, self.month, self.day) def new_date(d): "Generate a safe date from a datetime.date object." return date(d.year, d.month, d.day) def new_datetime(d): """ Generate a safe datetime from a datetime.date or datetime.datetime object. """ kw = [d.year, d.month, d.day] if isinstance(d, real_datetime): kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo]) return datetime(*kw) # This library does not support strftime's "%s" or "%y" format strings. # Allowed if there's an even number of "%"s because they are escaped. _illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])") def _findall(text, substr): # Also finds overlaps sites = [] i = 0 while 1: j = text.find(substr, i) if j == -1: break sites.append(j) i=j+1 return sites def strftime(dt, fmt): if dt.year >= 1900: return super(type(dt), dt).strftime(fmt) illegal_formatting = _illegal_formatting.search(fmt) if illegal_formatting: raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0)) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6 * (delta // 100 + delta // 400) year = year + off # Move to around the year 2000 year = year + ((2000 - year) // 28) * 28 timetuple = dt.timetuple() s1 = time.strftime(fmt, (year,) + timetuple[1:]) sites1 = _findall(s1, str(year)) s2 = time.strftime(fmt, (year+28,) + timetuple[1:]) sites2 = _findall(s2, str(year+28)) sites = [] for site in sites1: if site in sites2: sites.append(site) s = s1 syear = "%04d" % (dt.year,) for site in sites: s = s[:site] + syear + s[site+4:] return s
apache-2.0
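A short usage sketch for the `datetime_safe` shim above, based on the behaviour its own comments describe: pre-1900 dates format through the full proleptic Gregorian range, while the unsupported "%s"/"%y" directives raise TypeError for such dates.

from django.utils import datetime_safe

d = datetime_safe.date(1850, 8, 2)
print(d.strftime("%Y/%m/%d was a %A"))  # '1850/08/02 was a Friday'

try:
    d.strftime("%y")        # not supported for years before 1900
except TypeError as exc:
    print(exc)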
jackxiang/google-app-engine-django
appengine_django/management/commands/rollback.py
60
1645
#!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import logging from django.core.management.base import BaseCommand def run_appcfg(): # import this so that we run through the checks at the beginning # and report the appropriate errors import appcfg # We don't really want to use that one though, it just executes this one from google.appengine.tools import appcfg # Reset the logging level to WARN as appcfg will spew tons of logs on INFO logging.getLogger().setLevel(logging.WARN) # Note: if we decide to change the name of this command to something other # than 'rollback' we will have to munge the args to replace whatever # we called it with 'rollback' new_args = sys.argv[:] new_args.append('.') appcfg.main(new_args) class Command(BaseCommand): """Calls the appcfg.py's rollback command for the current project. Any additional arguments are passed directly to appcfg.py. """ help = 'Calls appcfg.py rollback for the current project.' args = '[any appcfg.py options]' def run_from_argv(self, argv): run_appcfg()
apache-2.0
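A stand-alone sketch (hypothetical helper name, no App Engine SDK imported) of the argv handling that `run_appcfg()` above performs before delegating to `google.appengine.tools.appcfg`: the command keeps `sys.argv` as-is, so 'rollback' becomes appcfg's action, and '.' is appended as the application directory.

def munge_argv(argv):
    # mirror run_appcfg(): pass the original argv through unchanged and append
    # the current directory as the app directory expected by appcfg.py
    new_args = argv[:]
    new_args.append('.')
    return new_args

print(munge_argv(['manage.py', 'rollback', '--verbose']))
# ['manage.py', 'rollback', '--verbose', '.']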
sebrandon1/nova
nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py
3
10156
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_serialization import jsonutils from nova.api.openstack import wsgi as os_wsgi from nova import compute from nova import exception from nova import objects from nova import test from nova.tests.unit.api.openstack import fakes NAME_FMT = cfg.CONF.instance_name_template UUID1 = '00000000-0000-0000-0000-000000000001' UUID2 = '00000000-0000-0000-0000-000000000002' UUID3 = '00000000-0000-0000-0000-000000000003' UUID4 = '00000000-0000-0000-0000-000000000004' UUID5 = '00000000-0000-0000-0000-000000000005' def fake_services(host): service_list = [objects.Service(id=0, host=host, forced_down=True, binary='nova-compute')] return objects.ServiceList(objects=service_list) def fake_compute_get(*args, **kwargs): return fakes.stub_instance_obj( None, 1, uuid=UUID3, host="host-fake", node="node-fake", reservation_id="r-1", launch_index=0, kernel_id=UUID4, ramdisk_id=UUID5, display_name="hostname-1", root_device_name="/dev/vda", user_data="userdata", services=fake_services("host-fake")) def fake_compute_get_all(*args, **kwargs): inst_list = [ fakes.stub_instance_obj( None, 1, uuid=UUID1, host="host-1", node="node-1", reservation_id="r-1", launch_index=0, kernel_id=UUID4, ramdisk_id=UUID5, display_name="hostname-1", root_device_name="/dev/vda", user_data="userdata", services=fake_services("host-1")), fakes.stub_instance_obj( None, 2, uuid=UUID2, host="host-2", node="node-2", reservation_id="r-2", launch_index=1, kernel_id=UUID4, ramdisk_id=UUID5, display_name="hostname-2", root_device_name="/dev/vda", user_data="userdata", services=fake_services("host-2")), ] return objects.InstanceList(objects=inst_list) class ExtendedServerAttributesTestV21(test.TestCase): content_type = 'application/json' prefix = 'OS-EXT-SRV-ATTR:' fake_url = '/v2/fake' wsgi_api_version = os_wsgi.DEFAULT_API_VERSION def setUp(self): super(ExtendedServerAttributesTestV21, self).setUp() fakes.stub_out_nw_api(self) self.stubs.Set(compute.api.API, 'get', fake_compute_get) self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all) self.stub_out('nova.db.instance_get_by_uuid', fake_compute_get) def _make_request(self, url): req = fakes.HTTPRequest.blank(url) req.headers['Accept'] = self.content_type req.headers = {os_wsgi.API_VERSION_REQUEST_HEADER: 'compute %s' % self.wsgi_api_version} res = req.get_response( fakes.wsgi_app_v21(init_only=('servers', 'os-extended-server-attributes'))) return res def _get_server(self, body): return jsonutils.loads(body).get('server') def _get_servers(self, body): return jsonutils.loads(body).get('servers') def assertServerAttributes(self, server, host, node, instance_name): self.assertEqual(server.get('%shost' % self.prefix), host) self.assertEqual(server.get('%sinstance_name' % self.prefix), instance_name) self.assertEqual(server.get('%shypervisor_hostname' % self.prefix), node) def test_show(self): url = self.fake_url + '/servers/%s' % UUID3 res = 
self._make_request(url) self.assertEqual(res.status_int, 200) self.assertServerAttributes(self._get_server(res.body), host='host-fake', node='node-fake', instance_name=NAME_FMT % 1) def test_detail(self): url = self.fake_url + '/servers/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) for i, server in enumerate(self._get_servers(res.body)): self.assertServerAttributes(server, host='host-%s' % (i + 1), node='node-%s' % (i + 1), instance_name=NAME_FMT % (i + 1)) @mock.patch.object(compute.api.API, 'get_all') def test_detail_empty_instance_list_invalid_status(self, mock_get_all_method): mock_get_all_method.return_value = objects.InstanceList(objects=[]) url = "%s%s" % (self.fake_url, '/servers/detail?status=invalid_status') res = self._make_request(url) # check status code 200 with empty instance list self.assertEqual(200, res.status_int) self.assertEqual(0, len(self._get_servers(res.body))) def test_no_instance_passthrough_404(self): def fake_compute_get(*args, **kwargs): raise exception.InstanceNotFound(instance_id='fake') self.stubs.Set(compute.api.API, 'get', fake_compute_get) url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115' res = self._make_request(url) self.assertEqual(res.status_int, 404) class ExtendedServerAttributesTestV23(ExtendedServerAttributesTestV21): wsgi_api_version = '2.3' def assertServerAttributes(self, server, host, node, instance_name, reservation_id, launch_index, kernel_id, ramdisk_id, hostname, root_device_name, user_data): super(ExtendedServerAttributesTestV23, self).assertServerAttributes( server, host, node, instance_name) self.assertEqual(server.get('%sreservation_id' % self.prefix), reservation_id) self.assertEqual(server.get('%slaunch_index' % self.prefix), launch_index) self.assertEqual(server.get('%skernel_id' % self.prefix), kernel_id) self.assertEqual(server.get('%sramdisk_id' % self.prefix), ramdisk_id) self.assertEqual(server.get('%shostname' % self.prefix), hostname) self.assertEqual(server.get('%sroot_device_name' % self.prefix), root_device_name) self.assertEqual(server.get('%suser_data' % self.prefix), user_data) def test_show(self): url = self.fake_url + '/servers/%s' % UUID3 res = self._make_request(url) self.assertEqual(res.status_int, 200) self.assertServerAttributes(self._get_server(res.body), host='host-fake', node='node-fake', instance_name=NAME_FMT % 1, reservation_id="r-1", launch_index=0, kernel_id=UUID4, ramdisk_id=UUID5, hostname="hostname-1", root_device_name="/dev/vda", user_data="userdata") def test_detail(self): url = self.fake_url + '/servers/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) for i, server in enumerate(self._get_servers(res.body)): self.assertServerAttributes(server, host='host-%s' % (i + 1), node='node-%s' % (i + 1), instance_name=NAME_FMT % (i + 1), reservation_id="r-%s" % (i + 1), launch_index=i, kernel_id=UUID4, ramdisk_id=UUID5, hostname="hostname-%s" % (i + 1), root_device_name="/dev/vda", user_data="userdata") class ExtendedServerAttributesTestV216(ExtendedServerAttributesTestV21): wsgi_api_version = '2.16' def assertServerAttributes(self, server, host, node, instance_name, host_status): super(ExtendedServerAttributesTestV216, self).assertServerAttributes( server, host, node, instance_name) self.assertEqual(server.get('host_status'), host_status) def test_show(self): url = self.fake_url + '/servers/%s' % UUID3 res = self._make_request(url) self.assertEqual(res.status_int, 200) self.assertServerAttributes(self._get_server(res.body), 
host='host-fake', node='node-fake', instance_name=NAME_FMT % 1, host_status="DOWN") def test_detail(self): url = self.fake_url + '/servers/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) for i, server in enumerate(self._get_servers(res.body)): self.assertServerAttributes(server, host='host-%s' % (i + 1), node='node-%s' % (i + 1), instance_name=NAME_FMT % (i + 1), host_status="DOWN")
apache-2.0
bsmr-docker/compose
tests/integration/resilience_test.py
29
1545
from __future__ import unicode_literals
from __future__ import absolute_import

import mock

from compose.project import Project
from .testcases import DockerClientTestCase


class ResilienceTest(DockerClientTestCase):
    def setUp(self):
        self.db = self.create_service('db', volumes=['/var/db'], command='top')
        self.project = Project('composetest', [self.db], self.client)

        container = self.db.create_container()
        self.db.start_container(container)
        self.host_path = container.get('Volumes')['/var/db']

    def test_successful_recreate(self):
        self.project.up(force_recreate=True)
        container = self.db.containers()[0]
        self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)

    def test_create_failure(self):
        with mock.patch('compose.service.Service.create_container', crash):
            with self.assertRaises(Crash):
                self.project.up(force_recreate=True)

        self.project.up()
        container = self.db.containers()[0]
        self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)

    def test_start_failure(self):
        with mock.patch('compose.service.Service.start_container', crash):
            with self.assertRaises(Crash):
                self.project.up(force_recreate=True)

        self.project.up()
        container = self.db.containers()[0]
        self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)


class Crash(Exception):
    pass


def crash(*args, **kwargs):
    raise Crash()
apache-2.0
tonyli71/nuage-openstack-neutron
nuage_neutron/plugins/nuage/nuagedb.py
1
11936
# Copyright 2014 Alcatel-Lucent USA Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import constants as os_constants from neutron.db import common_db_mixin from neutron.db import external_net_db from neutron.db import extraroute_db from neutron.db import l3_db from neutron.db import models_v2 from neutron.db import securitygroups_db from neutron.plugins.nuage import nuage_models def add_net_partition(session, netpart_id, l3dom_id, l2dom_id, ent_name, l3isolated, l3shared): net_partitioninst = nuage_models.NetPartition(id=netpart_id, name=ent_name, l3dom_tmplt_id=l3dom_id, l2dom_tmplt_id=l2dom_id, isolated_zone=l3isolated, shared_zone=l3shared) session.add(net_partitioninst) return net_partitioninst def delete_net_partition(session, net_partition): session.delete(net_partition) def delete_net_partition_by_id(session, netpart_id): query = session.query(nuage_models.NetPartition) query.filter_by(id=netpart_id).delete() def get_net_partition_by_name(session, name): query = session.query(nuage_models.NetPartition) return query.filter_by(name=name).first() def get_net_partition_by_id(session, id): query = session.query(nuage_models.NetPartition) return query.filter_by(id=id).first() def get_net_partitions(session, filters=None, fields=None): query = session.query(nuage_models.NetPartition) common_db = common_db_mixin.CommonDbMixin() query = common_db._apply_filters_to_query(query, nuage_models.NetPartition, filters) return query def get_net_partition_ids(session): query = session.query(nuage_models.NetPartition.id) return [netpart[0] for netpart in query] def get_net_partition_with_lock(session, netpart_id): query = session.query(nuage_models.NetPartition) netpart_db = query.filter_by(id=netpart_id).with_lockmode('update').one() return netpart_db def get_subnet_with_lock(session, sub_id): query = session.query(models_v2.Subnet) subnet_db = query.filter_by(id=sub_id).with_lockmode('update').one() return subnet_db def get_router_with_lock(session, router_id): query = session.query(l3_db.Router) router_db = query.filter_by(id=router_id).with_lockmode('update').one() return router_db def get_secgrp_with_lock(session, secgrp_id): query = session.query(securitygroups_db.SecurityGroup) secgrp_db = query.filter_by(id=secgrp_id).with_lockmode('update').one() return secgrp_db def get_secgrprule_ids(session): query = session.query(securitygroups_db.SecurityGroupRule.id) return [secgrprule[0] for secgrprule in query] def get_secgrprule_with_lock(session, secgrprule_id): query = session.query(securitygroups_db.SecurityGroupRule) secgrprule_db = (query.filter_by(id=secgrprule_id).with_lockmode( 'update').one()) return secgrprule_db def get_port_with_lock(session, port_id): query = session.query(models_v2.Port) port_db = query.filter_by(id=port_id).with_lockmode('update').one() return port_db def get_dhcp_port_with_lock(session, net_id): query = session.query(models_v2.Port) port_db = query.filter_by(network_id=net_id).filter_by( device_owner=os_constants.DEVICE_OWNER_DHCP).with_lockmode( 
'update').first() return port_db def get_fip_with_lock(session, fip_id): query = session.query(l3_db.FloatingIP) fip_db = query.filter_by(id=fip_id).with_lockmode('update').one() return fip_db def add_entrouter_mapping(session, np_id, router_id, n_l3id, rt, rd): ent_rtr_mapping = nuage_models.NetPartitionRouter(net_partition_id=np_id, router_id=router_id, nuage_router_id=n_l3id, nuage_rtr_rt=rt, nuage_rtr_rd=rd) session.add(ent_rtr_mapping) def update_entrouter_mapping(ent_rtr_mapping, new_dict): ent_rtr_mapping.update(new_dict) def add_subnetl2dom_mapping(session, neutron_subnet_id, nuage_sub_id, np_id, l2dom_id=None, nuage_user_id=None, nuage_group_id=None, managed=False): subnet_l2dom = nuage_models.SubnetL2Domain(subnet_id=neutron_subnet_id, nuage_subnet_id=nuage_sub_id, net_partition_id=np_id, nuage_l2dom_tmplt_id=l2dom_id, nuage_user_id=nuage_user_id, nuage_group_id=nuage_group_id, nuage_managed_subnet=managed) session.add(subnet_l2dom) return subnet_l2dom def get_update_netpartition(session, new_dict): netpart = get_net_partition_with_lock(session, new_dict['id']) netpart.update(new_dict) def update_subnetl2dom_mapping(subnet_l2dom, new_dict): subnet_l2dom.update(new_dict) def get_update_subnetl2dom_mapping(session, new_dict): subnet_l2dom = get_subnet_l2dom_with_lock(session, new_dict['subnet_id']) subnet_l2dom.update(new_dict) def update_entrtr_mapping(ent_rtr, new_dict): ent_rtr.update(new_dict) def get_update_entrtr_mapping(session, new_dict): ent_rtr = get_ent_rtr_mapping_with_lock(session, new_dict['router_id']) ent_rtr.update(new_dict) def delete_subnetl2dom_mapping(session, subnet_l2dom): session.delete(subnet_l2dom) def get_subnet_l2dom_by_id(session, id): query = session.query(nuage_models.SubnetL2Domain) return query.filter_by(subnet_id=id).first() def get_subnet_l2dom_by_nuage_id(session, id): query = session.query(nuage_models.SubnetL2Domain) return query.filter_by(nuage_subnet_id=str(id)).first() def get_subnet_l2dom_with_lock(session, id): query = session.query(nuage_models.SubnetL2Domain) subl2dom = query.filter_by(subnet_id=id).with_lockmode('update').one() return subl2dom def get_ent_rtr_mapping_by_entid(session, entid): query = session.query(nuage_models.NetPartitionRouter) return query.filter_by(net_partition_id=entid).all() def get_ent_rtr_mapping_by_rtrid(session, rtrid): query = session.query(nuage_models.NetPartitionRouter) return query.filter_by(router_id=rtrid).first() def add_network_binding(session, network_id, network_type, physical_network, vlan_id): binding = nuage_models.ProviderNetBinding( network_id=network_id, network_type=network_type, physical_network=physical_network, vlan_id=vlan_id) session.add(binding) return binding def get_network_binding(session, network_id): return (session.query(nuage_models.ProviderNetBinding). filter_by(network_id=network_id). first()) def get_network_binding_with_lock(session, network_id): return (session.query(nuage_models.ProviderNetBinding). 
filter_by(network_id=network_id).with_lockmode('update').first()) def get_ent_rtr_mapping_with_lock(session, rtrid): query = session.query(nuage_models.NetPartitionRouter) entrtr = query.filter_by(router_id=rtrid).with_lockmode('update').one() return entrtr def get_ipalloc_for_fip(session, network_id, ip, lock=False): query = session.query(models_v2.IPAllocation) if lock: # Lock is required when the resource is synced ipalloc_db = (query.filter_by(network_id=network_id).filter_by( ip_address=ip).with_lockmode('update').one()) else: ipalloc_db = (query.filter_by(network_id=network_id).filter_by( ip_address=ip).one()) return make_ipalloc_dict(ipalloc_db) def get_all_net_partitions(session): net_partitions = get_net_partitions(session) return make_net_partition_list(net_partitions) def get_all_routes(session): routes = session.query(extraroute_db.RouterRoute) return make_route_list(routes) def get_ext_network_ids(session): query = session.query(external_net_db.ExternalNetwork.network_id) return [net[0] for net in query] def get_route_with_lock(session, dest, nhop): query = session.query(extraroute_db.RouterRoute) route_db = (query.filter_by(destination=dest).filter_by(nexthop=nhop) .with_lockmode('update').one()) return make_route_dict(route_db) def get_all_provider_nets(session): provider_nets = session.query(nuage_models.ProviderNetBinding) return make_provider_net_list(provider_nets) def make_provider_net_list(provider_nets): return [make_provider_net_dict(pnet) for pnet in provider_nets] def make_provider_net_dict(provider_net): return {'network_id': provider_net['network_id'], 'network_type': provider_net['network_type'], 'physical_network': provider_net['physical_network'], 'vlan_id': provider_net['vlan_id']} def make_ipalloc_dict(subnet_db): return {'port_id': subnet_db['port_id'], 'subnet_id': subnet_db['subnet_id'], 'network_id': subnet_db['network_id'], 'ip_address': subnet_db['ip_address']} def make_net_partition_dict(net_partition): return {'id': net_partition['id'], 'name': net_partition['name'], 'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'], 'l2dom_tmplt_id': net_partition['l2dom_tmplt_id']} def make_net_partition_list(net_partitions): return [make_net_partition_dict(net_partition) for net_partition in net_partitions] def make_route_dict(route): return {'destination': route['destination'], 'nexthop': route['nexthop'], 'router_id': route['router_id']} def make_route_list(routes): return [make_route_dict(route) for route in routes] def make_subnl2dom_dict(subl2dom): return {'subnet_id': subl2dom['subnet_id'], 'net_partition_id': subl2dom['net_partition_id'], 'nuage_subnet_id': subl2dom['nuage_subnet_id'], 'nuage_l2dom_tmplt_id': subl2dom['nuage_l2dom_tmplt_id'], 'nuage_user_id': subl2dom['nuage_user_id'], 'nuage_group_id': subl2dom['nuage_group_id']} def make_entrtr_dict(entrtr): return {'net_partition_id': entrtr['net_partition_id'], 'router_id': entrtr['router_id'], 'nuage_router_id': entrtr['nuage_router_id']} def make_provider_net_dict(provider_net): return {'network_id': provider_net['network_id'], 'network_type': provider_net['network_type'], 'physical_network': provider_net['physical_network'], 'vlan_id': provider_net['vlan_id']}
apache-2.0
chriscoyfish/coala-bears
bears/c_languages/ClangComplexityBear.py
4
4936
from clang.cindex import Index, CursorKind

from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from coalib.bearlib import deprecate_settings
from bears.c_languages.ClangBear import clang_available, ClangBear


class ClangComplexityBear(LocalBear):
    """
    Calculates cyclomatic complexity of each function and displays it to
    the user.
    """

    LANGUAGES = ClangBear.LANGUAGES
    REQUIREMENTS = ClangBear.REQUIREMENTS
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Complexity'}
    check_prerequisites = classmethod(clang_available)
    _decisive_cursor_kinds = {
        CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT,
        CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT}

    def function_key_points(self, cursor, top_function_level=False):
        """
        Calculates number of function's decision points and exit points.

        :param top_function_level: Whether cursor is in the top level of
                                   the function.
        """
        decisions, exits = 0, 0

        for child in cursor.get_children():
            if child.kind in self._decisive_cursor_kinds:
                decisions += 1
            elif child.kind == CursorKind.RETURN_STMT:
                exits += 1

                if top_function_level:
                    # There is no point to move forward, so just return.
                    return decisions, exits

            child_decisions, child_exits = self.function_key_points(child)
            decisions += child_decisions
            exits += child_exits

        if top_function_level:
            # Implicit return statement.
            exits += 1

        return decisions, exits

    def complexities(self, cursor, filename):
        """
        Calculates cyclomatic complexities of functions.
        """
        file = cursor.location.file
        if file is not None and file.name != filename:
            # There is nothing to do in another file.
            return

        if cursor.kind == CursorKind.FUNCTION_DECL:
            child = next((child for child in cursor.get_children()
                          if child.kind != CursorKind.PARM_DECL),
                         None)
            if child:
                decisions, exits = self.function_key_points(child, True)
                complexity = max(1, decisions - exits + 2)
                yield cursor, complexity
        else:
            for child in cursor.get_children():
                yield from self.complexities(child, filename)

    @deprecate_settings(cyclomatic_complexity='max_complexity')
    def run(self, filename, file, cyclomatic_complexity: int=8):
        """
        Check for all functions if they are too complicated using the
        cyclomatic complexity metric.

        You can read more about this metric at
        <https://www.wikiwand.com/en/Cyclomatic_complexity>.

        :param cyclomatic_complexity: Maximum cyclomatic complexity that is
                                      considered to be normal. The value of 10
                                      had received substantial corroborating
                                      evidence. But the general recommendation:
                                      "For each module, either limit cyclomatic
                                      complexity to [the agreed-upon limit] or
                                      provide a written explanation of why the
                                      limit was exceeded."
        """
        root = Index.create().parse(filename).cursor
        for cursor, complexity in self.complexities(root, filename):
            if complexity > cyclomatic_complexity:
                affected_code = (SourceRange.from_clang_range(cursor.extent),)
                yield Result(
                    self,
                    "The function '{function}' should be simplified. Its "
                    "cyclomatic complexity is {complexity} which exceeds "
                    "maximal recommended value "
                    "of {rec_value}.".format(
                        function=cursor.displayname,
                        complexity=complexity,
                        rec_value=cyclomatic_complexity),
                    affected_code=affected_code,
                    additional_info=(
                        "The cyclomatic complexity is a metric that measures "
                        "how complicated a function is by counting branches "
                        "and exits of each function.\n\n"
                        "Your function seems to be complicated and should be "
                        "refactored so that it can be understood by other "
                        "people easily.\n\nSee "
                        "<http://www.wikiwand.com/en/Cyclomatic_complexity>"
                        " for more information."))
agpl-3.0
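A hedged illustration, not part of the dataset entry above: the run() docstring in the ClangComplexityBear entry describes the complexity threshold, while function_key_points/complexities compute the score from decision and exit cursors. The C function in the comment below is invented purely to show that arithmetic.

# Illustrative only; the C function is an assumption, the formula comes from
# the ClangComplexityBear entry above (complexity = max(1, decisions - exits + 2)).
#
#   int count_positives(const int *a, int n) {
#       int c = 0;
#       for (int i = 0; i < n; i++)   /* FOR_STMT -> decision point */
#           if (a[i] > 0)             /* IF_STMT  -> decision point */
#               c++;
#       return c;                     /* RETURN_STMT -> exit point */
#   }
decisions, exits = 2, 1
complexity = max(1, decisions - exits + 2)
print(complexity)  # 3 -- only reported if it exceeds cyclomatic_complexity (default 8)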
Voluntarynet/BitmessageKit
BitmessageKit/Vendor/static-python/Lib/test/test_compile.py
45
18749
import unittest import sys import _ast from test import test_support import textwrap class TestSpecifics(unittest.TestCase): def test_no_ending_newline(self): compile("hi", "<test>", "exec") compile("hi\r", "<test>", "exec") def test_empty(self): compile("", "<test>", "exec") def test_other_newlines(self): compile("\r\n", "<test>", "exec") compile("\r", "<test>", "exec") compile("hi\r\nstuff\r\ndef f():\n pass\r", "<test>", "exec") compile("this_is\rreally_old_mac\rdef f():\n pass", "<test>", "exec") def test_debug_assignment(self): # catch assignments to __debug__ self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single') import __builtin__ prev = __builtin__.__debug__ setattr(__builtin__, '__debug__', 'sure') setattr(__builtin__, '__debug__', prev) def test_argument_handling(self): # detect duplicate positional and keyword arguments self.assertRaises(SyntaxError, eval, 'lambda a,a:0') self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0') self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0') try: exec 'def f(a, a): pass' self.fail("duplicate arguments") except SyntaxError: pass try: exec 'def f(a = 0, a = 1): pass' self.fail("duplicate keyword arguments") except SyntaxError: pass try: exec 'def f(a): global a; a = 1' self.fail("variable is global and local") except SyntaxError: pass def test_syntax_error(self): self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec") def test_none_keyword_arg(self): self.assertRaises(SyntaxError, compile, "f(None=1)", "<string>", "exec") def test_duplicate_global_local(self): try: exec 'def f(a): global a; a = 1' self.fail("variable is global and local") except SyntaxError: pass def test_exec_functional_style(self): # Exec'ing a tuple of length 2 works. g = {'b': 2} exec("a = b + 1", g) self.assertEqual(g['a'], 3) # As does exec'ing a tuple of length 3. l = {'b': 3} g = {'b': 5, 'c': 7} exec("a = b + c", g, l) self.assertNotIn('a', g) self.assertEqual(l['a'], 10) # Tuples not of length 2 or 3 are invalid. with self.assertRaises(TypeError): exec("a = b + 1",) with self.assertRaises(TypeError): exec("a = b + 1", {}, {}, {}) # Can't mix and match the two calling forms. g = {'a': 3, 'b': 4} l = {} with self.assertRaises(TypeError): exec("a = b + 1", g) in g with self.assertRaises(TypeError): exec("a = b + 1", g, l) in g, l def test_exec_with_general_mapping_for_locals(self): class M: "Test mapping interface versus possible calls from eval()." 
def __getitem__(self, key): if key == 'a': return 12 raise KeyError def __setitem__(self, key, value): self.results = (key, value) def keys(self): return list('xyz') m = M() g = globals() exec 'z = a' in g, m self.assertEqual(m.results, ('z', 12)) try: exec 'z = b' in g, m except NameError: pass else: self.fail('Did not detect a KeyError') exec 'z = dir()' in g, m self.assertEqual(m.results, ('z', list('xyz'))) exec 'z = globals()' in g, m self.assertEqual(m.results, ('z', g)) exec 'z = locals()' in g, m self.assertEqual(m.results, ('z', m)) try: exec 'z = b' in m except TypeError: pass else: self.fail('Did not validate globals as a real dict') class A: "Non-mapping" pass m = A() try: exec 'z = a' in g, m except TypeError: pass else: self.fail('Did not validate locals as a mapping') # Verify that dict subclasses work as well class D(dict): def __getitem__(self, key): if key == 'a': return 12 return dict.__getitem__(self, key) d = D() exec 'z = a' in g, d self.assertEqual(d['z'], 12) def test_extended_arg(self): longexpr = 'x = x or ' + '-x' * 2500 code = ''' def f(x): %s %s %s %s %s %s %s %s %s %s # the expressions above have no effect, x == argument while x: x -= 1 # EXTENDED_ARG/JUMP_ABSOLUTE here return x ''' % ((longexpr,)*10) exec code self.assertEqual(f(5), 0) def test_complex_args(self): with test_support.check_py3k_warnings( ("tuple parameter unpacking has been removed", SyntaxWarning)): exec textwrap.dedent(''' def comp_args((a, b)): return a,b self.assertEqual(comp_args((1, 2)), (1, 2)) def comp_args((a, b)=(3, 4)): return a, b self.assertEqual(comp_args((1, 2)), (1, 2)) self.assertEqual(comp_args(), (3, 4)) def comp_args(a, (b, c)): return a, b, c self.assertEqual(comp_args(1, (2, 3)), (1, 2, 3)) def comp_args(a=2, (b, c)=(3, 4)): return a, b, c self.assertEqual(comp_args(1, (2, 3)), (1, 2, 3)) self.assertEqual(comp_args(), (2, 3, 4)) ''') def test_argument_order(self): try: exec 'def f(a=1, (b, c)): pass' self.fail("non-default args after default") except SyntaxError: pass def test_float_literals(self): # testing bad float literals self.assertRaises(SyntaxError, eval, "2e") self.assertRaises(SyntaxError, eval, "2.0e+") self.assertRaises(SyntaxError, eval, "1e-") self.assertRaises(SyntaxError, eval, "3-4e/21") def test_indentation(self): # testing compile() of indented block w/o trailing newline" s = """ if 1: if 2: pass""" compile(s, "<string>", "exec") # This test is probably specific to CPython and may not generalize # to other implementations. We are trying to ensure that when # the first line of code starts after 256, correct line numbers # in tracebacks are still produced. 
def test_leading_newlines(self): s256 = "".join(["\n"] * 256 + ["spam"]) co = compile(s256, 'fn', 'exec') self.assertEqual(co.co_firstlineno, 257) self.assertEqual(co.co_lnotab, '') def test_literals_with_leading_zeroes(self): for arg in ["077787", "0xj", "0x.", "0e", "090000000000000", "080000000000000", "000000000000009", "000000000000008", "0b42", "0BADCAFE", "0o123456789", "0b1.1", "0o4.2", "0b101j2", "0o153j2", "0b100e1", "0o777e1", "0o8", "0o78"]: self.assertRaises(SyntaxError, eval, arg) self.assertEqual(eval("0777"), 511) self.assertEqual(eval("0777L"), 511) self.assertEqual(eval("000777"), 511) self.assertEqual(eval("0xff"), 255) self.assertEqual(eval("0xffL"), 255) self.assertEqual(eval("0XfF"), 255) self.assertEqual(eval("0777."), 777) self.assertEqual(eval("0777.0"), 777) self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777) self.assertEqual(eval("0777e1"), 7770) self.assertEqual(eval("0e0"), 0) self.assertEqual(eval("0000E-012"), 0) self.assertEqual(eval("09.5"), 9.5) self.assertEqual(eval("0777j"), 777j) self.assertEqual(eval("00j"), 0j) self.assertEqual(eval("00.0"), 0) self.assertEqual(eval("0e3"), 0) self.assertEqual(eval("090000000000000."), 90000000000000.) self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.) self.assertEqual(eval("090000000000000e0"), 90000000000000.) self.assertEqual(eval("090000000000000e-0"), 90000000000000.) self.assertEqual(eval("090000000000000j"), 90000000000000j) self.assertEqual(eval("000000000000007"), 7) self.assertEqual(eval("000000000000008."), 8.) self.assertEqual(eval("000000000000009."), 9.) self.assertEqual(eval("0b101010"), 42) self.assertEqual(eval("-0b000000000010"), -2) self.assertEqual(eval("0o777"), 511) self.assertEqual(eval("-0o0000010"), -8) self.assertEqual(eval("020000000000.0"), 20000000000.0) self.assertEqual(eval("037777777777e0"), 37777777777.0) self.assertEqual(eval("01000000000000000000000.0"), 1000000000000000000000.0) def test_unary_minus(self): # Verify treatment of unary minus on negative numbers SF bug #660455 if sys.maxint == 2147483647: # 32-bit machine all_one_bits = '0xffffffff' self.assertEqual(eval(all_one_bits), 4294967295L) self.assertEqual(eval("-" + all_one_bits), -4294967295L) elif sys.maxint == 9223372036854775807: # 64-bit machine all_one_bits = '0xffffffffffffffff' self.assertEqual(eval(all_one_bits), 18446744073709551615L) self.assertEqual(eval("-" + all_one_bits), -18446744073709551615L) else: self.fail("How many bits *does* this machine have???") # Verify treatment of constant folding on -(sys.maxint+1) # i.e. -2147483648 on 32 bit platforms. Should return int, not long. self.assertIsInstance(eval("%s" % (-sys.maxint - 1)), int) self.assertIsInstance(eval("%s" % (-sys.maxint - 2)), long) if sys.maxint == 9223372036854775807: def test_32_63_bit_values(self): a = +4294967296 # 1 << 32 b = -4294967296 # 1 << 32 c = +281474976710656 # 1 << 48 d = -281474976710656 # 1 << 48 e = +4611686018427387904 # 1 << 62 f = -4611686018427387904 # 1 << 62 g = +9223372036854775807 # 1 << 63 - 1 h = -9223372036854775807 # 1 << 63 - 1 for variable in self.test_32_63_bit_values.func_code.co_consts: if variable is not None: self.assertIsInstance(variable, int) def test_sequence_unpacking_error(self): # Verify sequence packing/unpacking with "or". 
SF bug #757818 i,j = (1, -1) or (-1, 1) self.assertEqual(i, 1) self.assertEqual(j, -1) def test_none_assignment(self): stmts = [ 'None = 0', 'None += 0', '__builtins__.None = 0', 'def None(): pass', 'class None: pass', '(a, None) = 0, 0', 'for None in range(10): pass', 'def f(None): pass', 'import None', 'import x as None', 'from x import None', 'from x import y as None' ] for stmt in stmts: stmt += "\n" self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single') self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec') # This is ok. compile("from None import x", "tmp", "exec") compile("from x import None as y", "tmp", "exec") compile("import None as x", "tmp", "exec") def test_import(self): succeed = [ 'import sys', 'import os, sys', 'import os as bar', 'import os.path as bar', 'from __future__ import nested_scopes, generators', 'from __future__ import (nested_scopes,\ngenerators)', 'from __future__ import (nested_scopes,\ngenerators,)', 'from sys import stdin, stderr, stdout', 'from sys import (stdin, stderr,\nstdout)', 'from sys import (stdin, stderr,\nstdout,)', 'from sys import (stdin\n, stderr, stdout)', 'from sys import (stdin\n, stderr, stdout,)', 'from sys import stdin as si, stdout as so, stderr as se', 'from sys import (stdin as si, stdout as so, stderr as se)', 'from sys import (stdin as si, stdout as so, stderr as se,)', ] fail = [ 'import (os, sys)', 'import (os), (sys)', 'import ((os), (sys))', 'import (sys', 'import sys)', 'import (os,)', 'import os As bar', 'import os.path a bar', 'from sys import stdin As stdout', 'from sys import stdin a stdout', 'from (sys) import stdin', 'from __future__ import (nested_scopes', 'from __future__ import nested_scopes)', 'from __future__ import nested_scopes,\ngenerators', 'from sys import (stdin', 'from sys import stdin)', 'from sys import stdin, stdout,\nstderr', 'from sys import stdin si', 'from sys import stdin,' 'from sys import (*)', 'from sys import (stdin,, stdout, stderr)', 'from sys import (stdin, stdout),', ] for stmt in succeed: compile(stmt, 'tmp', 'exec') for stmt in fail: self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec') def test_for_distinct_code_objects(self): # SF bug 1048870 def f(): f1 = lambda x=1: x f2 = lambda x=2: x return f1, f2 f1, f2 = f() self.assertNotEqual(id(f1.func_code), id(f2.func_code)) def test_lambda_doc(self): l = lambda: "foo" self.assertIsNone(l.__doc__) def test_unicode_encoding(self): code = u"# -*- coding: utf-8 -*-\npass\n" self.assertRaises(SyntaxError, compile, code, "tmp", "exec") def test_subscripts(self): # SF bug 1448804 # Class to make testing subscript results easy class str_map(object): def __init__(self): self.data = {} def __getitem__(self, key): return self.data[str(key)] def __setitem__(self, key, value): self.data[str(key)] = value def __delitem__(self, key): del self.data[str(key)] def __contains__(self, key): return str(key) in self.data d = str_map() # Index d[1] = 1 self.assertEqual(d[1], 1) d[1] += 1 self.assertEqual(d[1], 2) del d[1] self.assertNotIn(1, d) # Tuple of indices d[1, 1] = 1 self.assertEqual(d[1, 1], 1) d[1, 1] += 1 self.assertEqual(d[1, 1], 2) del d[1, 1] self.assertNotIn((1, 1), d) # Simple slice d[1:2] = 1 self.assertEqual(d[1:2], 1) d[1:2] += 1 self.assertEqual(d[1:2], 2) del d[1:2] self.assertNotIn(slice(1, 2), d) # Tuple of simple slices d[1:2, 1:2] = 1 self.assertEqual(d[1:2, 1:2], 1) d[1:2, 1:2] += 1 self.assertEqual(d[1:2, 1:2], 2) del d[1:2, 1:2] self.assertNotIn((slice(1, 2), slice(1, 2)), d) # Extended slice d[1:2:3] = 1 
self.assertEqual(d[1:2:3], 1) d[1:2:3] += 1 self.assertEqual(d[1:2:3], 2) del d[1:2:3] self.assertNotIn(slice(1, 2, 3), d) # Tuple of extended slices d[1:2:3, 1:2:3] = 1 self.assertEqual(d[1:2:3, 1:2:3], 1) d[1:2:3, 1:2:3] += 1 self.assertEqual(d[1:2:3, 1:2:3], 2) del d[1:2:3, 1:2:3] self.assertNotIn((slice(1, 2, 3), slice(1, 2, 3)), d) # Ellipsis d[...] = 1 self.assertEqual(d[...], 1) d[...] += 1 self.assertEqual(d[...], 2) del d[...] self.assertNotIn(Ellipsis, d) # Tuple of Ellipses d[..., ...] = 1 self.assertEqual(d[..., ...], 1) d[..., ...] += 1 self.assertEqual(d[..., ...], 2) del d[..., ...] self.assertNotIn((Ellipsis, Ellipsis), d) def test_mangling(self): class A: def f(): __mangled = 1 __not_mangled__ = 2 import __mangled_mod import __package__.module self.assertIn("_A__mangled", A.f.func_code.co_varnames) self.assertIn("__not_mangled__", A.f.func_code.co_varnames) self.assertIn("_A__mangled_mod", A.f.func_code.co_varnames) self.assertIn("__package__", A.f.func_code.co_varnames) def test_compile_ast(self): fname = __file__ if fname.lower().endswith(('pyc', 'pyo')): fname = fname[:-1] with open(fname, 'r') as f: fcontents = f.read() sample_code = [ ['<assign>', 'x = 5'], ['<print1>', 'print 1'], ['<printv>', 'print v'], ['<printTrue>', 'print True'], ['<printList>', 'print []'], ['<ifblock>', """if True:\n pass\n"""], ['<forblock>', """for n in [1, 2, 3]:\n print n\n"""], ['<deffunc>', """def foo():\n pass\nfoo()\n"""], [fname, fcontents], ] for fname, code in sample_code: co1 = compile(code, '%s1' % fname, 'exec') ast = compile(code, '%s2' % fname, 'exec', _ast.PyCF_ONLY_AST) self.assertTrue(type(ast) == _ast.Module) co2 = compile(ast, '%s3' % fname, 'exec') self.assertEqual(co1, co2) # the code object's filename comes from the second compilation step self.assertEqual(co2.co_filename, '%s3' % fname) # raise exception when node type doesn't match with compile mode co1 = compile('print 1', '<string>', 'exec', _ast.PyCF_ONLY_AST) self.assertRaises(TypeError, compile, co1, '<ast>', 'eval') # raise exception when node type is no start node self.assertRaises(TypeError, compile, _ast.If(), '<ast>', 'exec') # raise exception when node has invalid children ast = _ast.Module() ast.body = [_ast.BoolOp()] self.assertRaises(TypeError, compile, ast, '<ast>', 'exec') def test_main(): test_support.run_unittest(TestSpecifics) if __name__ == "__main__": test_main()
mit
michaelforfxhelp/fxhelprepo
third_party/chromium/testing/gtest/test/gtest_xml_output_unittest.py
5
11521
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for the gtest_xml_output module""" __author__ = 'eefacm@gmail.com (Sean Mcafee)' import errno import os import sys from xml.dom import minidom, Node import gtest_test_utils import gtest_xml_test_utils GTEST_OUTPUT_FLAG = "--gtest_output" GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml" GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_" SUPPORTS_STACK_TRACES = False if SUPPORTS_STACK_TRACES: STACK_TRACE_TEMPLATE = "\nStack trace:\n*" else: STACK_TRACE_TEMPLATE = "" EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="23" failures="4" disabled="2" errors="0" time="*" name="AllTests"> <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/> </testsuite> <testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*"> <testcase name="Fails" status="run" time="*" classname="FailedTest"> <failure message="Value of: 2&#x0A;Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Value of: 2 Expected: 1%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*"> <testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/> <testcase name="Fails" status="run" time="*" classname="MixedResultTest"> <failure message="Value of: 2&#x0A;Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Value of: 2 Expected: 1%(stack)s]]></failure> <failure message="Value of: 3&#x0A;Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Value of: 3 Expected: 2%(stack)s]]></failure> </testcase> <testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/> </testsuite> <testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*"> <testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest"> <failure message="Failed&#x0A;XML output: &lt;?xml 
encoding=&quot;utf-8&quot;&gt;&lt;top&gt;&lt;![CDATA[cdata text]]&gt;&lt;/top&gt;" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Failed XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]&gt;<![CDATA[</top>%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*"> <testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest"> <failure message="Failed&#x0A;Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Failed Invalid characters in brackets []%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*"> <testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/> </testsuite> <testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*"> <testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/> <testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/> <testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/> <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/> </testsuite> <testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*"> <testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/> <testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/> <testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/> </testsuite> <testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" /> <testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" /> <testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" /> <testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" /> </testsuite> <testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" /> </testsuite> <testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" /> </testsuite> <testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" /> </testsuite> <testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" /> </testsuite> </testsuites>""" % {'stack': STACK_TRACE_TEMPLATE} EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="0" failures="0" 
disabled="0" errors="0" time="*" name="AllTests"> </testsuites>""" class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase): """ Unit test for Google Test's XML output functionality. """ def testNonEmptyXmlOutput(self): """ Runs a test program that generates a non-empty XML output, and tests that the XML output is expected. """ self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1) def testEmptyXmlOutput(self): """ Runs a test program that generates an empty XML output, and tests that the XML output is expected. """ self._TestXmlOutput("gtest_no_test_unittest", EXPECTED_EMPTY_XML, 0) def testDefaultOutputFile(self): """ Confirms that Google Test produces an XML output file with the expected default name if no name is explicitly specified. """ output_file = os.path.join(gtest_test_utils.GetTempDir(), GTEST_DEFAULT_OUTPUT_FILE) gtest_prog_path = gtest_test_utils.GetTestExecutablePath( "gtest_no_test_unittest") try: os.remove(output_file) except OSError, e: if e.errno != errno.ENOENT: raise p = gtest_test_utils.Subprocess( [gtest_prog_path, "%s=xml" % GTEST_OUTPUT_FLAG], working_dir=gtest_test_utils.GetTempDir()) self.assert_(p.exited) self.assertEquals(0, p.exit_code) self.assert_(os.path.isfile(output_file)) def testSuppressedXmlOutput(self): """ Tests that no XML file is generated if the default XML listener is shut down before RUN_ALL_TESTS is invoked. """ xml_path = os.path.join(gtest_test_utils.GetTempDir(), GTEST_PROGRAM_NAME + "out.xml") if os.path.isfile(xml_path): os.remove(xml_path) gtest_prog_path = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME) command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path), "--shut_down_xml"] p = gtest_test_utils.Subprocess(command) if p.terminated_by_signal: self.assert_(False, "%s was killed by signal %d" % (gtest_prog_name, p.signal)) else: self.assert_(p.exited) self.assertEquals(1, p.exit_code, "'%s' exited with code %s, which doesn't match " "the expected exit code %s." % (command, p.exit_code, 1)) self.assert_(not os.path.isfile(xml_path)) def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code): """ Asserts that the XML document generated by running the program gtest_prog_name matches expected_xml, a string containing another XML document. Furthermore, the program's exit code must be expected_exit_code. """ xml_path = os.path.join(gtest_test_utils.GetTempDir(), gtest_prog_name + "out.xml") gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name) command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path)] p = gtest_test_utils.Subprocess(command) if p.terminated_by_signal: self.assert_(False, "%s was killed by signal %d" % (gtest_prog_name, p.signal)) else: self.assert_(p.exited) self.assertEquals(expected_exit_code, p.exit_code, "'%s' exited with code %s, which doesn't match " "the expected exit code %s." % (command, p.exit_code, expected_exit_code)) expected = minidom.parseString(expected_xml) actual = minidom.parse(xml_path) self.NormalizeXml(actual.documentElement) self.AssertEquivalentNodes(expected.documentElement, actual.documentElement) expected.unlink() actual .unlink() if __name__ == '__main__': os.environ['GTEST_STACK_TRACE_DEPTH'] = '1' gtest_test_utils.Main()
mpl-2.0
TomasTomecek/compose
tests/unit/interpolation_test.py
38
1676
import unittest

from compose.config.interpolation import BlankDefaultDict as bddict
from compose.config.interpolation import interpolate
from compose.config.interpolation import InvalidInterpolation


class InterpolationTest(unittest.TestCase):
    def test_valid_interpolations(self):
        self.assertEqual(interpolate('$foo', bddict(foo='hi')), 'hi')
        self.assertEqual(interpolate('${foo}', bddict(foo='hi')), 'hi')

        self.assertEqual(interpolate('${subject} love you', bddict(subject='i')), 'i love you')
        self.assertEqual(interpolate('i ${verb} you', bddict(verb='love')), 'i love you')
        self.assertEqual(interpolate('i love ${object}', bddict(object='you')), 'i love you')

    def test_empty_value(self):
        self.assertEqual(interpolate('${foo}', bddict(foo='')), '')

    def test_unset_value(self):
        self.assertEqual(interpolate('${foo}', bddict()), '')

    def test_escaped_interpolation(self):
        self.assertEqual(interpolate('$${foo}', bddict(foo='hi')), '${foo}')

    def test_invalid_strings(self):
        self.assertRaises(InvalidInterpolation, lambda: interpolate('${', bddict()))
        self.assertRaises(InvalidInterpolation, lambda: interpolate('$}', bddict()))
        self.assertRaises(InvalidInterpolation, lambda: interpolate('${}', bddict()))
        self.assertRaises(InvalidInterpolation, lambda: interpolate('${ }', bddict()))
        self.assertRaises(InvalidInterpolation, lambda: interpolate('${ foo}', bddict()))
        self.assertRaises(InvalidInterpolation, lambda: interpolate('${foo }', bddict()))
        self.assertRaises(InvalidInterpolation, lambda: interpolate('${foo!}', bddict()))
apache-2.0
elventear/ansible-modules-core
network/openswitch/_ops_template.py
23
7218
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['deprecated'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: ops_template version_added: "2.1" author: "Peter Sprygada (@privateip)" short_description: Push configuration to OpenSwitch description: - The OpenSwitch platform provides a library for pushing JSON structured configuration files into the current running-config. This module will read the current configuration from OpenSwitch and compare it against a provided candidate configuration. If there are changes, the candidate configuration is merged with the current configuration and pushed into OpenSwitch deprecated: Deprecated in 2.2. Use ops_config instead extends_documentation_fragment: openswitch options: src: description: - The path to the config source. The source can be either a file with config or a template that will be merged during runtime. By default the task will search for the source file in role or playbook root folder in templates directory. required: true force: description: - The force argument instructs the module to not consider the current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. required: false default: false choices: ['yes', 'no'] backup: description: - When this argument is configured true, the module will backup the running-config from the node prior to making any changes. The backup file will be written to backups/ in the root of the playbook directory. required: false default: false choices: ['yes', 'no'] config: description: - The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the implementer to pass in the configuration to use as the base config for comparison. required: false default: null """ EXAMPLES = """ - name: set hostname with file lookup ops_template: src: ./hostname.json backup: yes remote_user: admin become: yes - name: set hostname with var ops_template: src: "{{ config }}" remote_user: admin become: yes """ RETURN = """ updates: description: The list of configuration updates to be merged returned: always type: dict sample: {obj, obj} responses: description: returns the responses when configuring using cli returned: when transport == cli type: list sample: [...] 
""" import ansible.module_utils.openswitch from ansible.module_utils.netcfg import NetworkConfig, dumps from ansible.module_utils.network import NetworkModule from ansible.module_utils.openswitch import HAS_OPS def get_config(module): config = module.params['config'] or dict() if not config and not module.params['force']: config = module.config.get_config() return config def sort(val): if isinstance(val, (list, set)): return sorted(val) return val def diff(this, other, path=None): updates = list() path = path or list() for key, value in this.items(): if key not in other: other_value = other.get(key) updates.append((list(path), key, value, other_value)) else: if isinstance(this[key], dict): path.append(key) updates.extend(diff(this[key], other[key], list(path))) path.pop() else: other_value = other.get(key) if sort(this[key]) != sort(other_value): updates.append((list(path), key, value, other_value)) return updates def merge(changeset, config=None): config = config or dict() for path, key, value, _ in changeset: current_level = config for part in path: if part not in current_level: current_level[part] = dict() current_level = current_level[part] current_level[key] = value return config def main(): """ main entry point for module execution """ argument_spec = dict( src=dict(type='str'), force=dict(default=False, type='bool'), backup=dict(default=False, type='bool'), config=dict(type='dict'), ) mutually_exclusive = [('config', 'backup'), ('config', 'force')] module = NetworkModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True) if not module.params['transport'] and not HAS_OPS: module.fail_json(msg='unable to import ops.dc library') result = dict(changed=False) contents = get_config(module) result['_backup'] = contents if module.params['transport'] in ['ssh', 'rest']: config = contents try: src = module.from_json(module.params['src']) except ValueError: module.fail_json(msg='unable to load src due to json parsing error') changeset = diff(src, config) candidate = merge(changeset, config) updates = dict() for path, key, new_value, old_value in changeset: path = '%s.%s' % ('.'.join(path), key) updates[path] = str(new_value) result['updates'] = updates if changeset: if not module.check_mode: module.config(config) result['changed'] = True else: candidate = NetworkConfig(contents=module.params['src'], indent=4) if contents: config = NetworkConfig(contents=contents, indent=4) if not module.params['force']: commands = candidate.difference(config) commands = dumps(commands, 'commands').split('\n') commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') if commands: if not module.check_mode: response = module.config(commands) result['responses'] = response result['changed'] = True result['updates'] = commands module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
pratikmallya/hue
desktop/core/ext-py/boto-2.38.0/boto/sqs/message.py
135
9892
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ SQS Message A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS Message are here: http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html So, at it's simplest level a Message just needs to allow a developer to store bytes in it and get the bytes back out. However, to allow messages to have richer semantics, the Message class must support the following interfaces: The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a boto Queue object and represents the queue that the message will be stored in. The default value for this parameter is None. The constructor for the Message class must accept a keyword parameter "body" which represents the content or body of the message. The format of this parameter will depend on the behavior of the particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the user the body passed to the constructor should be a dict-like object that can be used to populate the initial state of the message. The Message class must provide an encode method that accepts a value of the same type as the body parameter of the constructor and returns a string of characters that are able to be stored in an SQS message body (see rules above). The Message class must provide a decode method that accepts a string of characters that can be stored (and probably were stored!) in an SQS message and return an object of a type that is consistent with the "body" parameter accepted on the class constructor. The Message class must provide a __len__ method that will return the size of the encoded message that would be stored in SQS based on the current state of the Message object. The Message class must provide a get_body method that will return the body of the message in the same format accepted in the constructor of the class. The Message class must provide a set_body method that accepts a message body in the same format accepted by the constructor of the class. This method should alter to the internal state of the Message object to reflect the state represented in the message body parameter. The Message class must provide a get_body_encoded method that returns the current body of the message in the format in which it would be stored in SQS. 
""" import base64 import boto from boto.compat import StringIO from boto.compat import six from boto.sqs.attributes import Attributes from boto.sqs.messageattributes import MessageAttributes from boto.exception import SQSDecodeError class RawMessage(object): """ Base class for SQS messages. RawMessage does not encode the message in any way. Whatever you store in the body of the message is what will be written to SQS and whatever is returned from SQS is stored directly into the body of the message. """ def __init__(self, queue=None, body=''): self.queue = queue self.set_body(body) self.id = None self.receipt_handle = None self.md5 = None self.attributes = Attributes(self) self.message_attributes = MessageAttributes(self) self.md5_message_attributes = None def __len__(self): return len(self.encode(self._body)) def startElement(self, name, attrs, connection): if name == 'Attribute': return self.attributes if name == 'MessageAttribute': return self.message_attributes return None def endElement(self, name, value, connection): if name == 'Body': self.set_body(value) elif name == 'MessageId': self.id = value elif name == 'ReceiptHandle': self.receipt_handle = value elif name == 'MD5OfBody': self.md5 = value elif name == 'MD5OfMessageAttributes': self.md5_message_attributes = value else: setattr(self, name, value) def endNode(self, connection): self.set_body(self.decode(self.get_body())) def encode(self, value): """Transform body object into serialized byte array format.""" return value def decode(self, value): """Transform seralized byte array into any object.""" return value def set_body(self, body): """Override the current body for this object, using decoded format.""" self._body = body def get_body(self): return self._body def get_body_encoded(self): """ This method is really a semi-private method used by the Queue.write method when writing the contents of the message to SQS. You probably shouldn't need to call this method in the normal course of events. """ return self.encode(self.get_body()) def delete(self): if self.queue: return self.queue.delete_message(self) def change_visibility(self, visibility_timeout): if self.queue: self.queue.connection.change_message_visibility(self.queue, self.receipt_handle, visibility_timeout) class Message(RawMessage): """ The default Message class used for SQS queues. This class automatically encodes/decodes the message body using Base64 encoding to avoid any illegal characters in the message body. See: https://forums.aws.amazon.com/thread.jspa?threadID=13067 for details on why this is a good idea. The encode/decode is meant to be transparent to the end-user. """ def encode(self, value): if not isinstance(value, six.binary_type): value = value.encode('utf-8') return base64.b64encode(value).decode('utf-8') def decode(self, value): try: value = base64.b64decode(value.encode('utf-8')).decode('utf-8') except: boto.log.warning('Unable to decode message') return value return value class MHMessage(Message): """ The MHMessage class provides a message that provides RFC821-like headers like this: HeaderName: HeaderValue The encoding/decoding of this is handled automatically and after the message body has been read, the message instance can be treated like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'. 
""" def __init__(self, queue=None, body=None, xml_attrs=None): if body is None or body == '': body = {} super(MHMessage, self).__init__(queue, body) def decode(self, value): try: msg = {} fp = StringIO(value) line = fp.readline() while line: delim = line.find(':') key = line[0:delim] value = line[delim+1:].strip() msg[key.strip()] = value.strip() line = fp.readline() except: raise SQSDecodeError('Unable to decode message', self) return msg def encode(self, value): s = '' for item in value.items(): s = s + '%s: %s\n' % (item[0], item[1]) return s def __contains__(self, key): return key in self._body def __getitem__(self, key): if key in self._body: return self._body[key] else: raise KeyError(key) def __setitem__(self, key, value): self._body[key] = value self.set_body(self._body) def keys(self): return self._body.keys() def values(self): return self._body.values() def items(self): return self._body.items() def has_key(self, key): return key in self._body def update(self, d): self._body.update(d) self.set_body(self._body) def get(self, key, default=None): return self._body.get(key, default) class EncodedMHMessage(MHMessage): """ The EncodedMHMessage class provides a message that provides RFC821-like headers like this: HeaderName: HeaderValue This variation encodes/decodes the body of the message in base64 automatically. The message instance can be treated like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'. """ def decode(self, value): try: value = base64.b64decode(value.encode('utf-8')).decode('utf-8') except: raise SQSDecodeError('Unable to decode message', self) return super(EncodedMHMessage, self).decode(value) def encode(self, value): value = super(EncodedMHMessage, self).encode(value) return base64.b64encode(value.encode('utf-8')).decode('utf-8')
apache-2.0
mcardillo55/django
django/contrib/admindocs/utils.py
411
4187
"Misc. utility functions/classes for admin documentation generator." import re from email.errors import HeaderParseError from email.parser import HeaderParser from django.core.urlresolvers import reverse from django.utils.encoding import force_bytes from django.utils.safestring import mark_safe try: import docutils.core import docutils.nodes import docutils.parsers.rst.roles except ImportError: docutils_is_available = False else: docutils_is_available = True def trim_docstring(docstring): """ Uniformly trim leading/trailing whitespace from docstrings. Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation """ if not docstring or not docstring.strip(): return '' # Convert tabs to spaces and split into lines lines = docstring.expandtabs().splitlines() indent = min(len(line) - len(line.lstrip()) for line in lines if line.lstrip()) trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]] return "\n".join(trimmed).strip() def parse_docstring(docstring): """ Parse out the parts of a docstring. Return (title, body, metadata). """ docstring = trim_docstring(docstring) parts = re.split(r'\n{2,}', docstring) title = parts[0] if len(parts) == 1: body = '' metadata = {} else: parser = HeaderParser() try: metadata = parser.parsestr(parts[-1]) except HeaderParseError: metadata = {} body = "\n\n".join(parts[1:]) else: metadata = dict(metadata.items()) if metadata: body = "\n\n".join(parts[1:-1]) else: body = "\n\n".join(parts[1:]) return title, body, metadata def parse_rst(text, default_reference_context, thing_being_parsed=None): """ Convert the string from reST to an XHTML fragment. """ overrides = { 'doctitle_xform': True, 'inital_header_level': 3, "default_reference_context": default_reference_context, "link_base": reverse('django-admindocs-docroot').rstrip('/'), 'raw_enabled': False, 'file_insertion_enabled': False, } if thing_being_parsed: thing_being_parsed = force_bytes("<%s>" % thing_being_parsed) # Wrap ``text`` in some reST that sets the default role to ``cmsreference``, # then restores it. source = """ .. default-role:: cmsreference %s .. default-role:: """ parts = docutils.core.publish_parts(source % text, source_path=thing_being_parsed, destination_path=None, writer_name='html', settings_overrides=overrides) return mark_safe(parts['fragment']) # # reST roles # ROLES = { 'model': '%s/models/%s/', 'view': '%s/views/%s/', 'template': '%s/templates/%s/', 'filter': '%s/filters/#%s', 'tag': '%s/tags/#%s', } def create_reference_role(rolename, urlbase): def _role(name, rawtext, text, lineno, inliner, options=None, content=None): if options is None: options = {} if content is None: content = [] node = docutils.nodes.reference( rawtext, text, refuri=(urlbase % ( inliner.document.settings.link_base, text.lower(), )), **options ) return [node], [] docutils.parsers.rst.roles.register_canonical_role(rolename, _role) def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None): if options is None: options = {} if content is None: content = [] context = inliner.document.settings.default_reference_context node = docutils.nodes.reference( rawtext, text, refuri=(ROLES[context] % ( inliner.document.settings.link_base, text.lower(), )), **options ) return [node], [] if docutils_is_available: docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role) for name, urlbase in ROLES.items(): create_reference_role(name, urlbase)
bsd-3-clause