| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
ClearCorp-dev/odoo-clearcorp | TODO-9.0/budget/__openerp__.py | 2 | 2376 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Public Budget',
'version': '0.1',
'url': 'http://launchpad.net/openerp-ccorp-addons',
'author': 'ClearCorp',
'website': 'http://clearcorp.co.cr',
'category': 'Generic Modules/Base',
'description': """This module adds the logic for Public Budget management and its different processes.
""",
'depends': [
'base',
'account',
'purchase',
'sale',
'purchase_order_discount',
'hr_payroll',
'hr_payroll_account',
'hr_expense',
'account_account_extended_ccorp',
'expense_line_partner',
'account_distribution_line',
],
'data': [
'security/security.xml',
'security/ir.model.access.csv',
'budget_workflow.xml',
'wizard/budget_program_populate_view.xml',
'budget_view.xml',
'wizard/budget_import_catalog_view.xml',
'res_partner_view.xml',
'budget_sequence.xml',
'account_invoice_view.xml',
'account_view.xml',
'account_move_line.xml',
'hr_expense_view.xml',
'hr_expense_workflow.xml',
'purchase_view.xml',
'purchase_workflow.xml',
'hr_payroll.xml',
# 'sale_view.xml'
],
'license': 'AGPL-3',
'installable': True,
'active': False,
'application': True,
}
| agpl-3.0 |
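The manifest above is a plain Python dict literal, so it can be read without importing the module. A minimal sketch using ast.literal_eval (the path is illustrative, and parsing it this way is an assumption about tooling, not part of the addon itself):

import ast

# Parse the manifest without executing it; literal_eval accepts the dict
# literal even with the comment/license header above it (path illustrative).
with open('TODO-9.0/budget/__openerp__.py') as f:
    manifest = ast.literal_eval(f.read())

print(manifest['name'])     # 'Public Budget'
print(manifest['depends'])  # modules that must be installed first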
delinhabit/django | tests/migrations/test_autodetector.py | 48 | 110201 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.db import connection, models
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ModelState, ProjectState
from django.test import TestCase, mock, override_settings
from .models import FoodManager, FoodQuerySet
class DeconstructableObject(object):
"""
A custom deconstructable object.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def deconstruct(self):
return (
self.__module__ + '.' + self.__class__.__name__,
self.args,
self.kwargs
)
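# A minimal sketch (not part of the test suite) of what deconstruct() returns
# for the class above; the autodetector compares these (path, args, kwargs)
# triples to decide whether two field defaults are equal:
#
#     obj = DeconstructableObject(1, key='value')
#     obj.deconstruct()
#     # -> ('<module path>.DeconstructableObject', (1,), {'key': 'value'})
#
# The exact path depends on the module where the class is defined.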
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
])
author_name_null = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, null=True)),
])
author_name_longer = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=400)),
])
author_name_renamed = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("names", models.CharField(max_length=200)),
])
author_name_default = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default='Ada Lovelace')),
])
author_name_deconstructable_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject())),
])
author_name_deconstructable_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject())),
])
author_name_deconstructable_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_name_deconstructable_4 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_name_deconstructable_list_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructableObject(), 123])),
])
author_name_deconstructable_list_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructableObject(), 123])),
])
author_name_deconstructable_list_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructableObject(), 999])),
])
author_name_deconstructable_tuple_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructableObject(), 123))),
])
author_name_deconstructable_tuple_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructableObject(), 123))),
])
author_name_deconstructable_tuple_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructableObject(), 999))),
])
author_name_deconstructable_dict_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructableObject(), 'otheritem': 123
})),
])
author_name_deconstructable_dict_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructableObject(), 'otheritem': 123
})),
])
author_name_deconstructable_dict_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructableObject(), 'otheritem': 999
})),
])
author_name_nested_deconstructable_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject(
DeconstructableObject(1),
(DeconstructableObject('t1'), DeconstructableObject('t2'),),
a=DeconstructableObject('A'),
b=DeconstructableObject(B=DeconstructableObject('c')),
))),
])
author_name_nested_deconstructable_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject(
DeconstructableObject(1),
(DeconstructableObject('t1'), DeconstructableObject('t2'),),
a=DeconstructableObject('A'),
b=DeconstructableObject(B=DeconstructableObject('c')),
))),
])
author_name_nested_deconstructable_changed_arg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject(
DeconstructableObject(1),
(DeconstructableObject('t1'), DeconstructableObject('t2-changed'),),
a=DeconstructableObject('A'),
b=DeconstructableObject(B=DeconstructableObject('c')),
))),
])
author_name_nested_deconstructable_extra_arg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject(
DeconstructableObject(1),
(DeconstructableObject('t1'), DeconstructableObject('t2'),),
None,
a=DeconstructableObject('A'),
b=DeconstructableObject(B=DeconstructableObject('c')),
))),
])
author_name_nested_deconstructable_changed_kwarg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject(
DeconstructableObject(1),
(DeconstructableObject('t1'), DeconstructableObject('t2'),),
a=DeconstructableObject('A'),
b=DeconstructableObject(B=DeconstructableObject('c-changed')),
))),
])
author_name_nested_deconstructable_extra_kwarg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructableObject(
DeconstructableObject(1),
(DeconstructableObject('t1'), DeconstructableObject('t2'),),
a=DeconstructableObject('A'),
b=DeconstructableObject(B=DeconstructableObject('c')),
c=None,
))),
])
author_custom_pk = ModelState("testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))])
author_with_biography_non_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField()),
("biography", models.TextField()),
])
author_with_biography_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(blank=True)),
("biography", models.TextField(blank=True)),
])
author_with_book = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
author_with_book_order_wrt = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
], options={"order_with_respect_to": "book"})
author_renamed_with_book = ModelState("testapp", "Writer", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
author_with_publisher_string = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher_name", models.CharField(max_length=200)),
])
author_with_publisher = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
author_with_user = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("auth.User", models.CASCADE)),
])
author_with_custom_user = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("thirdapp.CustomUser", models.CASCADE)),
])
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",))
author_proxy_options = ModelState("testapp", "AuthorProxy", [], {
"proxy": True,
"verbose_name": "Super Author",
}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_third = ModelState("thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_proxy = ModelState("testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy", ))
author_unmanaged = ModelState("testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author", ))
author_unmanaged_managed = ModelState("testapp", "AuthorUnmanaged", [], {}, ("testapp.author", ))
author_unmanaged_default_pk = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_unmanaged_custom_pk = ModelState("testapp", "Author", [
("pk_field", models.IntegerField(primary_key=True)),
])
author_with_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher")),
])
author_with_m2m_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", blank=True)),
])
author_with_m2m_through = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract")),
])
author_with_former_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.CharField(max_length=100)),
])
author_with_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {
"permissions": [('can_hire', 'Can hire')],
"verbose_name": "Authi",
})
author_with_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_with_new_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_two"})
author_renamed_with_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_renamed_with_new_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_three"})
contract = ModelState("testapp", "Contract", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
publisher = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
])
publisher_with_author = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
publisher_with_aardvark_author = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Aardvark", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
publisher_with_book = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Book", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
other_pony = ModelState("otherapp", "Pony", [
("id", models.AutoField(primary_key=True)),
])
other_pony_food = ModelState("otherapp", "Pony", [
("id", models.AutoField(primary_key=True)),
], managers=[
('food_qs', FoodQuerySet.as_manager()),
('food_mgr', FoodManager('a', 'b')),
('food_mgr_kwargs', FoodManager('x', 'y', 3, 4)),
])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_proxy_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("thirdapp.AuthorProxy", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_migrations_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.UnmigratedModel", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_no_author = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=200)),
])
book_with_author_renamed = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_field_and_author_renamed = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("writer", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_multiple_authors = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author")),
("title", models.CharField(max_length=200)),
])
book_with_multiple_authors_through_attribution = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author", through="otherapp.Attribution")),
("title", models.CharField(max_length=200)),
])
book_foo_together = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("author", "title")},
"unique_together": {("author", "title")},
})
book_foo_together_2 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "author")},
"unique_together": {("title", "author")},
})
book_foo_together_3 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "newfield")},
"unique_together": {("title", "newfield")},
})
book_foo_together_4 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "newfield2")},
"unique_together": {("title", "newfield2")},
})
attribution = ModelState("otherapp", "Attribution", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
edition = ModelState("thirdapp", "Edition", [
("id", models.AutoField(primary_key=True)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
custom_user = ModelState("thirdapp", "CustomUser", [
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
], bases=(AbstractBaseUser, ))
custom_user_no_inherit = ModelState("thirdapp", "CustomUser", [
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
])
aardvark = ModelState("thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_testapp = ModelState("testapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_based_on_author = ModelState("testapp", "Aardvark", [], bases=("testapp.Author", ))
aardvark_pk_fk_author = ModelState("testapp", "Aardvark", [
("id", models.OneToOneField("testapp.Author", models.CASCADE, primary_key=True)),
])
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState("eggs", "Rabbit", [
("id", models.AutoField(primary_key=True)),
("knight", models.ForeignKey("eggs.Knight", models.CASCADE)),
("parent", models.ForeignKey("eggs.Rabbit", models.CASCADE)),
], {"unique_together": {("parent", "knight")}})
def repr_changes(self, changes, include_dependencies=False):
output = ""
for app_label, migrations in sorted(changes.items()):
output += " %s:\n" % app_label
for migration in migrations:
output += " %s\n" % migration.name
for operation in migration.operations:
output += " %s\n" % operation
if include_dependencies:
output += " Dependencies:\n"
if migration.dependencies:
for dep in migration.dependencies:
output += " %s\n" % (dep,)
else:
output += " None\n"
return output
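# With include_dependencies=True, repr_changes() renders output roughly like
# the following (illustrative; operations print via their repr):
#
#     testapp:
#       auto_1
#         <CreateModel  name='Author', ...>
#         Dependencies:
#           None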
def assertNumberMigrations(self, changes, app_label, number):
if len(changes.get(app_label, [])) != number:
self.fail("Incorrect number of migrations (%s) for %s (expected %s)\n%s" % (
len(changes.get(app_label, [])),
app_label,
number,
self.repr_changes(changes),
))
def assertMigrationDependencies(self, changes, app_label, index, dependencies):
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
if set(migration.dependencies) != set(dependencies):
self.fail("Migration dependencies mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
dependencies,
self.repr_changes(changes, include_dependencies=True),
))
def assertOperationTypes(self, changes, app_label, index, types):
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
real_types = [operation.__class__.__name__ for operation in migration.operations]
if types != real_types:
self.fail("Operation type mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
types,
self.repr_changes(changes),
))
def assertOperationAttributes(self, changes, app_label, index, operation_index, **attrs):
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
if len(migration.operations) < operation_index + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_index,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_index]
for attr, value in attrs.items():
if getattr(operation, attr, None) != value:
self.fail("Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_index,
attr,
value,
getattr(operation, attr, None),
self.repr_changes(changes),
))
def assertOperationFieldAttributes(self, changes, app_label, index, operation_index, **attrs):
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
if len(migration.operations) < operation_index + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_index,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_index]
if not hasattr(operation, 'field'):
self.fail("No field attribute for %s.%s op #%s." % (
app_label,
migration.name,
operation_index,
))
field = operation.field
for attr, value in attrs.items():
if getattr(field, attr, None) != value:
self.fail("Field attribute mismatch for %s.%s op #%s, field.%s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_index,
attr,
value,
getattr(field, attr, None),
self.repr_changes(changes),
))
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model(model_state.clone())
return project_state
def test_arrange_for_graph(self):
"""Tests auto-naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector.arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"""
Tests that trim does not remove dependencies but does remove unwanted
apps.
"""
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector.arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, {"testapp"})
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
"""Tests custom naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
migration_name = 'custom_name'
changes = autodetector.arrange_for_graph(changes, graph, migration_name)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name)
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name)
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_new_model(self):
"""Tests autodetection of new models."""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.other_pony_food])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Pony")
self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
['food_qs', 'food_mgr', 'food_mgr_kwargs'])
def test_old_model(self):
"""Tests deletion of old models."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
def test_add_field(self):
"""Tests autodetection of new fields."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_remove_field(self):
"""Tests autodetection of removed fields."""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_alter_field(self):
"""Tests autodetection of new fields."""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_longer])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
side_effect=AssertionError("Should not have prompted for not null alteration"))
def test_alter_field_to_not_null_with_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default='Ada Lovelace')
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
return_value=models.NOT_PROVIDED)
def test_alter_field_to_not_null_without_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default=models.NOT_PROVIDED)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
return_value='Some Name')
def test_alter_field_to_not_null_oneoff_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=False)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default="Some Name")
def test_rename_field(self):
"""Tests autodetection of renamed fields."""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="name", new_name="names")
def test_rename_model(self):
"""Tests autodetection of renamed models."""
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, 'otherapp', 0)
def test_rename_model_with_renamed_rel_field(self):
"""
Tests autodetection of renamed models while simultaneously renaming one
of the fields that relate to the renamed model.
"""
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_field_and_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({
"ask_rename": True,
"ask_rename_model": True,
}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Right number/type of migrations for related field rename?
# Alter is already taken care of.
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, old_name="author", new_name="writer")
def test_rename_model_with_fks_in_different_position(self):
"""
#24537 - Tests that the order of fields in a model does not influence
the RenameModel detection.
"""
before = self.make_project_state([
ModelState("testapp", "EntityA", [
("id", models.AutoField(primary_key=True)),
]),
ModelState("testapp", "EntityB", [
("id", models.AutoField(primary_key=True)),
("some_label", models.CharField(max_length=255)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
]),
])
after = self.make_project_state([
ModelState("testapp", "EntityA", [
("id", models.AutoField(primary_key=True)),
]),
ModelState("testapp", "RenamedEntityB", [
("id", models.AutoField(primary_key=True)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
("some_label", models.CharField(max_length=255)),
]),
])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="EntityB", new_name="RenamedEntityB")
def test_fk_dependency(self):
"""Tests that having a ForeignKey automatically adds a dependency."""
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (author),
# thirdapp (edition) depends on otherapp (book)
before = self.make_project_state([])
after = self.make_project_state([self.author_name, self.book, self.edition])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("testapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="Edition")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("otherapp", "auto_1")])
def test_proxy_fk_dependency(self):
"""Tests that FK dependencies still work on proxy models."""
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (authorproxy)
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("thirdapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="AuthorProxy")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("testapp", "auto_1")])
def test_same_app_no_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app
does not have a dependency on itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_book, self.book, self.publisher_with_book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 2)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'otherapp', 0, [])
self.assertMigrationDependencies(changes, 'otherapp', 1, [("otherapp", "auto_1"), ("testapp", "auto_1")])
# both split migrations should be `initial`
self.assertTrue(changes['otherapp'][0].initial)
self.assertTrue(changes['otherapp'][1].initial)
def test_same_app_circular_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app does
not have a dependency on itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_same_app_circular_fk_dependency_and_unique_together(self):
"""
#22275 - Tests that a migration with a circular FK dependency does not try
to create a unique_together constraint before all of the required fields
are created.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.knight, self.rabbit])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'eggs', 1)
self.assertOperationTypes(changes, 'eggs', 0, ["CreateModel", "CreateModel", "AlterUniqueTogether"])
self.assertNotIn("unique_together", changes['eggs'][0].operations[0].options)
self.assertNotIn("unique_together", changes['eggs'][0].operations[1].options)
self.assertMigrationDependencies(changes, 'eggs', 0, [])
def test_alter_db_table_add(self):
"""Tests detection for adding db_table in model's options."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_one")
def test_alter_db_table_change(self):
"""Tests detection for changing db_table in model's options'."""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_new_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_two")
def test_alter_db_table_remove(self):
"""Tests detection for removing db_table in model's options."""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table=None)
def test_alter_db_table_no_changes(self):
"""
Tests that alter_db_table doesn't generate a migration if no changes
have been made.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_keep_db_table_with_model_change(self):
"""
Tests that when a model is renamed but its db_table stays as-is, the
autodetector does not create more than one operation.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_renamed_with_db_table_options])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
def test_alter_db_table_with_model_change(self):
"""
Tests that when both the model name and db_table change, the
autodetector creates two operations.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_renamed_with_new_db_table_options])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel", "AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="newauthor", table="author_three")
def test_empty_foo_together(self):
"""
#23452 - Empty unique/index_together shouldn't generate a migration.
"""
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation without any definition on the original model
model_state_not_specified = ModelState("a", "model", [("id", models.AutoField(primary_key=True))])
# Explicitly testing for None, since this was the issue in #23452 after
# an AlterFooTogether operation with e.g. () as the value
model_state_none = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": None,
"unique_together": None,
})
# Explicitly testing for the empty set, since we now always have sets.
# During removal (('col1', 'col2'),) --> () this becomes set([])
model_state_empty = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": set(),
"unique_together": set(),
})
def test(from_state, to_state, msg):
before = self.make_project_state([from_state])
after = self.make_project_state([to_state])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
if len(changes) > 0:
ops = ', '.join(o.__class__.__name__ for o in changes['a'][0].operations)
self.fail('Created operation(s) %s from %s' % (ops, msg))
tests = (
(model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"'),
(model_state_not_specified, model_state_none, '"not specified" to "None"'),
(model_state_not_specified, model_state_empty, '"not specified" to "empty"'),
(model_state_none, model_state_not_specified, '"None" to "not specified"'),
(model_state_none, model_state_none, '"None" to "None"'),
(model_state_none, model_state_empty, '"None" to "empty"'),
(model_state_empty, model_state_not_specified, '"empty" to "not specified"'),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
def test_add_foo_together(self):
"""Tests index/unique_together detection."""
# Make state
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_foo_together])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("author", "title")})
def test_remove_foo_together(self):
"""Tests index/unique_together detection."""
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
def test_foo_together_remove_fk(self):
"""Tests unique_together and field removal detection & ordering"""
# Make state
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book_with_no_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, [
"AlterUniqueTogether", "AlterIndexTogether", "RemoveField"
])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 2, model_name="book", name="author")
def test_foo_together_no_changes(self):
"""
Tests that index/unique_together doesn't generate a migration if no
changes have been made.
"""
# Make state
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book_foo_together])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_foo_together_ordering(self):
"""
Tests that index/unique_together also triggers on ordering changes.
"""
# Make state
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book_foo_together_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("title", "author")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("title", "author")})
def test_add_field_and_foo_together(self):
"""
Tests that added fields will be created before using them in
index/unique_together.
"""
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_foo_together_3])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AddField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield")})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield")})
def test_remove_field_and_foo_together(self):
"""
Tests that removed fields will be removed after updating
index/unique_together.
"""
before = self.make_project_state([self.author_empty, self.book_foo_together_3])
after = self.make_project_state([self.author_empty, self.book_foo_together])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, model_name="book", name="newfield")
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("author", "title")})
def test_rename_field_and_foo_together(self):
"""
Tests that fields are renamed before updating
index/unique_together.
"""
before = self.make_project_state([self.author_empty, self.book_foo_together_3])
after = self.make_project_state([self.author_empty, self.book_foo_together_4])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={
("title", "newfield2")
})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield2")})
def test_proxy(self):
"""Tests that the autodetector correctly deals with proxy models."""
# First, we test adding a proxy model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True})
# Now, we test turning a proxy model into a non-proxy model
# It should delete the proxy then make the real one
before = self.make_project_state([self.author_empty, self.author_proxy])
after = self.make_project_state([self.author_empty, self.author_proxy_notproxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy", options={})
def test_proxy_custom_pk(self):
"""
#23415 - The autodetector must correctly deal with custom FK on proxy
models.
"""
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'pk_field')
def test_unmanaged_create(self):
"""Tests that the autodetector correctly deals with managed models."""
# First, we test adding an unmanaged model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_unmanaged])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0,
name="AuthorUnmanaged", options={"managed": False})
def test_unmanaged_to_managed(self):
# Now, we test turning an unmanaged model into a managed model
before = self.make_project_state([self.author_empty, self.author_unmanaged])
after = self.make_project_state([self.author_empty, self.author_unmanaged_managed])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, 'testapp', 0, 0,
name="authorunmanaged", options={})
def test_managed_to_unmanaged(self):
# Now, we turn managed to unmanaged.
before = self.make_project_state([self.author_empty, self.author_unmanaged_managed])
after = self.make_project_state([self.author_empty, self.author_unmanaged])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0,
name="authorunmanaged", options={"managed": False})
def test_unmanaged_custom_pk(self):
"""
        #23415 - The autodetector must correctly deal with FKs to unmanaged
        models with a custom primary key.
"""
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_default_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_custom_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'pk_field')
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable(self):
before = self.make_project_state([self.custom_user])
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [("__setting__", "AUTH_USER_MODEL")])
def test_swappable_changed(self):
before = self.make_project_state([self.custom_user, self.author_with_user])
with override_settings(AUTH_USER_MODEL="thirdapp.CustomUser"):
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name='user')
fk_field = changes['testapp'][0].operations[0].field
to_model = '%s.%s' % (fk_field.remote_field.model._meta.app_label, fk_field.remote_field.model._meta.object_name)
self.assertEqual(to_model, 'thirdapp.CustomUser')
def test_add_field_with_default(self):
"""#22030 - Adding a field with a default should work."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="name")
def test_custom_deconstructable(self):
"""
Two instances which deconstruct to the same value aren't considered a
change.
"""
before = self.make_project_state([self.author_name_deconstructable_1])
after = self.make_project_state([self.author_name_deconstructable_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_deconstruct_field_kwarg(self):
"""Field instances are handled correctly by nested deconstruction."""
before = self.make_project_state([self.author_name_deconstructable_3])
after = self.make_project_state([self.author_name_deconstructable_4])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
def test_deconstructable_list(self):
"""Nested deconstruction descends into lists."""
# When lists contain items that deconstruct to identical values, those lists
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
before = self.make_project_state([self.author_name_deconstructable_list_1])
after = self.make_project_state([self.author_name_deconstructable_list_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed lists should be reported
# as a change
before = self.make_project_state([self.author_name_deconstructable_list_1])
after = self.make_project_state([self.author_name_deconstructable_list_3])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(len(changes), 1)
def test_deconstructable_tuple(self):
"""Nested deconstruction descends into tuples."""
# When tuples contain items that deconstruct to identical values, those tuples
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
before = self.make_project_state([self.author_name_deconstructable_tuple_1])
after = self.make_project_state([self.author_name_deconstructable_tuple_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed tuples should be reported
# as a change
before = self.make_project_state([self.author_name_deconstructable_tuple_1])
after = self.make_project_state([self.author_name_deconstructable_tuple_3])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(len(changes), 1)
def test_deconstructable_dict(self):
"""Nested deconstruction descends into dict values."""
# When dicts contain items whose values deconstruct to identical values,
# those dicts should be considered equal for the purpose of detecting
# state changes (even if the original values are unequal).
before = self.make_project_state([self.author_name_deconstructable_dict_1])
after = self.make_project_state([self.author_name_deconstructable_dict_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed dicts should be reported
# as a change
before = self.make_project_state([self.author_name_deconstructable_dict_1])
after = self.make_project_state([self.author_name_deconstructable_dict_3])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(len(changes), 1)
def test_nested_deconstructable_objects(self):
"""
Nested deconstruction is applied recursively to the args/kwargs of
deconstructed objects.
"""
# If the items within a deconstructed object's args/kwargs have the same
# deconstructed values - whether or not the items themselves are different
# instances - then the object as a whole is regarded as unchanged.
before = self.make_project_state([self.author_name_nested_deconstructable_1])
after = self.make_project_state([self.author_name_nested_deconstructable_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
# Differences that exist solely within the args list of a deconstructed object
# should be reported as changes
before = self.make_project_state([self.author_name_nested_deconstructable_1])
after = self.make_project_state([self.author_name_nested_deconstructable_changed_arg])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(len(changes), 1)
# Additional args should also be reported as a change
before = self.make_project_state([self.author_name_nested_deconstructable_1])
after = self.make_project_state([self.author_name_nested_deconstructable_extra_arg])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(len(changes), 1)
# Differences that exist solely within the kwargs dict of a deconstructed object
# should be reported as changes
before = self.make_project_state([self.author_name_nested_deconstructable_1])
after = self.make_project_state([self.author_name_nested_deconstructable_changed_kwarg])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(len(changes), 1)
# Additional kwargs should also be reported as a change
before = self.make_project_state([self.author_name_nested_deconstructable_1])
after = self.make_project_state([self.author_name_nested_deconstructable_extra_kwarg])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(len(changes), 1)
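    # A sketch of the rule the deconstruction tests above exercise: the
    # autodetector compares fields by their deep-deconstructed
    # (path, args, kwargs) form rather than by identity, so two separately
    # constructed but identically parametrized fields count as unchanged:
    #
    #   f1 = models.CharField(max_length=200, default=DeconstructableObject(1))
    #   f2 = models.CharField(max_length=200, default=DeconstructableObject(1))
    #   # f1 is not f2, yet both deconstruct to the same value, so before/after
    #   # states built from them produce no migration.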
def test_deconstruct_type(self):
"""
        #22951 - Uninstantiated classes with deconstruct are correctly returned
        by deep_deconstruct during serialization.
"""
author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(
max_length=200,
# IntegerField intentionally not instantiated.
default=models.IntegerField,
))
],
)
# Make state
before = self.make_project_state([])
after = self.make_project_state([author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
def test_replace_string_with_foreignkey(self):
"""
#22300 - Adding an FK in the same "spot" as a deleted CharField should
work.
"""
# Make state
before = self.make_project_state([self.author_with_publisher_string])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publisher_name")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publisher")
def test_foreign_key_removed_before_target_model(self):
"""
Removing an FK and the model it targets in the same change must remove
the FK field before the model to maintain consistency.
"""
before = self.make_project_state([self.author_with_publisher, self.publisher])
after = self.make_project_state([self.author_name]) # removes both the model and FK
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Publisher")
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_many_to_many(self, mocked_ask_method):
"""#22435 - Adding a ManyToManyField should not prompt for a default."""
before = self.make_project_state([self.author_empty, self.publisher])
after = self.make_project_state([self.author_with_m2m, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
def test_alter_many_to_many(self):
before = self.make_project_state([self.author_with_m2m, self.publisher])
after = self.make_project_state([self.author_with_m2m_blank, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
def test_create_with_through_model(self):
"""
Adding a m2m with a through model and the models that use it should be
ordered correctly.
"""
before = self.make_project_state([])
after = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, [
"CreateModel", "CreateModel", "CreateModel", "AddField", "AddField"
])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Contract")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 3, model_name='contract', name='publisher')
self.assertOperationAttributes(changes, 'testapp', 0, 4, model_name='author', name='publishers')
def test_many_to_many_removed_before_through_model(self):
"""
Removing a ManyToManyField and the "through" model in the same change
must remove the field before the model to maintain consistency.
"""
before = self.make_project_state([
self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution
])
# Remove both the through model and ManyToMany
after = self.make_project_state([self.book_with_no_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveField", "RemoveField", "RemoveField", "DeleteModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
def test_many_to_many_removed_before_through_model_2(self):
"""
Removing a model that contains a ManyToManyField and the "through" model
in the same change must remove the field before the model to maintain
consistency.
"""
before = self.make_project_state([
self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution
])
# Remove both the through model and ManyToMany
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, [
"RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 4, name='Book')
def test_m2m_w_through_multistep_remove(self):
"""
        A model with a m2m field that specifies a "through" model cannot be
        removed in the same operation as that through model, as the schema
        would pass through an inconsistent state. The autodetector should
        produce a single migration whose operations remove the m2m fields
        before deleting the models.
"""
before = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
after = self.make_project_state([self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, [
"RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers", model_name='author')
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='contract')
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher", model_name='contract')
self.assertOperationAttributes(changes, "testapp", 0, 3, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 4, name="Contract")
def test_concrete_field_changed_to_many_to_many(self):
"""
#23938 - Tests that changing a concrete field into a ManyToManyField
first removes the concrete field and then adds the m2m field.
"""
before = self.make_project_state([self.author_with_former_m2m])
after = self.make_project_state([self.author_with_m2m, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name='Publisher')
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publishers", model_name='author')
def test_many_to_many_changed_to_concrete_field(self):
"""
#23938 - Tests that changing a ManyToManyField into a concrete field
first removes the m2m field and then adds the concrete field.
"""
before = self.make_project_state([self.author_with_m2m, self.publisher])
after = self.make_project_state([self.author_with_former_m2m])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "AddField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 2, name='Publisher')
self.assertOperationFieldAttributes(changes, 'testapp', 0, 1, max_length=100)
def test_non_circular_foreignkey_dependency_removal(self):
"""
        If two models, each with a ForeignKey to the other, are removed at the
        same time, the autodetector should remove them in the correct order.
"""
before = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publisher", model_name='author')
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='publisher')
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 3, name="Publisher")
def test_alter_model_options(self):
"""Changing a model's options should make a change."""
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, options={
"permissions": [('can_hire', 'Can hire')],
"verbose_name": "Authi",
})
# Changing them back to empty should also make a change
before = self.make_project_state([self.author_with_options])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", options={})
def test_alter_model_options_proxy(self):
"""Changing a proxy model's options should also make a change."""
before = self.make_project_state([self.author_proxy, self.author_empty])
after = self.make_project_state([self.author_proxy_options, self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="authorproxy", options={
"verbose_name": "Super Author"
})
def test_set_alter_order_with_respect_to(self):
"""Tests that setting order_with_respect_to adds a field."""
# Make state
before = self.make_project_state([self.book, self.author_with_book])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to="book")
def test_add_alter_order_with_respect_to(self):
"""
        Tests that setting order_with_respect_to while also adding the FK does
        things in the right order.
"""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name="book")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
def test_remove_alter_order_with_respect_to(self):
"""
        Tests that removing order_with_respect_to while also removing the FK
        does things in the right order.
"""
# Make state
before = self.make_project_state([self.book, self.author_with_book_order_wrt])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo", "RemoveField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to=None)
self.assertOperationAttributes(changes, 'testapp', 0, 1, model_name="author", name="book")
def test_add_model_order_with_respect_to(self):
"""
Tests that setting order_with_respect_to when adding the whole model
does things in the right order.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
self.assertNotIn("_order", [name for name, field in changes['testapp'][0].operations[0].fields])
def test_alter_model_managers(self):
"""
Tests that changing the model managers adds a new operation.
"""
# Make state
before = self.make_project_state([self.other_pony])
after = self.make_project_state([self.other_pony_food])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["AlterModelManagers"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="pony")
self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
['food_qs', 'food_mgr', 'food_mgr_kwargs'])
self.assertEqual(changes['otherapp'][0].operations[0].managers[1][1].args, ('a', 'b', 1, 2))
self.assertEqual(changes['otherapp'][0].operations[0].managers[2][1].args, ('x', 'y', 3, 4))
def test_swappable_first_inheritance(self):
"""Tests that swappable models get their CreateModel first."""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.custom_user, self.aardvark])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable_first_setting(self):
"""Tests that swappable models get their CreateModel first."""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.custom_user_no_inherit, self.aardvark])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
def test_bases_first(self):
"""Tests that bases of other models come first."""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.aardvark_based_on_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_multiple_bases(self):
"""#23956 - Tests that inheriting models doesn't move *_ptr fields into AddField operations."""
A = ModelState("app", "A", [("a_id", models.AutoField(primary_key=True))])
B = ModelState("app", "B", [("b_id", models.AutoField(primary_key=True))])
C = ModelState("app", "C", [], bases=("app.A", "app.B"))
D = ModelState("app", "D", [], bases=("app.A", "app.B"))
E = ModelState("app", "E", [], bases=("app.A", "app.B"))
# Make state
before = self.make_project_state([])
after = self.make_project_state([A, B, C, D, E])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, [
"CreateModel", "CreateModel", "CreateModel", "CreateModel", "CreateModel"
])
self.assertOperationAttributes(changes, "app", 0, 0, name="A")
self.assertOperationAttributes(changes, "app", 0, 1, name="B")
self.assertOperationAttributes(changes, "app", 0, 2, name="C")
self.assertOperationAttributes(changes, "app", 0, 3, name="D")
self.assertOperationAttributes(changes, "app", 0, 4, name="E")
def test_proxy_bases_first(self):
"""Tests that bases of proxies come first."""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy, self.author_proxy_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorProxy")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="AAuthorProxyProxy")
def test_pk_fk_included(self):
"""
Tests that a relation used as the primary key is kept as part of
CreateModel.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.aardvark_pk_fk_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_first_dependency(self):
"""
Tests that a dependency to an app with no migrations uses __first__.
"""
# Load graph
loader = MigrationLoader(connection)
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "__first__")])
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_last_dependency(self):
"""
Tests that a dependency to an app with existing migrations uses the
last migration of that app.
"""
# Load graph
loader = MigrationLoader(connection)
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "0002_second")])
def test_alter_fk_before_model_deletion(self):
"""
        Tests that ForeignKeys are altered _before_ the model they used to
        refer to is deleted.
"""
# Make state
before = self.make_project_state([self.author_name, self.publisher_with_author])
after = self.make_project_state([self.aardvark_testapp, self.publisher_with_aardvark_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Aardvark")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Author")
def test_fk_dependency_other_app(self):
"""
#23100 - Tests that ForeignKeys correctly depend on other apps' models.
"""
# Make state
before = self.make_project_state([self.author_name, self.book])
after = self.make_project_state([self.author_with_book, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="book")
self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "__first__")])
def test_circular_dependency_mixed_addcreate(self):
"""
        #23315 - Tests that the dependency resolver knows to put all CreateModel
        operations before AddField operations and not become unsolvable.
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("country", models.ForeignKey("b.DeliveryCountry", models.CASCADE)),
])
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
])
apackage = ModelState("b", "APackage", [
("id", models.AutoField(primary_key=True)),
("person", models.ForeignKey("a.Person", models.CASCADE)),
])
country = ModelState("b", "DeliveryCountry", [
("id", models.AutoField(primary_key=True)),
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, person, apackage, country])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel", "CreateModel"])
@override_settings(AUTH_USER_MODEL="a.Tenant")
def test_circular_dependency_swappable(self):
"""
#23322 - Tests that the dependency resolver knows to explicitly resolve
swappable models.
"""
tenant = ModelState("a", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("b.Address", models.CASCADE))],
bases=(AbstractBaseUser, )
)
address = ModelState("b", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE)),
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, tenant])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'a', 0, [])
self.assertMigrationDependencies(changes, 'a', 1, [('a', 'auto_1'), ('b', 'auto_1')])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'b', 0, [('__setting__', 'AUTH_USER_MODEL')])
@override_settings(AUTH_USER_MODEL="b.Tenant")
def test_circular_dependency_swappable2(self):
"""
#23322 - Tests that the dependency resolver knows to explicitly resolve
swappable models but with the swappable not being the first migrated
model.
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE)),
])
tenant = ModelState("b", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("a.Address", models.CASCADE))],
bases=(AbstractBaseUser, )
)
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, tenant])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'a', 0, [])
self.assertMigrationDependencies(changes, 'a', 1, [('__setting__', 'AUTH_USER_MODEL'), ('a', 'auto_1')])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'b', 0, [('a', 'auto_1')])
@override_settings(AUTH_USER_MODEL="a.Person")
def test_circular_dependency_swappable_self(self):
"""
#23322 - Tests that the dependency resolver knows to explicitly resolve
swappable models.
"""
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
("parent1", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, related_name='children'))
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([person])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'a', 0, [])
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and blank `CharField` or `TextField`
without default should not prompt for a default.
"""
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_biography_blank])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition')
def test_add_non_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and non-blank `CharField` or `TextField`
without default should prompt for a default.
"""
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_biography_non_blank])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(mocked_ask_method.call_count, 2)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0)
| bsd-3-clause |
SamuelMarks/supernaw | node_modules/protagonist/snowcrash/tools/gyp/pylib/gyp/MSVSVersion.py | 486 | 15539 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name == '2013' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 non-Express has a x64-x86 cross that we want to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
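# Illustrative only (paths assumed, not guaranteed on any given machine): for a
# default VS 2013 install on a 64-bit OS, SetupScript('x64') would return
# something like
#   ['C:\\...\\Microsoft Visual Studio 12.0\\VC\\vcvarsall.bat', 'amd64']
# while the SDK-based fall-back returns ['...\\Bin\\SetEnv.Cmd', '/x64'].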
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
  While ideally we might use the win32 module, we would like gyp to be
  python neutral; cygwin python, for instance, lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
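# Usage sketch (key and output are illustrative; actual values depend on the
# machine's registry):
#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\12.0', 'InstallDir')
#   # -> 'C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\Common7\\IDE\\'
# or None when the key/value is absent or reg.exe fails.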
def _RegistryKeyExists(key):
"""Use reg.exe to see if a key exists.
Args:
key: The registry key to check.
Return:
True if the key exists
"""
if not _RegistryQuery(key):
return False
return True
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
  Setup is based on the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, python will raise a
  KeyError.
"""
if path:
path = os.path.normpath(path)
versions = {
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
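# Illustrative use (path assumed): _CreateVersion('2013', r'C:\VS12') returns a
# VisualStudioVersion whose ProjectExtension() is '.vcxproj' and whose
# DefaultToolset() is 'v120', per the table above.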
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-12 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
    2013(e) - Visual Studio 2013 (12)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto'):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('10.0', '12.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
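# Typical entry point usage (a sketch; generators pass their own version arg):
#   version = SelectVisualStudioVersion()   # honors GYP_MSVS_VERSION='auto'
#   version.ShortName()                     # e.g. '2013' or '2010e'
#   version.SetupScript('x86')              # command to set up the build env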
| mit |
renanaugustom/Ferramenta-de-transformacao-de-requisitos-para-arquitetura-de-tres-camadas | Projeto/parametros/views.py | 1 | 2334 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.forms import ModelForm
from annoying.decorators import render_to
from annoying.decorators import ajax_request
from annoying.functions import get_object_or_None
from django.views.decorators.csrf import csrf_exempt
from django.utils import simplejson
from django.core import serializers
from django.http import HttpResponse
from parametros.models import Parametro
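# AJAX views for managing a method's parameters ("parametros"): list the
# parameters of a method, create one, delete one, and check whether a
# parameter name is already in use within a method. Status strings returned
# to the client are in Portuguese; translations are noted inline below.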
@csrf_exempt
@ajax_request
def listar_ajax(request):
idMetodo = request.POST.get('idMetodo', False)
listaParametros = Parametro.objects.filter(metodo_id = idMetodo)
listaRetorno = []
for parametro in listaParametros:
data = {}
data = {'pk':parametro.pk,'nome':parametro.nome,'tipo':parametro.tipo.pk, 'nomeTipo':parametro.tipo.nome}
listaRetorno.append(data.copy())
json_data = simplejson.dumps( {'parametros':listaRetorno, 'status':'OK' } )
return HttpResponse( json_data, mimetype='application/json' )
@csrf_exempt
@ajax_request
def cadastrar_ajax(request):
nome = request.POST.get('nome', False)
metodo_id = request.POST.get('metodo_id', False)
tipo_id = request.POST.get('tipo_id', False)
if nome and metodo_id and tipo_id:
parametro = Parametro(nome = nome, metodo_id = metodo_id, tipo_id = tipo_id)
parametro.save()
return {'status': 'OK', "idParametro": parametro.id}
else:
        return {'status' : 'Erro'}  # "Error": required POST data missing
@csrf_exempt
@ajax_request
def excluir_ajax(request):
idParametro = request.POST.get('idParametro', False)
if idParametro:
parametro = get_object_or_None(Parametro, id=idParametro)
if parametro is None:
            return {'status' : 'Parâmetro não encontrado'}  # "Parameter not found"
else:
parametro.delete()
return {'status': 'OK'}
else:
        return {'status' : 'ID do parâmetro inválido'}  # "Invalid parameter ID"
@csrf_exempt
@ajax_request
def verificanomeparametro_ajax(request):
nomeParametro = request.POST.get('nomeParametro', False)
metodo_id = request.POST.get('metodo_id', False)
if nomeParametro and metodo_id:
parametro = get_object_or_None(Parametro, nome = nomeParametro, metodo_id = metodo_id)
if parametro is None:
return {'status' : 'OK', 'jaExiste' : 'false' }
else:
return {'status' : 'OK', 'jaExiste' : 'true' }
else:
        return {'status' : 'Dados referente ao parâmetro são inválidos'}  # "Data for the parameter is invalid"
| mit |
kbrebanov/ansible | lib/ansible/utils/module_docs_fragments/avi.py | 6 | 1851 | #
# Created on December 12, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Avi Version: 16.3.4
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Avi common documentation fragment
DOCUMENTATION = """
options:
controller:
description:
- IP address or hostname of the controller. The default value is the environment variable C(AVI_CONTROLLER).
username:
description:
- Username used for accessing Avi controller. The default value is the environment variable C(AVI_USERNAME).
password:
description:
- Password of Avi user in Avi controller. The default value is the environment variable C(AVI_PASSWORD).
tenant:
description:
- Name of tenant used for all Avi API calls and context of object.
default: admin
tenant_uuid:
description:
- UUID of tenant used for all Avi API calls and context of object.
default: ''
api_version:
description:
- Avi API version of to use for Avi API and objects.
notes:
- For more information on using Ansible to manage Avi Network devices see U(https://www.ansible.com/ansible-avi-networks).
"""
| gpl-3.0 |
paxapy/zulip | zerver/management/commands/query_ldap.py | 46 | 1292 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
import sys
from django.contrib.auth import authenticate, login, get_backends
from django.core.management.base import BaseCommand
from django.conf import settings
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
# Quick tool to test whether you're correctly authenticating to LDAP
def query_ldap(**options):
# type: (**str) -> None
email = options['email']
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
if ldap_attrs is None:
print("No such user found")
else:
for django_field, ldap_field in settings.AUTH_LDAP_USER_ATTR_MAP.items():
print("%s: %s" % (django_field, ldap_attrs[ldap_field]))
class Command(BaseCommand):
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('email', metavar='<email>', type=str,
help="email of user to query")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
query_ldap(**options)
| apache-2.0 |
yyt030/pyzmq | zmq/tests/test_pair.py | 43 | 1260 | # Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import zmq
from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
x = b' '
class TestPair(BaseZMQTestCase):
def test_basic(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
msg1 = b'message1'
msg2 = self.ping_pong(s1, s2, msg1)
self.assertEqual(msg1, msg2)
def test_multiple(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
for i in range(10):
msg = i*x
s1.send(msg)
for i in range(10):
msg = i*x
s2.send(msg)
for i in range(10):
msg = s1.recv()
self.assertEqual(msg, i*x)
for i in range(10):
msg = s2.recv()
self.assertEqual(msg, i*x)
    def test_json(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        o = dict(a=10, b=list(range(10)))
        o2 = self.ping_pong_json(s1, s2, o)
        self.assertEqual(o, o2)
    def test_pyobj(self):
        s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        o = dict(a=10, b=range(10))
        o2 = self.ping_pong_pyobj(s1, s2, o)
        self.assertEqual(o, o2)
if have_gevent:
class TestReqRepGreen(GreenTest, TestPair):
pass
| bsd-3-clause |
NikNitro/Python-iBeacon-Scan | sympy/combinatorics/util.py | 35 | 16221 | from __future__ import print_function, division
from sympy.ntheory import isprime
from sympy.combinatorics.permutations import Permutation, _af_invert, _af_rmul
from sympy.core.compatibility import range
rmul = Permutation.rmul
_af_new = Permutation._af_new
############################################
#
# Utilities for computational group theory
#
############################################
def _base_ordering(base, degree):
r"""
Order `\{0, 1, ..., n-1\}` so that base points come first and in order.
Parameters
==========
``base`` - the base
``degree`` - the degree of the associated permutation group
Returns
=======
    A list ``base_ordering`` such that ``base_ordering[point]`` is the
    position of ``point`` in the ordering.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import _base_ordering
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> _base_ordering(S.base, S.degree)
[0, 1, 2, 3]
Notes
=====
This is used in backtrack searches, when we define a relation `<<` on
the underlying set for a permutation group of degree `n`,
`\{0, 1, ..., n-1\}`, so that if `(b_1, b_2, ..., b_k)` is a base we
have `b_i << b_j` whenever `i<j` and `b_i << a` for all
`i\in\{1,2, ..., k\}` and `a` is not in the base. The idea is developed
and applied to backtracking algorithms in [1], pp.108-132. The points
that are not in the base are taken in increasing order.
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
"""
base_len = len(base)
ordering = [0]*degree
for i in range(base_len):
ordering[base[i]] = i
current = base_len
for i in range(degree):
if i not in base:
ordering[i] = current
current += 1
return ordering
def _check_cycles_alt_sym(perm):
"""
Checks for cycles of prime length p with n/2 < p < n-2.
Here `n` is the degree of the permutation. This is a helper function for
the function is_alt_sym from sympy.combinatorics.perm_groups.
Examples
========
>>> from sympy.combinatorics.util import _check_cycles_alt_sym
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12]])
>>> _check_cycles_alt_sym(a)
False
>>> b = Permutation([[0, 1, 2, 3, 4, 5, 6], [7, 8, 9, 10]])
>>> _check_cycles_alt_sym(b)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.is_alt_sym
"""
n = perm.size
af = perm.array_form
current_len = 0
total_len = 0
used = set()
for i in range(n//2):
        if i not in used and i < n//2 - total_len:
            current_len = 1
            used.add(i)
            j = i
            while af[j] != i:
                current_len += 1
                j = af[j]
                used.add(j)
            total_len += current_len
            if n//2 < current_len < n - 2 and isprime(current_len):
return True
return False
def _distribute_gens_by_base(base, gens):
"""
Distribute the group elements ``gens`` by membership in basic stabilizers.
Notice that for a base `(b_1, b_2, ..., b_k)`, the basic stabilizers
are defined as `G^{(i)} = G_{b_1, ..., b_{i-1}}` for
`i \in\{1, 2, ..., k\}`.
Parameters
==========
``base`` - a sequence of points in `\{0, 1, ..., n-1\}`
``gens`` - a list of elements of a permutation group of degree `n`.
Returns
=======
List of length `k`, where `k` is
the length of ``base``. The `i`-th entry contains those elements in
``gens`` which fix the first `i` elements of ``base`` (so that the
`0`-th entry is equal to ``gens`` itself). If no element fixes the first
`i` elements of ``base``, the `i`-th element is set to a list containing
the identity element.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.util import _distribute_gens_by_base
>>> D = DihedralGroup(3)
>>> D.schreier_sims()
>>> D.strong_gens
[(0 1 2), (0 2), (1 2)]
>>> D.base
[0, 1]
>>> _distribute_gens_by_base(D.base, D.strong_gens)
[[(0 1 2), (0 2), (1 2)],
[(1 2)]]
See Also
========
_strong_gens_from_distr, _orbits_transversals_from_bsgs,
_handle_precomputed_bsgs
"""
base_len = len(base)
degree = gens[0].size
stabs = [[] for _ in range(base_len)]
max_stab_index = 0
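    # For each generator, find the longest prefix of the base that it fixes
    # pointwise; the generator then belongs to every basic stabilizer up to
    # that depth.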
for gen in gens:
j = 0
while j < base_len - 1 and gen._array_form[base[j]] == base[j]:
j += 1
if j > max_stab_index:
max_stab_index = j
for k in range(j + 1):
stabs[k].append(gen)
for i in range(max_stab_index + 1, base_len):
stabs[i].append(_af_new(list(range(degree))))
return stabs
def _handle_precomputed_bsgs(base, strong_gens, transversals=None,
basic_orbits=None, strong_gens_distr=None):
"""
Calculate BSGS-related structures from those present.
The base and strong generating set must be provided; if any of the
transversals, basic orbits or distributed strong generators are not
provided, they will be calculated from the base and strong generating set.
Parameters
==========
``base`` - the base
``strong_gens`` - the strong generators
``transversals`` - basic transversals
``basic_orbits`` - basic orbits
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Returns
=======
``(transversals, basic_orbits, strong_gens_distr)`` where ``transversals``
are the basic transversals, ``basic_orbits`` are the basic orbits, and
``strong_gens_distr`` are the strong generators distributed by membership
in basic stabilizers.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.util import _handle_precomputed_bsgs
>>> D = DihedralGroup(3)
>>> D.schreier_sims()
>>> _handle_precomputed_bsgs(D.base, D.strong_gens,
... basic_orbits=D.basic_orbits)
([{0: (2), 1: (0 1 2), 2: (0 2)}, {1: (2), 2: (1 2)}], [[0, 1, 2], [1, 2]], [[(0 1 2), (0 2), (1 2)], [(1 2)]])
See Also
========
    _orbits_transversals_from_bsgs, _distribute_gens_by_base
"""
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
if transversals is None:
if basic_orbits is None:
basic_orbits, transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
else:
transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=True)
else:
if basic_orbits is None:
base_len = len(base)
basic_orbits = [None]*base_len
for i in range(base_len):
basic_orbits[i] = list(transversals[i].keys())
return transversals, basic_orbits, strong_gens_distr
def _orbits_transversals_from_bsgs(base, strong_gens_distr,
transversals_only=False):
"""
Compute basic orbits and transversals from a base and strong generating set.
The generators are provided as distributed across the basic stabilizers.
If the optional argument ``transversals_only`` is set to True, only the
transversals are returned.
Parameters
==========
``base`` - the base
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
``transversals_only`` - a flag switching between returning only the
transversals/ both orbits and transversals
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import (_orbits_transversals_from_bsgs,
... _distribute_gens_by_base)
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> _orbits_transversals_from_bsgs(S.base, strong_gens_distr)
([[0, 1, 2], [1, 2]], [{0: (2), 1: (0 1 2), 2: (0 2 1)}, {1: (2), 2: (1 2)}])
See Also
========
_distribute_gens_by_base, _handle_precomputed_bsgs
"""
from sympy.combinatorics.perm_groups import _orbit_transversal
base_len = len(base)
degree = strong_gens_distr[0][0].size
transversals = [None]*base_len
if transversals_only is False:
basic_orbits = [None]*base_len
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(degree, strong_gens_distr[i],
base[i], pairs=True))
if transversals_only is False:
basic_orbits[i] = list(transversals[i].keys())
if transversals_only:
return transversals
else:
return basic_orbits, transversals
def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None):
"""
Remove redundant generators from a strong generating set.
Parameters
==========
``base`` - a base
``strong_gens`` - a strong generating set relative to ``base``
``basic_orbits`` - basic orbits
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Returns
=======
A strong generating set with respect to ``base`` which is a subset of
``strong_gens``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.util import _remove_gens
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(15)
>>> base, strong_gens = S.schreier_sims_incremental()
>>> new_gens = _remove_gens(base, strong_gens)
>>> len(new_gens)
14
>>> _verify_bsgs(S, base, new_gens)
True
Notes
=====
    This procedure is outlined in [1], p. 95.
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
"""
from sympy.combinatorics.perm_groups import _orbit
base_len = len(base)
degree = strong_gens[0].size
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
temp = strong_gens_distr[:]
if basic_orbits is None:
basic_orbits = []
for i in range(base_len):
basic_orbit = _orbit(degree, strong_gens_distr[i], base[i])
basic_orbits.append(basic_orbit)
strong_gens_distr.append([])
res = strong_gens[:]
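    # Walk the stabilizer chain from the deepest level upwards: a generator at
    # level i is a candidate for removal if it does not also appear at level
    # i + 1, and it is redundant if the remaining generators still produce the
    # same basic orbit of base[i].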
for i in range(base_len - 1, -1, -1):
gens_copy = strong_gens_distr[i][:]
for gen in strong_gens_distr[i]:
if gen not in strong_gens_distr[i + 1]:
temp_gens = gens_copy[:]
temp_gens.remove(gen)
                if not temp_gens:
continue
temp_orbit = _orbit(degree, temp_gens, base[i])
if temp_orbit == basic_orbits[i]:
gens_copy.remove(gen)
res.remove(gen)
return res
def _strip(g, base, orbits, transversals):
"""
Attempt to decompose a permutation using a (possibly partial) BSGS
structure.
This is done by treating the sequence ``base`` as an actual base, and
the orbits ``orbits`` and transversals ``transversals`` as basic orbits and
transversals relative to it.
This process is called "sifting". A sift is unsuccessful when a certain
orbit element is not found or when after the sift the decomposition
doesn't end with the identity element.
The argument ``transversals`` is a list of dictionaries that provides
transversal elements for the orbits ``orbits``.
Parameters
==========
``g`` - permutation to be decomposed
``base`` - sequence of points
``orbits`` - a list in which the ``i``-th entry is an orbit of ``base[i]``
    under some subgroup of the pointwise stabilizer of
    ``base[0], base[1], ..., base[i - 1]``. The groups themselves are implicit
    in this function, since the only information we need is encoded in the
    orbits and transversals.
``transversals`` - a list of orbit transversals associated with the orbits
``orbits``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.util import _strip
>>> S = SymmetricGroup(5)
>>> S.schreier_sims()
>>> g = Permutation([0, 2, 3, 1, 4])
>>> _strip(g, S.base, S.basic_orbits, S.basic_transversals)
((4), 5)
Notes
=====
    The algorithm is described in [1], pp. 89-90. The reason for returning
both the current state of the element being decomposed and the level
at which the sifting ends is that they provide important information for
the randomized version of the Schreier-Sims algorithm.
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random
"""
h = g._array_form
base_len = len(base)
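    # Sift h through the stabilizer chain: at level i, look at where h sends
    # base[i]; if that point lies outside the i-th basic orbit the sift fails
    # at level i + 1, otherwise strip off the matching transversal element
    # (multiply by its inverse) and move down one level.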
for i in range(base_len):
beta = h[base[i]]
if beta == base[i]:
continue
if beta not in orbits[i]:
return _af_new(h), i + 1
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
return _af_new(h), base_len + 1
def _strip_af(h, base, orbits, transversals, j):
"""
optimized _strip, with h, transversals and result in array form
if the stripped elements is the identity, it returns False, base_len + 1
j h[base[i]] == base[i] for i <= j
"""
base_len = len(base)
for i in range(j+1, base_len):
beta = h[base[i]]
if beta == base[i]:
continue
if beta not in orbits[i]:
return h, i + 1
u = transversals[i][beta]
if h == u:
return False, base_len + 1
h = _af_rmul(_af_invert(u), h)
return h, base_len + 1
def _strong_gens_from_distr(strong_gens_distr):
"""
Retrieve strong generating set from generators of basic stabilizers.
This is just the union of the generators of the first and second basic
stabilizers.
Parameters
==========
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import (_strong_gens_from_distr,
... _distribute_gens_by_base)
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> S.strong_gens
[(0 1 2), (2)(0 1), (1 2)]
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> _strong_gens_from_distr(strong_gens_distr)
[(0 1 2), (2)(0 1), (1 2)]
See Also
========
_distribute_gens_by_base
"""
if len(strong_gens_distr) == 1:
return strong_gens_distr[0][:]
else:
result = strong_gens_distr[0]
for gen in strong_gens_distr[1]:
if gen not in result:
result.append(gen)
return result
| gpl-3.0 |
ArcherSys/ArcherSys | Lib/site-packages/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
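# The value conventions appear to match the other chardet language models:
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# All other values are frequency ranks of the corresponding letters.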
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
# flake8: noqa
| mit |
jopamer/swift | utils/dev-scripts/csvcolumn_to_scurve.py | 14 | 1694 | #!/usr/bin/env python
# This is a simple script that reads in a csv file, selects a before/after pair
# of columns, and then forms an "s-curve" graph of their ratio.
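#
# Illustrative usage (hypothetical file and column names):
#
#   csvcolumn_to_scurve.py results.csv time_before time_after > scurve.csv
#
# The output has one row per input row: N/total (the cumulative fraction) and
# New/Old (the sorted after/before ratio), ready to plot as an s-curve.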
import argparse
import csv
import sys
def get_data(input_file, before_column, after_column):
def get_selected_csv_rows(input_file, before_column, after_column):
for row in csv.DictReader(input_file):
before = float(row[before_column])
after = float(row[after_column])
delta = after / before
yield delta
def f(input_data):
result = list(enumerate(sorted(input_data)))
count = float(len(result) - 1)
return [(x[0] / count, x[1]) for x in result]
return f(get_selected_csv_rows(input_file, before_column, after_column))
def main():
p = argparse.ArgumentParser(description="""
A script that reads in a csv file, splices out selected before/after
column, and then outputs a new csv file with that data in s-curve form. An
s-curve is a graph where one sorts the output %-change and graphs the %-n
vs %-change.
NOTE: We assume that the csv has a csv header that maps to the before and
after column names passed in.
""")
p.add_argument('input_file', type=argparse.FileType('r'))
p.add_argument('before_column_name', type=str)
p.add_argument('after_column_name', type=str)
args = p.parse_args()
data = get_data(args.input_file, args.before_column_name,
args.after_column_name)
w = csv.DictWriter(sys.stdout, fieldnames=['N/total', 'New/Old'])
w.writeheader()
for d in data:
w.writerow({'N/total': d[0], 'New/Old': d[1]})
if __name__ == "__main__":
main()
| apache-2.0 |
bristy/login_app_flask | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
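# (0.25 * 12.58 = 3.145; presumably rounded down to the 3.0 used below.)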
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
# Everything below is of no interest for detection purposes
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| mit |
sushantgoel/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/setuptools/command/test.py | 285 | 5932 | from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import *
from pkg_resources import _namespace_packages
from unittest import TestLoader, main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
if module.__name__!='setuptools.tests.doctest': # ugh
tests.append(TestLoader.loadTestsFromModule(self,module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file!='__init__.py':
submodule = module.__name__+'.'+file[:-3]
else:
if resource_exists(
module.__name__, file+'/__init__.py'
):
submodule = module.__name__+'.'+file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests)!=1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
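# A minimal sketch of the ``additional_tests`` hook that ScanningLoader picks
# up above (module and names here are hypothetical, not part of setuptools):
#
#     # my_package/tests/__init__.py
#     import doctest
#     import unittest
#
#     def additional_tests():
#         # Anything returned here is appended to the tests collected
#         # from the module itself.
#         return unittest.TestSuite([doctest.DocTestSuite('my_package.util')])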
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=','m', "Run 'test_suite' in specified module"),
('test-suite=','s',
"Test suite to run (e.g. 'some_module.test_suite')"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module+".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0,'--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution,'test_loader',None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
def with_project_on_sys_path(self, func):
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
import unittest
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
loader_ep = EntryPoint.parse("x="+self.test_loader)
loader_class = loader_ep.load(require=False)
cks = loader_class()
unittest.main(
None, None, [unittest.__file__]+self.test_args,
testLoader = cks
)
| apache-2.0 |
matsumoto-r/synciga | src/tools/gyp/pylib/gyp/generator/gypd.py | 912 | 3325 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files, such as "included_files" and "*_excluded", will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
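# After the loop above, generator_default_variables maps each name back onto a
# variable reference, e.g. {'PRODUCT_DIR': '<(PRODUCT_DIR)', ...}, so the
# expansion round-trips these variables into the .gypd output unchanged.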
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
| bsd-3-clause |
baltzar/alexandria | alexandria/urls.py | 1 | 1069 | """alexandria URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
import library.views
urlpatterns = [
url(r'^$', library.views.index, name='index'),
url(r'^publish/$', library.views.publish, name='publish'),
url(r'^review/$', library.views.review, name='review'),
url(r'^review/(?P<post_id>[0-9]+)/response/$', library.views.review_response, name='review_response'),
url(r'^admin/', admin.site.urls),
]
| mit |
james-dibble/Embedded-Systems-Assignment | EmbeddedSystems.Applications/EmbeddedSystems.Client/jsoncpp-src-0.6.0-rc2/scons-local-1.2.0/SCons/Tool/default.py | 12 | 1641 | """SCons.Tool.default
Initialization with a default tool list.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/default.py 3842 2008/12/20 22:59:52 scons"
import SCons.Tool
def generate(env):
"""Add default tools."""
for t in SCons.Tool.tool_list(env['PLATFORM'], env):
SCons.Tool.Tool(t)(env)
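# generate() applies every tool that SCons considers a default for the current
# platform; SCons.Tool.Tool(t) loads the tool module, and the returned object,
# called with env, runs that tool's own generate() on the environment.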
def exists(env):
return 1
| mit |
everypony/ponyFiction | ponyFiction/management/commands/initsphinx.py | 1 | 1710 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db.models import Prefetch
from django.core.management.base import BaseCommand
from ponyFiction.sphinx import sphinx
from ponyFiction.models import Story, Chapter, Author
class Command(BaseCommand):
help = 'Load all stories and chapters into sphinx rt index'
def handle(self, *args, **options):
ok = 0
pk = 0
stories = None
count = Story.objects.count()
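# Page through stories with a primary-key cursor (pk__gt) in batches of
# 500 so the whole table is never held in memory at once; the prefetches
# pull the related rows needed by add_stories_to_search in the same pass.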
while True:
stories = tuple(Story.objects.all().filter(pk__gt=pk).prefetch_related(
Prefetch('authors', queryset=Author.objects.all().only('id', 'username')),
'characters',
'categories',
'classifications',
'comment_set'
)[:500])
if not stories:
break
Story.bl.add_stories_to_search(stories)
pk = stories[-1].id
ok += len(stories)
self.stderr.write(' [%.1f%%] %d/%d stories\r' % (ok * 100 / count, ok, count), ending='')
with sphinx:
sphinx.flush('stories')
self.stderr.write('')
ok = 0
pk = 0
chapters = None
count = Chapter.objects.count()
while True:
chapters = tuple(Chapter.objects.all().filter(pk__gt=pk)[:50])
if not chapters:
break
Chapter.bl.add_chapters_to_search(chapters)
pk = chapters[-1].id
ok += len(chapters)
self.stderr.write(' [%.1f%%] %d/%d chapters\r' % (ok * 100 / count, ok, count), ending='')
with sphinx:
sphinx.flush('chapters')
self.stderr.write('')
| gpl-3.0 |
lmajewski/linux-samsung-devel | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
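# Nested autodict keyed as syscalls[comm][pid][syscall_id][errno] -> count,
# filled in by the sys_exit handlers below.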
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
raw_syscalls__sys_exit(**locals())
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
adrian-ionescu/apache-spark | python/setup.py | 11 | 9765 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the Spark path, in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark; otherwise we
# want to use the existing symlink farm. And if the symlink farm already exists while we are under
# Spark (e.g. a partially built sdist), we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
exit(-1)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
exit(-1)
# Scripts directive requires a list of each script path and does not take wildcards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='dev@spark.apache.org',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.6'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': ['pandas>=0.13.0']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
tlakshman26/cinder-new-branch | cinder/volume/drivers/netapp/eseries/host_mapper.py | 28 | 10852 | # Copyright (c) 2015 Alex Meade. All Rights Reserved.
# Copyright (c) 2015 Yogesh Kshirsagar. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This module handles mapping E-Series volumes to E-Series Hosts and Host
Groups.
"""
import collections
import random
from oslo_log import log as logging
from six.moves import range
from cinder import exception
from cinder.i18n import _
from cinder import utils as cinder_utils
from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
from cinder.volume.drivers.netapp.eseries import utils
LOG = logging.getLogger(__name__)
@cinder_utils.trace_method
@cinder_utils.synchronized('map_es_volume')
def map_volume_to_single_host(client, volume, eseries_vol, host,
vol_map, multiattach_enabled):
"""Maps the e-series volume to host with initiator."""
LOG.debug("Attempting to map volume %s to single host.", volume['id'])
# If volume is not mapped on the backend, map directly to host
if not vol_map:
mappings = client.get_volume_mappings_for_host(host['hostRef'])
lun = _get_free_lun(client, host, multiattach_enabled, mappings)
return client.create_volume_mapping(eseries_vol['volumeRef'],
host['hostRef'], lun)
# If volume is already mapped to desired host
if vol_map.get('mapRef') == host['hostRef']:
return vol_map
multiattach_cluster_ref = None
try:
host_group = client.get_host_group_by_name(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
multiattach_cluster_ref = host_group['clusterRef']
except exception.NotFound:
pass
# Volume is mapped to the multiattach host group
if vol_map.get('mapRef') == multiattach_cluster_ref:
LOG.debug("Volume %s is mapped to multiattach host group.",
volume['id'])
# If volume is not currently attached according to Cinder, it is
# safe to delete the mapping
if not (volume['attach_status'] == 'attached'):
LOG.debug("Volume %(vol)s is not currently attached, moving "
"existing mapping to host %(host)s.",
{'vol': volume['id'], 'host': host['label']})
mappings = client.get_volume_mappings_for_host(
host['hostRef'])
lun = _get_free_lun(client, host, multiattach_enabled, mappings)
return client.move_volume_mapping_via_symbol(
vol_map.get('mapRef'), host['hostRef'], lun
)
# If we got this far, volume must be mapped to something else
msg = _("Cannot attach already attached volume %s; "
"multiattach is disabled via the "
"'netapp_enable_multiattach' configuration option.")
raise exception.NetAppDriverException(msg % volume['id'])
@cinder_utils.trace_method
@cinder_utils.synchronized('map_es_volume')
def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host,
mapping):
"""Maps the e-series volume to multiattach host group."""
LOG.debug("Attempting to map volume %s to multiple hosts.", volume['id'])
# If volume is already mapped to desired host, return the mapping
if mapping['mapRef'] == target_host['hostRef']:
LOG.debug("Volume %(vol)s already mapped to host %(host)s",
{'vol': volume['id'], 'host': target_host['label']})
return mapping
# If target host in a host group, ensure it is the multiattach host group
if target_host['clusterRef'] != utils.NULL_REF:
host_group = client.get_host_group(target_host[
'clusterRef'])
if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
msg = _("Specified host to map to volume %(vol)s is in "
"unsupported host group with %(group)s.")
params = {'vol': volume['id'], 'group': host_group['label']}
raise eseries_exc.UnsupportedHostGroup(msg % params)
mapped_host_group = None
multiattach_host_group = None
try:
mapped_host_group = client.get_host_group(mapping['mapRef'])
# If volume is mapped to a foreign host group raise an error
if mapped_host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
raise eseries_exc.UnsupportedHostGroup(
volume_id=volume['id'], group=mapped_host_group['label'])
multiattach_host_group = mapped_host_group
except exception.NotFound:
pass
if not multiattach_host_group:
multiattach_host_group = client.get_host_group_by_name(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
# If volume is mapped directly to a host, move the host into the
# multiattach host group. Error if the host is in a foreign host group
if not mapped_host_group:
current_host = client.get_host(mapping['mapRef'])
if current_host['clusterRef'] != utils.NULL_REF:
host_group = client.get_host_group(current_host[
'clusterRef'])
if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
msg = _("Currently mapped host for volume %(vol)s is in "
"unsupported host group with %(group)s.")
params = {'vol': volume['id'], 'group': host_group['label']}
raise eseries_exc.UnsupportedHostGroup(msg % params)
client.set_host_group_for_host(current_host['hostRef'],
multiattach_host_group['clusterRef'])
# Move destination host into multiattach host group
client.set_host_group_for_host(target_host[
'hostRef'], multiattach_host_group['clusterRef'])
# Once both existing and target hosts are in the multiattach host group,
# move the volume mapping to said group.
if not mapped_host_group:
LOG.debug("Moving mapping for volume %s to multiattach host group.",
volume['id'])
return client.move_volume_mapping_via_symbol(
mapping.get('lunMappingRef'),
multiattach_host_group['clusterRef'],
mapping['lun']
)
return mapping
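# The helpers below pick LUN IDs: a free (unused) ID is preferred; when none
# is free and multiattach is disabled, the least-frequently-used ID that is
# still available on the target host is reused.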
def _get_free_lun(client, host, multiattach_enabled, mappings):
"""Returns least used LUN ID available on the given host."""
if not _is_host_full(client, host):
unused_luns = _get_unused_lun_ids(mappings)
if unused_luns:
chosen_lun = random.sample(unused_luns, 1)
return chosen_lun[0]
elif multiattach_enabled:
msg = _("No unused LUN IDs are available on the host; "
"multiattach is enabled which requires that all LUN IDs "
"to be unique across the entire host group.")
raise exception.NetAppDriverException(msg)
used_lun_counts = _get_used_lun_id_counter(mappings)
# most_common() returns (lun_id, count) pairs from most- to least-used (ties
# ordered arbitrarily); reversed() therefore tries the least-used IDs first
for lun_id, __ in reversed(used_lun_counts.most_common()):
if _is_lun_id_available_on_host(client, host, lun_id):
return lun_id
msg = _("No free LUN IDs left. Maximum number of volumes that can be "
"attached to host (%s) has been exceeded.")
raise exception.NetAppDriverException(msg % utils.MAX_LUNS_PER_HOST)
def _get_unused_lun_ids(mappings):
"""Returns unused LUN IDs given mappings."""
used_luns = _get_used_lun_ids_for_mappings(mappings)
unused_luns = (set(range(utils.MAX_LUNS_PER_HOST)) - set(used_luns))
return unused_luns
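# A quick illustration (assuming, for the example only, MAX_LUNS_PER_HOST is
# 256): with mappings occupying LUNs {1, 2}, the used set becomes {0, 1, 2}
# (LUN 0 is always reserved), so _get_unused_lun_ids() returns {3, ..., 255}.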
def _get_used_lun_id_counter(mapping):
"""Returns used LUN IDs with count as a dictionary."""
used_luns = _get_used_lun_ids_for_mappings(mapping)
used_lun_id_counter = collections.Counter(used_luns)
return used_lun_id_counter
def _is_host_full(client, host):
"""Checks whether maximum volumes attached to a host have been reached."""
luns = client.get_volume_mappings_for_host(host['hostRef'])
return len(luns) >= utils.MAX_LUNS_PER_HOST
def _is_lun_id_available_on_host(client, host, lun_id):
"""Returns a boolean value depending on whether a LUN ID is available."""
mapping = client.get_volume_mappings_for_host(host['hostRef'])
used_lun_ids = _get_used_lun_ids_for_mappings(mapping)
return lun_id not in used_lun_ids
def _get_used_lun_ids_for_mappings(mappings):
"""Returns used LUNs when provided with mappings."""
used_luns = set(map(lambda lun: int(lun['lun']), mappings))
# E-Series uses LUN ID 0 for special purposes and should not be
# assigned for general use
used_luns.add(0)
return used_luns
def unmap_volume_from_host(client, volume, host, mapping):
# Volume is mapped directly to host, so delete the mapping
if mapping.get('mapRef') == host['hostRef']:
LOG.debug("Volume %(vol)s is mapped directly to host %(host)s; "
"removing mapping.", {'vol': volume['id'],
'host': host['label']})
client.delete_volume_mapping(mapping['lunMappingRef'])
return
try:
host_group = client.get_host_group(mapping['mapRef'])
except exception.NotFound:
# Volumes is mapped but to a different initiator
raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
host=host['label'])
# If volume is mapped to a foreign host group raise error
if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME:
raise eseries_exc.UnsupportedHostGroup(volume_id=volume['id'],
group=host_group['label'])
# If target host is not in the multiattach host group
if host['clusterRef'] != host_group['clusterRef']:
raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
host=host['label'])
# Volume is mapped to multiattach host group
# Remove mapping if volume should no longer be attached after this
# operation.
if volume['status'] == 'detaching':
LOG.debug("Volume %s is mapped directly to multiattach host group but "
"is not currently attached; removing mapping.", volume['id'])
client.delete_volume_mapping(mapping['lunMappingRef'])
| apache-2.0 |
shishaochen/TensorFlow-0.8-Win | tensorflow/core/framework/cost_graph_pb2.py | 1 | 9144 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/cost_graph.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/cost_graph.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n*tensorflow/core/framework/cost_graph.proto\x12\ntensorflow\"\xfe\x02\n\x0c\x43ostGraphDef\x12+\n\x04node\x18\x01 \x03(\x0b\x32\x1d.tensorflow.CostGraphDef.Node\x1a\xc0\x02\n\x04Node\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12;\n\ninput_info\x18\x03 \x03(\x0b\x32\'.tensorflow.CostGraphDef.Node.InputInfo\x12=\n\x0boutput_info\x18\x04 \x03(\x0b\x32(.tensorflow.CostGraphDef.Node.OutputInfo\x12\x1d\n\x15temporary_memory_size\x18\x05 \x01(\x03\x12\x10\n\x08is_final\x18\x06 \x01(\x08\x1a;\n\tInputInfo\x12\x16\n\x0epreceding_node\x18\x01 \x01(\x05\x12\x16\n\x0epreceding_port\x18\x02 \x01(\x05\x1a\x34\n\nOutputInfo\x12\x0c\n\x04size\x18\x01 \x01(\x03\x12\x18\n\x10\x61lias_input_port\x18\x02 \x01(\x03\x42-\n\x18org.tensorflow.frameworkB\x0f\x43ostGraphProtosP\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_COSTGRAPHDEF_NODE_INPUTINFO = _descriptor.Descriptor(
name='InputInfo',
full_name='tensorflow.CostGraphDef.Node.InputInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='preceding_node', full_name='tensorflow.CostGraphDef.Node.InputInfo.preceding_node', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='preceding_port', full_name='tensorflow.CostGraphDef.Node.InputInfo.preceding_port', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=328,
serialized_end=387,
)
_COSTGRAPHDEF_NODE_OUTPUTINFO = _descriptor.Descriptor(
name='OutputInfo',
full_name='tensorflow.CostGraphDef.Node.OutputInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='size', full_name='tensorflow.CostGraphDef.Node.OutputInfo.size', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='alias_input_port', full_name='tensorflow.CostGraphDef.Node.OutputInfo.alias_input_port', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=389,
serialized_end=441,
)
_COSTGRAPHDEF_NODE = _descriptor.Descriptor(
name='Node',
full_name='tensorflow.CostGraphDef.Node',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.CostGraphDef.Node.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='tensorflow.CostGraphDef.Node.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input_info', full_name='tensorflow.CostGraphDef.Node.input_info', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_info', full_name='tensorflow.CostGraphDef.Node.output_info', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='temporary_memory_size', full_name='tensorflow.CostGraphDef.Node.temporary_memory_size', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_final', full_name='tensorflow.CostGraphDef.Node.is_final', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_COSTGRAPHDEF_NODE_INPUTINFO, _COSTGRAPHDEF_NODE_OUTPUTINFO, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=121,
serialized_end=441,
)
_COSTGRAPHDEF = _descriptor.Descriptor(
name='CostGraphDef',
full_name='tensorflow.CostGraphDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node', full_name='tensorflow.CostGraphDef.node', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_COSTGRAPHDEF_NODE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=441,
)
_COSTGRAPHDEF_NODE_INPUTINFO.containing_type = _COSTGRAPHDEF_NODE
_COSTGRAPHDEF_NODE_OUTPUTINFO.containing_type = _COSTGRAPHDEF_NODE
_COSTGRAPHDEF_NODE.fields_by_name['input_info'].message_type = _COSTGRAPHDEF_NODE_INPUTINFO
_COSTGRAPHDEF_NODE.fields_by_name['output_info'].message_type = _COSTGRAPHDEF_NODE_OUTPUTINFO
_COSTGRAPHDEF_NODE.containing_type = _COSTGRAPHDEF
_COSTGRAPHDEF.fields_by_name['node'].message_type = _COSTGRAPHDEF_NODE
DESCRIPTOR.message_types_by_name['CostGraphDef'] = _COSTGRAPHDEF
CostGraphDef = _reflection.GeneratedProtocolMessageType('CostGraphDef', (_message.Message,), dict(
Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict(
InputInfo = _reflection.GeneratedProtocolMessageType('InputInfo', (_message.Message,), dict(
DESCRIPTOR = _COSTGRAPHDEF_NODE_INPUTINFO,
__module__ = 'tensorflow.core.framework.cost_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CostGraphDef.Node.InputInfo)
))
,
OutputInfo = _reflection.GeneratedProtocolMessageType('OutputInfo', (_message.Message,), dict(
DESCRIPTOR = _COSTGRAPHDEF_NODE_OUTPUTINFO,
__module__ = 'tensorflow.core.framework.cost_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CostGraphDef.Node.OutputInfo)
))
,
DESCRIPTOR = _COSTGRAPHDEF_NODE,
__module__ = 'tensorflow.core.framework.cost_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CostGraphDef.Node)
))
,
DESCRIPTOR = _COSTGRAPHDEF,
__module__ = 'tensorflow.core.framework.cost_graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.CostGraphDef)
))
_sym_db.RegisterMessage(CostGraphDef)
_sym_db.RegisterMessage(CostGraphDef.Node)
_sym_db.RegisterMessage(CostGraphDef.Node.InputInfo)
_sym_db.RegisterMessage(CostGraphDef.Node.OutputInfo)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\017CostGraphProtosP\001'))
# @@protoc_insertion_point(module_scope)
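# Illustrative usage sketch (not part of the generated module): building a
# CostGraphDef with one node.  Field names follow the descriptors above.
def _demo_cost_graph():
    graph = CostGraphDef()
    node = graph.node.add()
    node.name = 'MatMul'
    node.id = 1
    node.temporary_memory_size = 1024
    out = node.output_info.add()
    out.size = 4096
    return graph.SerializeToString()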
| apache-2.0 |
guilherme-gm/Hercules | tools/mobdbconverter.py | 3 | 10704 | #! /usr/bin/env python
# -*- coding: utf8 -*-
#
# This file is part of Hercules.
# http://herc.ws - http://github.com/HerculesWS/Hercules
#
# Copyright (C) 2015 Hercules Dev Team
# Copyright (C) 2015 Andrei Karas (4144)
#
# Hercules is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This script converts mob_db.txt to mob_db.conf format.
import os
import re
import sys
comaSplit = re.compile(",")
def printHeader():
print("""mob_db: (
// Mobs Database
//
/******************************************************************************
************* Entry structure ************************************************
******************************************************************************
{
// =================== Mandatory fields ===============================
Id: ID (int)
SpriteName: "SPRITE_NAME" (string)
Name: "Mob name" (string)
// =================== Optional fields ================================
Lv: level (int, defaults to 1)
Hp: health (int, defaults to 1)
Sp: mana (int, defaults to 0)
Exp: basic experience (int, defaults to 0)
JExp: job experience (int, defaults to 0)
AttackRange: attack range (int, defaults to 1)
Attack: [attack1, attack2] (int, defaults to 0)
Def: defence (int, defaults to 0)
Mdef: magic defence (int, defaults to 0)
Stats: {
Str: strength (int, defaults to 0)
Agi: agility (int, defaults to 0)
Vit: vitality (int, defaults to 0)
Int: intelligence (int, defaults to 0)
Dex: dexterity (int, defaults to 0)
Luk: luck (int, defaults to 0)
}
ViewRange: view range (int, defaults to 1)
ChaseRange: chase range (int, defaults to 1)
Size: size (int, defaults to 1)
Race: race (int, defaults to 0)
Element: (type, level)
Mode: {
CanMove: true/false (bool)
Looter: true/false (bool)
Aggressive: true/false (bool)
Assist: true/false (bool)
CastSensorIdle:true/false (bool)
Boss: true/false (bool)
Plant: true/false (bool)
CanAttack: true/false (bool)
Detector: true/false (bool)
CastSensorChase: true/false (bool)
ChangeChase: true/false (bool)
Angry: true/false (bool)
ChangeTargetMelee: true/false (bool)
ChangeTargetChase: true/false (bool)
TargetWeak: true/false (bool)
}
MoveSpeed: move speed (int, defaults to 0)
AttackDelay: attack delay (int, defaults to 4000)
AttackMotion: attack motion (int, defaults to 2000)
DamageMotion: damage motion (int, defaults to 0)
MvpExp: mvp experience (int, defaults to 0)
MvpDrops: {
AegisName: chance (string: int)
...
}
Drops: {
AegisName: chance (string: int)
...
}
},
******************************************************************************/
""")
def printFooter():
print(")")
def printField(name, value):
print("\t{0}: {1}".format(name, value))
def printField2(name, value):
print("\t\t{0}: {1}".format(name, value))
def printFieldCond2(cond, name):
if cond != 0:
print("\t\t{0}: true".format(name))
def printFieldArr(name, value, value2):
print("\t{0}: [{1}, {2}]".format(name, value, value2))
def printFieldStr(name, value):
print("\t{0}: \"{1}\"".format(name, value))
def startGroup(name):
print("\t{0}: {{".format(name))
def endGroup():
print("\t}")
def printHelp():
print("MobDB converter from txt to conf format")
print("Usage:")
print(" mobdbconverter.py re serverpath dbfilepath")
print(" mobdbconverter.py pre-re serverpath dbfilepath")
print("Usage for read from stdin:")
print(" mobdbconverter.py re dbfilepath")
def isHaveData(fields, start, cnt):
for f in range(0, cnt):
value = fields[start + f * 2]
        chance = fields[start + f * 2 + 1]
if value == "" or value == "0" or chance == "" or chance == "0":
continue
return True
return False
def convertFile(inFile, itemDb):
if inFile != "" and not os.path.exists(inFile):
return
if inFile == "":
r = sys.stdin
else:
r = open(inFile, "r")
printHeader()
for line in r:
if line.strip() == "":
continue
if len(line) < 5 or line[:2] == "//":
print(line)
continue
fields = comaSplit.split(line)
if len(fields) != 57:
print(line)
continue
for f in range(0, len(fields)):
fields[f] = fields[f].strip()
print("{")
printField("Id", fields[0])
printFieldStr("SpriteName", fields[1])
printFieldStr("Name", fields[2])
printField("Lv", fields[4])
printField("Hp", fields[5])
printField("Sp", fields[6])
printField("Exp", fields[7])
printField("JExp", fields[8])
printField("AttackRange", fields[9])
printFieldArr("Attack", fields[10], fields[11])
printField("Def", fields[12])
printField("Mdef", fields[13])
startGroup("Stats")
printField2("Str", fields[14])
printField2("Agi", fields[15])
printField2("Vit", fields[16])
printField2("Int", fields[17])
printField2("Dex", fields[18])
printField2("Luk", fields[19])
endGroup()
printField("ViewRange", fields[20])
printField("ChaseRange", fields[21])
printField("Size", fields[22])
printField("Race", fields[23])
print("\tElement: ({0}, {1})".format(int(fields[24]) % 10, int(int(fields[24]) / 20)));
mode = int(fields[25], 0)
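        # Worked example (illustrative): mode 0x0085 = 0x0080 | 0x0004 |
        # 0x0001, so the block below would emit CanAttack, Aggressive and
        # CanMove.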
if mode != 0:
startGroup("Mode")
printFieldCond2(mode & 0x0001, "CanMove")
printFieldCond2(mode & 0x0002, "Looter")
printFieldCond2(mode & 0x0004, "Aggressive")
printFieldCond2(mode & 0x0008, "Assist")
printFieldCond2(mode & 0x0010, "CastSensorIdle")
printFieldCond2(mode & 0x0020, "Boss")
printFieldCond2(mode & 0x0040, "Plant")
printFieldCond2(mode & 0x0080, "CanAttack")
printFieldCond2(mode & 0x0100, "Detector")
printFieldCond2(mode & 0x0200, "CastSensorChase")
printFieldCond2(mode & 0x0400, "ChangeChase")
printFieldCond2(mode & 0x0800, "Angry")
printFieldCond2(mode & 0x1000, "ChangeTargetMelee")
printFieldCond2(mode & 0x2000, "ChangeTargetChase")
printFieldCond2(mode & 0x4000, "TargetWeak")
printFieldCond2(mode & 0x8000, "LiveWithoutMaster")
endGroup()
printField("MoveSpeed", fields[26])
printField("AttackDelay", fields[27])
printField("AttackMotion", fields[28])
printField("DamageMotion", fields[29])
printField("MvpExp", fields[30])
if isHaveData(fields, 31, 3):
startGroup("MvpDrops")
for f in range(0, 3):
value = fields[31 + f * 2]
chance = fields[32 + f * 2]
if value == "" or value == "0" or chance == "" or chance == "0":
continue
value = int(value)
if value not in itemDb:
print("// Error: mvp drop with id {0} not found in item_db.conf".format(value))
else:
printField2(itemDb[value], chance)
endGroup()
if isHaveData(fields, 37, 10):
startGroup("Drops")
for f in range(0, 10):
value = fields[37 + f * 2]
chance = fields[38 + f * 2]
if value == "" or value == "0" or chance == "" or chance == "0":
continue
value = int(value)
if value not in itemDb:
print("// Error: drop with id {0} not found in item_db.conf".format(value))
else:
printField2(itemDb[value], chance)
endGroup()
print("},")
printFooter()
if inFile != "":
r.close()
def readItemDB(inFile, itemDb):
itemId = 0
itemName = ""
started = False
with open(inFile, "r") as r:
for line in r:
line = line.strip()
if started == True:
if line == "},":
started = False
elif line[:10] == "AegisName:":
itemName = line[12:-1]
elif line[:3] == "Id:":
try:
itemId = int(line[4:])
except ValueError:
started = False
if itemId != 0 and itemName != "":
# was need for remove wrong characters
# itemName = itemName.replace(".", "")
# if itemName[0] >= "0" and itemName[0] <= "9":
# itemName = "Num" + itemName
itemDb[itemId] = itemName
started = False
else:
if line == "{":
started = True
itemId = 0
itemName = ""
return itemDb
if len(sys.argv) != 4 and len(sys.argv) != 3:
    printHelp()
exit(1)
startPath = sys.argv[2]
if len(sys.argv) == 4:
sourceFile = sys.argv[3]
else:
sourceFile = "";
itemDb = dict()
if sys.argv[1] == "re":
itemDb = readItemDB(startPath + "/db/re/item_db.conf", itemDb)
itemDb = readItemDB(startPath + "/db/item_db2.conf", itemDb)
elif sys.argv[1] == "pre-re":
itemDb = readItemDB(startPath + "/db/pre-re/item_db.conf", itemDb)
itemDb = readItemDB(startPath + "/db/item_db2.conf", itemDb)
else:
    printHelp()
exit(1)
convertFile(sourceFile, itemDb)
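# Example invocation (illustrative; paths are assumptions):
#   python mobdbconverter.py re /path/to/hercules db/re/mob_db.txt > mob_db.conf
# This reads item names from the renewal item_db.conf files under the given
# server path and writes the converted mob database to stdout.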
| gpl-3.0 |
chuan9/chromium-crosswalk | tools/site_compare/drivers/win32/keyboard.py | 173 | 6934 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare module for simulating keyboard input.
This module contains functions that can be used to simulate a user
pressing keys on a keyboard. Support is provided for formatted strings
including special characters to represent modifier keys like CTRL and ALT
"""
import sys # for sys.exit in main
import time # for sleep
import win32api # for keybd_event and VkKeyCode
import win32con # Windows constants
# TODO(jhaas): Ask the readability guys if this would be acceptable:
#
# from win32con import VK_SHIFT, VK_CONTROL, VK_MENU, VK_LWIN, KEYEVENTF_KEYUP
#
# This is a violation of the style guide but having win32con. everywhere
# is just plain ugly, and win32con is a huge import for just a handful of
# constants
def PressKey(down, key):
"""Presses or unpresses a key.
Uses keybd_event to simulate either depressing or releasing
a key
Args:
down: Whether the key is to be pressed or released
key: Virtual key code of key to press or release
"""
# keybd_event injects key events at a very low level (it's the
# Windows API keyboard device drivers call) so this is a very
# reliable way of simulating user input
win32api.keybd_event(key, 0, (not down) * win32con.KEYEVENTF_KEYUP)
def TypeKey(key, keystroke_time=0):
"""Simulate a keypress of a virtual key.
Args:
key: which key to press
keystroke_time: length of time (in seconds) to "hold down" the key
Note that zero works just fine
Returns:
None
"""
# This just wraps a pair of PressKey calls with an intervening delay
PressKey(True, key)
time.sleep(keystroke_time)
PressKey(False, key)
def TypeString(string_to_type,
use_modifiers=False,
keystroke_time=0,
time_between_keystrokes=0):
"""Simulate typing a string on the keyboard.
Args:
string_to_type: the string to print
use_modifiers: specifies whether the following modifier characters
should be active:
{abc}: type characters with ALT held down
[abc]: type characters with CTRL held down
\ escapes {}[] and treats these values as literal
standard escape sequences are valid even if use_modifiers is false
\p is "pause" for one second, useful when driving menus
\1-\9 is F-key, \0 is F10
TODO(jhaas): support for explicit control of SHIFT, support for
nonprintable keys (F-keys, ESC, arrow keys, etc),
support for explicit control of left vs. right ALT or SHIFT,
support for Windows key
keystroke_time: length of time (in secondes) to "hold down" the key
time_between_keystrokes: length of time (seconds) to pause between keys
Returns:
None
"""
shift_held = win32api.GetAsyncKeyState(win32con.VK_SHIFT ) < 0
ctrl_held = win32api.GetAsyncKeyState(win32con.VK_CONTROL) < 0
alt_held = win32api.GetAsyncKeyState(win32con.VK_MENU ) < 0
next_escaped = False
escape_chars = {
'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v'}
for char in string_to_type:
vk = None
handled = False
# Check to see if this is the start or end of a modified block (that is,
# {abc} for ALT-modified keys or [abc] for CTRL-modified keys
if use_modifiers and not next_escaped:
handled = True
if char == "{" and not alt_held:
alt_held = True
PressKey(True, win32con.VK_MENU)
elif char == "}" and alt_held:
alt_held = False
PressKey(False, win32con.VK_MENU)
elif char == "[" and not ctrl_held:
ctrl_held = True
PressKey(True, win32con.VK_CONTROL)
elif char == "]" and ctrl_held:
ctrl_held = False
PressKey(False, win32con.VK_CONTROL)
else:
handled = False
# If this is an explicitly-escaped character, replace it with the
# appropriate code
if next_escaped and char in escape_chars: char = escape_chars[char]
# If this is \p, pause for one second.
if next_escaped and char == 'p':
time.sleep(1)
next_escaped = False
handled = True
    # If this is \0-\9, press the corresponding F key
if next_escaped and char.isdigit():
fkey = int(char)
if not fkey: fkey = 10
next_escaped = False
vk = win32con.VK_F1 + fkey - 1
# If this is the backslash, the next character is escaped
if not next_escaped and char == "\\":
next_escaped = True
handled = True
# If we make it here, it's not a special character, or it's an
# escaped special character which should be treated as a literal
if not handled:
next_escaped = False
if not vk: vk = win32api.VkKeyScan(char)
# VkKeyScan() returns the scan code in the low byte. The upper
# byte specifies modifiers necessary to produce the given character
# from the given scan code. The only one we're concerned with at the
# moment is Shift. Determine the shift state and compare it to the
# current state... if it differs, press or release the shift key.
new_shift_held = bool(vk & (1<<8))
if new_shift_held != shift_held:
PressKey(new_shift_held, win32con.VK_SHIFT)
shift_held = new_shift_held
# Type the key with the specified length, then wait the specified delay
TypeKey(vk & 0xFF, keystroke_time)
time.sleep(time_between_keystrokes)
# Release the modifier keys, if held
if shift_held: PressKey(False, win32con.VK_SHIFT)
if ctrl_held: PressKey(False, win32con.VK_CONTROL)
if alt_held: PressKey(False, win32con.VK_MENU)
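# Illustrative example (not in the original module): with use_modifiers=True,
# TypeString("[ac]") holds CTRL while typing 'a' and 'c', sending Ctrl-A
# then Ctrl-C.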
def main():
# We're being invoked rather than imported. Let's do some tests
  # Press Windows-R to bring up the Run dialog
PressKey(True, win32con.VK_LWIN)
TypeKey(ord('R'))
PressKey(False, win32con.VK_LWIN)
# Wait a sec to make sure it comes up
time.sleep(1)
  # Invoke WordPad through the Run dialog
TypeString("wordpad\n")
# Wait another sec, then start typing
time.sleep(1)
TypeString("This is a test of SiteCompare's Keyboard.py module.\n\n")
TypeString("There should be a blank line above and below this one.\n\n")
TypeString("This line has control characters to make "
"[b]boldface text[b] and [i]italic text[i] and normal text.\n\n",
use_modifiers=True)
TypeString(r"This line should be typed with a visible delay between "
"characters. When it ends, there should be a 3-second pause, "
"then the menu will select File/Exit, then another 3-second "
"pause, then No to exit without saving. Ready?\p\p\p{f}x\p\p\pn",
use_modifiers=True,
keystroke_time=0.05,
time_between_keystrokes=0.05)
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
Anaphory/p4-phylogeny | p4/Tree_model.py | 1 | 64297 | """Various Tree methods for defining models."""
from .Data import Data
from .Alignment import Part
from .Node import NodeBranch,NodePart,NodeBranchPart
from .Model import Model
from .Glitch import Glitch
import random
import func,sys,math,pf
from .Var import var
import numpy
def _setData(self, theData):
"""Sets self.data, and self.nParts"""
# Two cases. Either
# 1. Self already has a data.
# - deleteCStuff(), and replace the old data with the new data.
# 2. Self has no data.
# a. Has a model. So just check for compatibility.
# b. Has no model, so has never seen a data before.
complaintHead = '\nTree._setData()'
if self.name:
complaintHead += ", tree '%s'" % self.name
if self.fName:
complaintHead += ", file %s" % self.fName
gm = [complaintHead]
    if isinstance(theData, Data) or theData is None:
pass
else:
gm.append("Set data only to a Data object, or None, ok?")
raise Glitch(gm)
if self.data or self.model:
# Normally there won't be anything to delete, but you never know...
self.deleteCStuff()
if not theData:
self._data = None
return
if self.model:
# We have seen a data object before (otherwise we would not
# have been able to set the model). Check for compatibility.
#print "_setData() here. self.model exits."
if not self.taxNames:
gm.append("Self has model, but no taxNames. Programming error.")
raise Glitch(gm)
# Check for same number of taxa
treeNTax = len(self.taxNames)
dataNTax = len(theData.taxNames)
if self.nTax != dataNTax:
gm.append("The number of taxa in the tree (%s)" % treeNTax)
gm.append("is not the same as in the data (%s)" % dataNTax)
raise Glitch(gm)
# Check for mis-matched taxNames
isBad = 0
for tn in self.taxNames:
if tn not in theData.taxNames:
isBad = 1
break
for tn in theData.taxNames:
if tn not in self.taxNames:
isBad = 1
break
if isBad:
gm.append("TaxName mismatch between the tree and the data.")
gm.append("Here they are, sorted to show mis-matches.")
gm.append(" %25s %25s" % ('data', 'tree'))
self.taxNames.sort()
theData.taxNames.sort()
for i in range(len(self.taxNames)):
if theData.taxNames[i] == self.taxNames[i]:
gm.append(" %25s %25s" % (theData.taxNames[i], self.taxNames[i]))
else:
gm.append(" %25s %25s ***" % (theData.taxNames[i], self.taxNames[i]))
raise Glitch(gm)
# Same number of parts
if len(theData.parts) != self.model.nParts:
gm.append("nParts mis-match. len(theData.parts)=%i, model.nParts=%i" % (
len(theData.parts), self.model.nParts))
raise Glitch(gm)
# Check dims and symbols in the parts
for pNum in range(self.model.nParts):
if theData.parts[pNum].dim != self.model.parts[pNum].dim:
gm.append("Parts dim mis-match.")
raise Glitch(gm)
if theData.parts[pNum].symbols != self.model.parts[pNum].symbols:
gm.append("Parts symbols mis-match.")
raise Glitch(gm)
# Set seqNum
for n in self.iterLeavesNoRoot():
if n.seqNum != self.taxNames.index(n.name):
gm.append("seqNums do not match up with taxNames.")
raise Glitch(gm)
self._data = theData
else:
# When you do this method, _setData(), self gets a suitable
# model. We have here no model, so we may have never seen a
# data before. Or we might have just lost the model.
# Check for same number of taxa
treeNTax = 0
treeTaxNames = []
for n in self.nodes:
if n.isLeaf:
treeNTax += 1
treeTaxNames.append(n.name)
dataNTax = len(theData.taxNames)
if treeNTax != dataNTax:
gm.append("The number of taxa in the tree (%s)" % treeNTax)
gm.append("is not the same as in the data (%s)" % dataNTax)
raise Glitch(gm)
# Check for mis-matched taxNames
isBad = 0
for tn in treeTaxNames:
if tn not in theData.taxNames:
isBad = 1
break
for tn in theData.taxNames:
if tn not in treeTaxNames:
isBad = 1
break
if isBad:
gm.append("TaxName mismatch between the tree and the data.")
gm.append("Here they are, sorted to show mis-matches.")
gm.append(" %25s %25s" % ('data', 'tree'))
treeTaxNames.sort()
theData.taxNames.sort()
for i in range(len(treeTaxNames)):
if theData.taxNames[i] == treeTaxNames[i]:
gm.append(" %25s %25s" % (theData.taxNames[i], treeTaxNames[i]))
else:
gm.append("*** %25s %25s" % (theData.taxNames[i], treeTaxNames[i]))
raise Glitch(gm)
# attach
self.taxNames = theData.taxNames
self._data = theData
#self.nParts = len(theData.parts)
# Now that nParts is known ...
#print "_setData. len(theData.parts) = %s" % len(theData.parts)
self.model = Model(len(theData.parts)) # calls self.deleteCStuff()
for n in self.nodes:
if n.parts:
n.parts = []
for i in range(self.model.nParts):
n.parts.append(NodePart())
for n in self.nodes:
if n != self.root:
n.br.parts = []
for i in range(self.model.nParts):
n.br.parts.append(NodeBranchPart())
# Set modelPart dims and symbols
for pNum in range(self.model.nParts):
self.model.parts[pNum].dim = theData.parts[pNum].dim
self.model.parts[pNum].symbols = theData.parts[pNum].symbols
# There is intentionally no default pInvar, forcing the user to be explicit.
for p in self.model.parts:
p.pInvar = None
# Set seqNum
for n in self.nodes:
if n.isLeaf:
n.seqNum = self.taxNames.index(n.name)
data = property(lambda self: self._data, _setData)
def _setModel(self, theModel):
gm = ['Tree._setModel()']
#print gm[0]
#print " Got '%s'" % theModel
    if isinstance(theModel, Model) or theModel is None:
pass
else:
gm.append("Attempt to set Tree.model to '%s'. " % theModel)
gm.append("Don't set the model to anything other than 'None' or a Model, ok? ")
gm.append("(And generally the user only sets it to None.) ")
raise Glitch(gm)
#if theModel and self._model: # Why do I do this?
# gm.append("The tree already has a model object; I am refusing to clobber it.")
# gm.append("Perhaps use a (perhaps duplicate) tree with no model.")
# raise Glitch, gm
if self.model or self.data:
self.deleteCStuff()
#print 'Tree._setModel() finished deleteCStuff()'
self._model = theModel
def _delModel(self):
gm = ['Tree._delModel()']
gm.append("Caught an attempt to delete self.model, but")
gm.append("self.model is a property, so you shouldn't delete it.")
gm.append("But you can set it to None if you like.")
raise Glitch(gm)
model = property(lambda self: self._model, _setModel, _delModel)
def _checkModelThing(self, partNum, symbol, complaintHead):
gm = [complaintHead]
if not self.data:
gm.append("No data. Set the data first.")
raise Glitch(gm)
if not self.model:
# When you _setData(), a model object of suitable dimensions
# is made and attached to self. If we have got here, it is
# because the model has subsequently been lost. So just
# re-instate it.
self._setData(self.data)
if partNum < 0 or partNum >= self.model.nParts:
gm.append("Zero-based partNum (%s) is out of range (of %s parts)" % (partNum, self.model.nParts))
raise Glitch(gm)
if symbol:
if type(symbol) != type('s') or len(symbol) != 1:
gm.append("Symbols must be 1-length strings.")
raise Glitch(gm)
if symbol == '?':
gm.append("Got assigned text drawing symbol '?'.")
gm.append("Don't use it-- it is reserved for missing modelThings")
raise Glitch(gm)
def newComp(self, partNum=0, free=0, spec='empirical', val=None, symbol=None):
"""Make, attach, and return a new Comp object.
The arg *spec* should be a string, one of::
'equal' no val
'empirical' no val
'specified' val=[aList]
'wag', etc no val
(ie one of the empirical protein models, including
cpREV, d78, jtt, mtREV24, mtmam, wag, etc)
If spec='specified', then you specify dim or dim-1 values in a
list as the 'val' arg.
This method returns a Comp object, which you can ignore if it is a
tree-homogeneous model. However, if it is a tree-hetero model
then you may want to get that Comp object so that you can place
it on the tree explicitly with setModelThing(), like this::
c0 = newComp(partNum=0, free=1, spec='empirical')
c1 = newComp(partNum=0, free=1, spec='empirical')
myTree.setModelThing(c0, node=myTree.root, clade=1)
myTree.setModelThing(c1, node=5, clade=1)
myTree.setModelThing(c1, node=18, clade=0)
Alternatively, you can simply let p4 place them randomly::
newComp(partNum=0, free=1, spec='empirical')
newComp(partNum=0, free=1, spec='empirical')
myTree.setModelThingsRandomly()
    Calculation of probability matrices for likelihood calcs etc is
    wrong when there are any comp values that are zero, so that is not
allowed. Any zeros are converted to var.PIVEC_MIN, which is 1e-18
this week. Hopefully close enough to zero for you.
"""
gm = ['Tree.newComp()']
self._checkModelThing(partNum, symbol, gm[0])
if self.model.cModel:
self.deleteCStuff()
mt = Comp()
mt.partNum = partNum
#mt.dim = self.data.parts[partNum].dim
mt.free = free
# spec
if spec not in var.compSpecs:
gm.append("The spec should be one of %s" % var.compSpecs)
raise Glitch(gm)
mt.spec = spec
mt.num = len(self.model.parts[partNum].comps)
if symbol:
mt.symbol = symbol
else:
mt.symbol = var.modelSymbols[mt.num]
self.model.parts[partNum].comps.append(mt)
# assign val
dim = self.model.parts[partNum].dim
if spec == 'equal':
oneComp = 1.0 / dim
mt.val = []
for i in range(dim):
mt.val.append(oneComp)
elif spec == 'empirical':
mt.val = None
elif spec == 'specified':
if not val:
gm.append("Specified comp, but no val.")
raise Glitch(gm)
try:
val = list(val)
except TypeError:
gm.append("The 'val' arg should be a list or tuple.")
raise Glitch(gm)
if len(val) == dim or len(val) == dim - 1:
pass
else:
gm.append("Bad length for val arg. Should be dim or dim-1 long.")
gm.append("(Dim for this part is %i)" % dim)
raise Glitch(gm)
# I allow val's of dim or dim-1.
if len(val) == dim - 1:
lastVal = 1.0 - sum(val)
if lastVal > 0.0:
val = val + [1.0 - sum(val)]
else:
gm.append("Bad comp vals %s" % val)
gm.append("sum to 1.0 or more.")
raise Glitch(gm)
else: # len = dim
theSum = sum(val)
theDiff = math.fabs(theSum - 1.0)
# How big to make the delta? With reasonably good,
# normalized protein comps (where all the values had just
# been divided by the total, so it should have summed to
# 1.0 at that point) I kept getting 1.1e-16. So make it
# 5.e-16
if theDiff > 5.e-16: # 1e-17 was too small for protein
gm.append("Bad comp vals %s" % val)
gm.append("does not sum to 1.0")
gm.append("The sum = %f" % theSum)
gm.append("abs(1.0 - theSum) = %g" % theDiff)
raise Glitch(gm)
# Are any specified values less than PIVEC_MIN?
needsNormalizing = 0
for i in range(len(val)):
thisVal = val[i]
if thisVal < var.PIVEC_MIN:
print(gm[0])
print(" Specifying a comp of zero for a character is not allowed.")
print(" Re-setting to %g" % var.PIVEC_MIN)
val[i] = var.PIVEC_MIN
needsNormalizing = 1
if needsNormalizing:
theSum = sum(val)
for i in range(len(val)):
val[i] /= theSum
if math.fabs(sum(val) - 1.0) > 5.e-16:
gm.append("Bad comp vals %s" % val)
gm.append("does not sum to 1.0")
raise Glitch(gm)
#print "sum(val) - 1.0 = %f (%g)" % (sum(val) - 1.0, sum(val) - 1.0)
mt.val = val
# Empirical protein comps are from the dat files in PAML. Thanks, Ziheng!
elif spec == 'cpREV':
mt.val = [0.0755, 0.0621, 0.0410, 0.0371, 0.0091,
0.0382, 0.0495, 0.0838, 0.0246, 0.0806,
0.1011, 0.0504, 0.0220, 0.0506, 0.0431,
0.0622, 0.0543, 0.0181, 0.0307, 0.0660]
#theSum = sum(mt.val)
#for i in range(len(mt.val)):
# mt.val[i] /= theSum
elif spec == 'd78':
# These first values have a couple more decimal places. I
# think I got these from some obscure code in a back corner of
# the NCBI ftp site. I believe it is obtainable by raising
# the d78 matrix to a high power. It is a more precise comp,
# but is not used here because it is not standard.
#mt.val = [0.08713, 0.04090, 0.04043, 0.04687, 0.03347,
# 0.03826, 0.04953, 0.08861, 0.03362, 0.03689,
# 0.08536, 0.08048, 0.01475, 0.03977, 0.05068,
# 0.06958, 0.05854, 0.01049, 0.02992, 0.06472]
# This next set of values is from her paper, and is the set
# that everybody uses.
#mt.val = [0.087, 0.041, 0.040, 0.047, 0.033,
# 0.038, 0.05, 0.089, 0.034, 0.037,
# 0.085, 0.08, 0.015, 0.04, 0.051,
# 0.07, 0.058, 0.01, 0.03, 0.065]
# These values are from Goldman's recommendations (Kosiol & Goldman)
mt.val = [0.087127, 0.040904, 0.040432, 0.046872, 0.033474,
0.038255, 0.049530, 0.088612, 0.033619, 0.036886,
0.085357, 0.080481, 0.014753, 0.039772, 0.050680,
0.069577, 0.058542, 0.010494, 0.029916, 0.064718]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'jtt':
#mt.val = [0.077,0.051, 0.043, 0.052, 0.02,
# 0.041, 0.062, 0.074, 0.023, 0.052,
# 0.091, 0.059, 0.024, 0.04, 0.051,
# 0.069, 0.059, 0.014, 0.032, 0.066]
# again, a Goldman recommendation
mt.val = [0.076862, 0.051057, 0.042546, 0.051269, 0.020279,
0.041061, 0.061820, 0.074714, 0.022983, 0.052569,
0.091111, 0.059498, 0.023414, 0.040530, 0.050532,
0.068225, 0.058518, 0.014336, 0.032303, 0.066374]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'mtREV24':
mt.val = [0.072, 0.019, 0.039, 0.019, 0.006,
0.025, 0.024, 0.056, 0.028, 0.088,
0.168, 0.023, 0.054, 0.061, 0.054,
0.072, 0.086, 0.029, 0.033, 0.043]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'mtmam':
mt.val = [0.0692, 0.0184, 0.0400, 0.0186, 0.0065,
0.0238, 0.0236, 0.0557, 0.0277, 0.0905,
0.1675, 0.0221, 0.0561, 0.0611, 0.0536,
0.0725, 0.0870, 0.0293, 0.0340, 0.0428]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'wag':
mt.val = [0.0866279, 0.043972, 0.0390894, 0.0570451, 0.0193078,
0.0367281, 0.0580589, 0.0832518, 0.0244313, 0.048466,
0.086209, 0.0620286, 0.0195027, 0.0384319, 0.0457631,
0.0695179, 0.0610127, 0.0143859, 0.0352742, 0.0708956]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'rtRev':
mt.val = [0.0646, 0.0453, 0.0376, 0.0422, 0.0114, 0.0606,
0.0607, 0.0639, 0.0273, 0.0679, 0.1018, 0.0751,
0.0150, 0.0287, 0.0681, 0.0488, 0.0622, 0.0251,
0.0318, 0.0619]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'tmjtt94':
mt.val = [0.105068479, 0.015695291, 0.018494452, 0.008897331,
0.021893432, 0.014095771, 0.009697091, 0.075777267,
0.016794962, 0.118764371, 0.163450965, 0.011196641,
0.033290013, 0.077676697, 0.025992202, 0.056782965,
0.052284315, 0.022293312, 0.032390283, 0.119464161]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'tmlg99':
mt.val = [0.100632, 0.014017, 0.014706, 0.010371, 0.030668,
0.015152, 0.011343, 0.069235, 0.017501, 0.107722,
0.155161, 0.009723, 0.038730, 0.086453, 0.031761,
0.064333, 0.044847, 0.028277, 0.036988, 0.112380]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'lg':
mt.val = [0.079066, 0.055941, 0.041977, 0.053052, 0.012937,
0.040767, 0.071586, 0.057337, 0.022355, 0.062157,
0.099081, 0.064600, 0.022951, 0.042302, 0.044040,
0.061197, 0.053287, 0.012066, 0.034155, 0.069147]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'blosum62':
mt.val = [0.074, 0.052, 0.045, 0.054, 0.025,
0.034, 0.054, 0.074, 0.026, 0.068,
0.099, 0.058, 0.025, 0.047, 0.039,
0.057, 0.051, 0.013, 0.032, 0.073]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'hivb':
mt.val = [0.060490222, 0.066039665, 0.044127815, 0.042109048, 0.020075899,
0.053606488, 0.071567447, 0.072308239, 0.022293943, 0.069730629,
0.098851122, 0.056968211, 0.019768318, 0.028809447, 0.046025282,
0.05060433, 0.053636813, 0.033011601, 0.028350243, 0.061625237]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'mtart':
mt.val = [0.054116, 0.018227, 0.039903, 0.020160, 0.009709,
0.018781, 0.024289, 0.068183, 0.024518, 0.092638,
0.148658, 0.021718, 0.061453, 0.088668, 0.041826,
0.091030, 0.049194, 0.029786, 0.039443, 0.057700]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
elif spec == 'mtzoa':
mt.val = [0.068880, 0.021037, 0.030390, 0.020696, 0.009966,
0.018623, 0.024989, 0.071968, 0.026814, 0.085072,
0.156717, 0.019276, 0.050652, 0.081712, 0.044803,
0.080535, 0.056386, 0.027998, 0.037404, 0.066083]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
    # Cymon was here!  gcpREV (green chloroplast plant REV), estimated from
    # green plant chloroplasts alone rather than from all chloroplasts.
elif spec == 'gcpREV':
mt.val = [0.079510, 0.056001, 0.040459, 0.033220, 0.009051,
0.037505, 0.049675, 0.080233, 0.021880, 0.080496,
0.107512, 0.049324, 0.020776, 0.047731, 0.039916,
0.073820, 0.053615, 0.016705, 0.030790, 0.071781]
theSum = sum(mt.val)
for i in range(len(mt.val)):
mt.val[i] /= theSum
return mt
def newRMatrix(self, partNum=0, free=0, spec='ones', val=None, symbol=None):
"""Make, attach, and return a new RMatrix instance.
spec should be one of:
- 'ones' - for JC, poisson, F81
- '2p' - for k2p and hky
- 'specified'
- 'cpREV'
- 'd78'
- 'jtt'
- 'mtREV24'
- 'mtmam'
- 'wag'
- 'rtRev'
- 'tmjtt94'
- 'tmlg99'
- 'lg'
- 'blosum62'
- 'hivb'
- 'mtart'
- 'mtzoa'
You do not set the 'val' arg unless the spec is 'specified' or
'2p'. If spec='2p', then you set val to kappa.
If the spec is 'specified', you specify all the numerical values
in a list given as the 'val' arg. The length of that list will be
(((dim * dim) - dim) / 2) - 1, so for DNA, where dim=4, you would
specify a list containing 5 numbers. """
## not implemented:
## 'blosum62a'
## 'blosum62b'
## 'phat70'
complaintHead = '\nTree.newRMatrix()'
gm = [complaintHead]
self._checkModelThing(partNum, symbol, complaintHead)
if self.model.cModel:
self.deleteCStuff()
mt = RMatrix()
mt.partNum = partNum
#mt.dim = self.data.parts[partNum].dim
mt.free = free
if spec not in var.rMatrixSpecs:
gm.append("Got unknown rMatrix spec '%s'." % spec)
gm.append("Should be one of: %s" % var.rMatrixSpecs)
raise Glitch(gm)
mt.spec = spec
mt.num = len(self.model.parts[partNum].rMatrices)
if symbol:
mt.symbol = symbol
else:
mt.symbol = var.modelSymbols[mt.num]
self.model.parts[partNum].rMatrices.append(mt)
# assign val
dim = self.model.parts[partNum].dim
if var.rMatrixNormalizeTo1:
        goodLen = (((dim * dim) - dim) // 2)
else:
        goodLen = (((dim * dim) - dim) // 2) - 1
v = None
if spec == 'specified':
if val:
if len(val) == goodLen: # should check that values are all floats
v = numpy.array(val, numpy.float)
if var.rMatrixNormalizeTo1:
v /= v.sum()
elif var.rMatrixNormalizeTo1 and len(val) == goodLen - 1:
gm.append("var.rMatrixNormalizeTo1 is set, val length should be %i, got %i" % (goodLen, len(val)))
raise Glitch(gm)
else:
gm.append("Bad length for arg val. Length %i, should be %i" % (len(val), goodLen))
raise Glitch(gm)
else:
gm.append("spec is 'specified', but there are no specified rMatrix values.")
gm.append("Specify rMatrix values by eg val=[2.0, 3.0, 4.0, 5.0,6.0]")
raise Glitch(gm)
elif spec == 'ones':
v = numpy.array([1.0] * goodLen)
if var.rMatrixNormalizeTo1:
v /= v.sum()
elif spec == '2p':
try:
v = float(val)
except (ValueError, TypeError):
gm.append("Kappa ('val' arg) should be a float. Setting to 2.0")
v = 2.0
if v < var.KAPPA_MIN:
gm.append("Kappa is too small. Setting to %f" % var.KAPPA_MIN)
v = var.KAPPA_MIN
elif v > var.KAPPA_MAX:
gm.append("Kappa is too big. Setting to %f" % var.KAPPA_MAX)
v = var.KAPPA_MAX
v = numpy.array([v], numpy.float)
elif spec in var.rMatrixProteinSpecs:
if self.data.parts[partNum].dataType != 'protein':
gm.append("A protein matrix has been specified, but the dataType for part %i is %s." % (
partNum, self.data.parts[partNum].dataType))
raise Glitch(gm)
if free:
gm.append('The rMatrix should not be free if it is an empirical protein matrix.')
raise Glitch(gm)
mt.val = v # type numpy.ndarray, or None for protein
return mt
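# Illustrative usage sketch (not part of this module; assumes a Tree 't'
# with DNA data already attached).  An HKY+G style setup combines an
# empirical comp, a '2p' rMatrix with kappa, and gamma-distributed rates:
#
#     t.newComp(partNum=0, free=1, spec='empirical')
#     t.newRMatrix(partNum=0, free=1, spec='2p', val=2.0)
#     t.setNGammaCat(partNum=0, nGammaCat=4)
#     t.newGdasrv(partNum=0, free=1, val=0.5)
#     t.setPInvar(partNum=0, free=0, val=0.0)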
def newGdasrv(self, partNum=0, free=0, val=None, symbol=None):
gm = ['Tree.newGdasrv()']
if not self.model:
gm.append("Set the data first. Eg myTree.data = Data()")
raise Glitch(gm)
if self.model.cModel:
self.deleteCStuff()
# check if there is an nGammaCat > 1:
if self.model.parts[partNum].nGammaCat == 1:
gm.append("For this part (%s), the number of nGammaCat has been set to 1." % partNum)
gm.append("So gdasrv won't work.")
gm.append("You can set the nGammaCat with yourTree.setNGammaCat(partNum=x, nGammaCat=y)")
raise Glitch(gm)
# check val
    if val is None:
gm.append("Please specify a val, a positive float.")
raise Glitch(gm)
try:
v = float(val)
except:
gm.append("Arg val must be a float. Got '%s'" % val)
raise Glitch(gm)
# This week, we have in defines.h
#define GAMMA_SHAPE_MIN 0.000001
#define GAMMA_SHAPE_MAX 300.0
if v <= 0.000001 or v >= 300.0:
gm.append("Arg val must be between 0.000001 and 300. Got %f" % v)
raise Glitch(gm)
self._checkModelThing(partNum, symbol, gm[0])
if self.model.parts[partNum].isMixture:
gm.append("Don't do this if it is a mixture.")
raise Glitch(gm)
mt = Gdasrv()
mt.nGammaCat = self.model.parts[partNum].nGammaCat
mt.partNum = partNum
mt.free = free
# no spec or dim
mt.num = len(self.model.parts[partNum].gdasrvs)
if symbol:
mt.symbol = symbol
else:
mt.symbol = var.modelSymbols[mt.num]
self.model.parts[partNum].gdasrvs.append(mt)
mt.freqs = numpy.zeros(mt.nGammaCat, numpy.float)
mt.rates = numpy.zeros(mt.nGammaCat, numpy.float)
mt._val[0] = v
mt.calcRates()
return mt
def setPInvar(self, partNum=0, free=0, val=0.0):
complaintHead = '\nTree.setPInvar()'
gm = [complaintHead]
# check val
try:
v = float(val)
except:
gm.append("Arg val must be a float. Got '%s'" % val)
raise Glitch(gm)
if v < 0.0 or v >= 1.0:
gm.append("Arg val must be zero or more, and less than 1. Got %f" % v)
raise Glitch(gm)
self._checkModelThing(partNum, None, complaintHead)
if self.model.cModel:
self.deleteCStuff()
mt = PInvar()
mt.partNum = partNum
mt.free = free
mt.val = v
self.model.parts[partNum].pInvar = mt
def setRelRate(self, partNum=0, val=0.0):
complaintHead = '\nTree.setRelRate()'
gm = [complaintHead]
# check val
try:
v = float(val)
except:
gm.append("Arg val must be a float. Got '%s'" % val)
raise Glitch(gm)
if v <= 0.0 or v >= 1000.0:
gm.append("Arg val must be more than zero, and less than 1000 (arbitrarily). Got %f" % v)
raise Glitch(gm)
self._checkModelThing(partNum, None, complaintHead)
if self.model.cModel:
self.deleteCStuff()
self.model.parts[partNum].relRate = v
def setRjComp(self, partNum=0, val=True):
if self.model.cModel:
self.deleteCStuff()
self.model.parts[partNum].rjComp = val
def setRjRMatrix(self, partNum=0, val=True):
if self.model.cModel:
self.deleteCStuff()
self.model.parts[partNum].rjRMatrix = val
def setModelThing(self, theModelThing, node=None, clade=1):
complaintHead = '\nTree.setModelThing()'
gm = [complaintHead]
if self.model.parts[theModelThing.partNum].isMixture:
gm.append("Don't do this if the part uses a mixture model.")
raise Glitch(gm)
if theModelThing and \
(isinstance(theModelThing, Comp) or \
isinstance(theModelThing, RMatrix) or \
isinstance(theModelThing, Gdasrv)):
pass
else:
gm.append("Expecting a model thing instance of some sort.")
gm.append("Ie a comp, rMatrix, or gdasrv, instance.")
gm.append("Got theModelThing = %s" % theModelThing)
raise Glitch(gm)
if self.model.cModel:
self.deleteCStuff()
partNum = theModelThing.partNum
    if node is None:
theNode = self.root
else:
theNode = self.node(node)
isBad = 0
if isinstance(theModelThing, Comp):
if theModelThing != self.model.parts[partNum].comps[theModelThing.num]:
isBad = 1
elif isinstance(theModelThing, RMatrix):
if theModelThing != self.model.parts[partNum].rMatrices[theModelThing.num]:
isBad = 1
elif isinstance(theModelThing, Gdasrv):
if theModelThing != self.model.parts[partNum].gdasrvs[theModelThing.num]:
isBad = 1
else: # This will never happen-- we checked above. Overkill.
gm.append("I don't recognise theModelThing.")
raise Glitch(gm)
if isBad:
gm.append("The modelThing can only be set on the tree that made it.")
raise Glitch(gm)
# For the root, we set comps and nothing else. For other nodes we
# set anything.
if theNode == self.root:
if isinstance(theModelThing, Comp):
theNode.parts[partNum].compNum = theModelThing.num
else:
if isinstance(theModelThing, Comp):
theNode.parts[partNum].compNum = theModelThing.num
elif isinstance(theModelThing, RMatrix):
theNode.br.parts[partNum].rMatrixNum = theModelThing.num
elif isinstance(theModelThing, Gdasrv):
theNode.br.parts[partNum].gdasrvNum = theModelThing.num
if clade:
aboves = self.getNodeNumsAbove(theNode, leavesOnly=0)
for i in aboves:
if isinstance(theModelThing, Comp):
self.nodes[i].parts[partNum].compNum = theModelThing.num
elif isinstance(theModelThing, RMatrix):
self.nodes[i].br.parts[partNum].rMatrixNum = theModelThing.num
elif isinstance(theModelThing, Gdasrv):
self.nodes[i].br.parts[partNum].gdasrvNum = theModelThing.num
def setModelThingsRandomly(self, forceRepresentation=2):
"""Place model things (semi-)randomly on the tree.
For example, if there are 2 compositions in model part partNum,
this method will decorate each node of the tree with zeros and
ones, randomly. The actual thing set is
node.parts[partNum].compNum. If the model thing is homogeneous,
it will just put zeros in all the nodes.
We want to have each model thing on the tree somewhere, and so it
is not really randomly set. If the model thing numbers were
assigned randomly on the tree, it may occur that some model thing
numbers by chance would not be represented. This is not allowed,
and you can set forceRepresentation to some positive integer, 1 or
more. That number will be the lower limit allowed on the number
of nodes that get assigned the model thing number. For example,
if forceRepresentation is set to 2, then each model thing must get
assigned to at least 2 nodes."""
gm = ['Tree.setModelThingsRandomly()']
if not self.model or not self.model.nParts:
gm.append("No model parts?")
raise Glitch(gm)
if self.model.cModel:
self.deleteCStuff()
#self.model.dump()
if type(forceRepresentation) != type(1) or forceRepresentation < 1:
gm.append("Arg 'forceRepresentation' should be 1 or more.")
gm.append("Got forceRepresentation = %s" % forceRepresentation)
raise Glitch(gm)
for i in self.preOrder:
if i == var.NO_ORDER:
gm.append("This method does not work if any nodes are not used in the tree.")
raise Glitch(gm)
for pNum in range(self.model.nParts):
mp = self.model.parts[pNum]
# First do comps
if mp.nComps == 1:
for n in self.nodes:
n.parts[pNum].compNum = 0
elif mp.nComps > 1:
nNodes = len(self.nodes)
if (mp.nComps * forceRepresentation) > nNodes:
gm.append("Part %i" % pNum)
gm.append("There are not enough nodes (%i) to put %i" % (nNodes, mp.nComps))
gm.append("comps on at least forceRepresentation (%i) nodes." % forceRepresentation)
raise Glitch(gm)
nList = self.nodes[:]
random.shuffle(nList)
# get the forceRepresentation out of the way first
for mtNum in range(mp.nComps):
for fr in range(forceRepresentation):
n = nList.pop()
n.parts[pNum].compNum = mtNum
# Now do the rest
for n in nList:
n.parts[pNum].compNum = random.randrange(mp.nComps)
else:
gm.append("No comps in part %i" % pNum)
raise Glitch(gm)
# Second do rMatrices
if mp.nRMatrices == 1:
for n in self.nodes:
if n != self.root:
n.br.parts[pNum].rMatrixNum = 0
elif mp.nRMatrices > 1:
nNodes = len(self.nodes) - 1
if (mp.nRMatrices * forceRepresentation) > nNodes:
gm.append("Part %i" % pNum)
gm.append("There are not enough nodes (%i) to put %i" % (nNodes, mp.nRMatrices))
gm.append("rMatrices on at least forceRepresentation (%i) nodes." % forceRepresentation)
raise Glitch(gm)
nList = self.nodes[:]
nList.remove(self.root)
random.shuffle(nList)
# get the forceRepresentation out of the way first
for mtNum in range(mp.nRMatrices):
for fr in range(forceRepresentation):
n = nList.pop()
n.br.parts[pNum].rMatrixNum = mtNum
# Now do the rest
for n in nList:
n.br.parts[pNum].rMatrixNum = random.randrange(mp.nRMatrices)
else:
gm.append("No rMatrices in part %i" % pNum)
raise Glitch(gm)
# Third do gdasrvs
if mp.nGammaCat > 1:
if mp.nGdasrvs == 1:
for n in self.nodes:
if n != self.root:
n.br.parts[pNum].gdasrvNum = 0
elif mp.nGdasrvs > 1:
nNodes = len(self.nodes) - 1
if (mp.nGdasrvs * forceRepresentation) > nNodes:
gm.append("Part %i" % pNum)
gm.append("There are not enough nodes (%i) to put %i" % (nNodes, mp.nGdasrvs))
gm.append("gdasrvs on at least forceRepresentation (%i) nodes." % forceRepresentation)
raise Glitch(gm)
nList = self.nodes[:]
nList.remove(self.root)
random.shuffle(nList)
# get the forceRepresentation out of the way first
for mtNum in range(mp.nGdasrvs):
for fr in range(forceRepresentation):
n = nList.pop()
n.br.parts[pNum].gdasrvNum = mtNum
# Now do the rest
for n in nList:
n.br.parts[pNum].gdasrvNum = random.randrange(mp.nGdasrvs)
else:
gm.append("No gdasrvs in part %i and yet nGammaCat > 1" % pNum)
raise Glitch(gm)
#self.dump(model=True)
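# Illustrative sketch (assumes the tree 't' from the sketch above, with two
# or more free comps and rMatrices created first):
#
#     t.setModelThingsRandomly(forceRepresentation=2)
#
# scatters the comp, rMatrix, and gdasrv numbers over the nodes, placing
# each model thing on at least 2 nodes.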
def setModelThingsNNodes(self):
"""Set nNodes for all modelThings"""
gm = ['Tree.setModelThingsNNodes()']
if not self.model or not self.model.nParts:
gm.append("No model parts?")
raise Glitch(gm)
for pNum in range(self.model.nParts):
mp = self.model.parts[pNum]
if not mp.nComps:
gm.append("No comps in model part %i." % pNum)
raise Glitch(gm)
elif not mp.nRMatrices:
gm.append("No rMatrices in model part %i." % pNum)
raise Glitch(gm)
for pNum in range(self.model.nParts):
mp = self.model.parts[pNum]
# First do comps
if mp.nComps == 1:
pass
elif mp.nComps > 1:
for mtNum in range(mp.nComps):
mp.comps[mtNum].nNodes = 0
for n in self.iterNodes():
mp.comps[n.parts[pNum].compNum].nNodes += 1
# Second do rMatrices
if mp.nRMatrices == 1:
pass
elif mp.nRMatrices > 1:
for mtNum in range(mp.nRMatrices):
mp.rMatrices[mtNum].nNodes = 0
for n in self.iterNodesNoRoot():
mp.rMatrices[n.br.parts[pNum].rMatrixNum].nNodes += 1
# Third do gdasrvs
if mp.nGammaCat > 1:
if mp.nGdasrvs == 1:
pass
elif mp.nGdasrvs > 1:
for mtNum in range(mp.nGdasrvs):
mp.gdasrvs[mtNum].nNodes = 0
for n in self.iterNodesNoRoot():
mp.gdasrvs[n.br.parts[pNum].gdasrvNum].nNodes += 1
else:
gm.append("No gdasrvs in part %i" % pNum)
raise Glitch(gm)
def summarizeModelThingsNNodes(self):
"""Summarize nNodes for all modelThings if isHet"""
gm = ['Tree.summarizeModelThingsNNodes()']
if not self.model or not self.model.nParts:
gm.append("No model parts?")
raise Glitch(gm)
if not self.model.isHet:
gm.append("This method is for hetero models")
raise Glitch(gm)
for pNum in range(self.model.nParts):
mp = self.model.parts[pNum]
if not mp.nComps:
gm.append("No comps in model part %i." % pNum)
raise Glitch(gm)
elif not mp.nRMatrices:
gm.append("No rMatrices in model part %i." % pNum)
raise Glitch(gm)
for pNum in range(self.model.nParts):
print("\n%6s %s:" % ("Part", pNum))
mp = self.model.parts[pNum]
# First do comps
if mp.nComps == 1:
pass
elif mp.nComps > 1:
for mtNum in range(mp.nComps):
#print " comp %i nNodes=%i" % (mtNum, mp.comps[mtNum].nNodes)
print("%16s %i %s = %i" % ("composition", mtNum, "nNodes",
mp.comps[mtNum].nNodes))
# Second do rMatrices
if mp.nRMatrices == 1:
pass
elif mp.nRMatrices > 1:
for mtNum in range(mp.nRMatrices):
print("%16s %i %s = %i" % ("rate matrix", mtNum,
"nNodes", mp.rMatrices[mtNum].nNodes))
# Third do gdasrvs
if mp.nGammaCat > 1:
if mp.nGdasrvs == 1:
pass
elif mp.nGdasrvs > 1:
for mtNum in range(mp.nGdasrvs):
print(" gdasrv %i nNodes =%i" % (mtNum, mp.gdasrvs[mtNum].nNodes))
else:
gm.append("No gdasrvs in part %i" % pNum)
raise Glitch(gm)
def setTextDrawSymbol(self, theSymbol='-', node=None, clade=1):
gm = ['\nTree.setTextDrawString()']
if not theSymbol or type(theSymbol) != type('c') or len(theSymbol) != 1:
gm.append("theSymbol should be a single character string.")
raise Glitch(gm)
if not node:
theNode = self.root
else:
theNode = self.node(node)
if theNode == self.root:
pass
else:
theNode.br.textDrawSymbol = theSymbol
if clade:
aboves = self.getNodeNumsAbove(theNode, leavesOnly=0)
for i in aboves:
self.nodes[i].br.textDrawSymbol = theSymbol
def setNGammaCat(self, partNum=0, nGammaCat=1):
gm = ['\nTree.setNGammaCat()']
if not self.data or not self.model:
gm.append("No data?")
raise Glitch(gm)
if self.model.cModel:
self.deleteCStuff()
if partNum < 0 or partNum >= self.model.nParts:
gm.append("PartNum %s is out of range of %s parts." % (partNum, self.model.nParts))
raise Glitch(gm)
if self.model.parts[partNum].isMixture:
gm.append("Don't do this if the part uses a mixture model.")
raise Glitch(gm)
try:
x = int(nGammaCat)
except ValueError:
gm.append("'%s' does not appear to be an integer." % i)
raise Glitch(gm)
if x < 1:
gm.append("nGammaCat should not be less than 1.")
raise Glitch(gm)
elif x > 16:
gm.append("nGammaCat '%s' exceeds the arbitrary limit of 16." % x)
raise Glitch(gm)
    self.model.parts[partNum].nGammaCat = x
# def setMixture(self, partNum=0, free=0, freqs=None, rates=None):
# complaintHead = '\nTree.setMixture()'
# gm = [complaintHead]
# if 1:
# gm.append("Sorry -- not turned on yet.")
# raise Glitch, gm
# assert self.model and self.data
# if self.model.cModel:
# self.deleteCStuff()
# if partNum < 0 or partNum >= self.model.nParts:
# gm.append("PartNum %s is out of range of %s parts." % (partNum, self.model.nParts))
# raise Glitch, gm
# mp = self.model.parts[partNum]
# isHet = 0
# for n in self.nodes:
# if n.parts[partNum].compNum >= 0:
# isHet = 1
# break
# if n != self.root:
# if n.br.parts[partNum].rMatrixNum >= 0 or n.br.parts[partNum].gdasrvNum >= 0:
# isHet = 1
# break
# if isHet:
# gm.append("Can't be both hetero on the tree and a mixture model.")
# gm.append("There seems to have been a previous setModelThing()")
# raise Glitch, gm
# mp.isMixture = 1
# mp.mixture = Mixture()
# mp.mixture.free = free
# #if freqs and rates:
# nMix = mp.nComps * mp.nRMatrices
# mp.mixture.freqs = numpy.zeros(nMix, numpy.float)
# mp.mixture.rates = numpy.zeros(nMix, numpy.float)
# try:
# for i in range(nMix):
# mp.mixture.freqs[i] = float(freqs[i])
# mp.mixture.rates[i] = float(rates[i])
# except:
# gm.append('Args freqs and rates should be sequences of floats, each nComps * nMatrices long.')
# raise Glitch, gm
# if len(freqs) != nMix or len(rates) != nMix:
# gm.append('Args freqs and rates should be sequences of floats, each nComps * nMatrices long.')
# raise Glitch, gm
def modelSanityCheck(self, resetEmpiricalComps=True):
"""Check that the tree, data, and model specs are good to go.
Complain and exit if there is anything wrong that might prevent a
likelihood evaluation from being done. We are assuming that a
data object exists and is attached, and that model stuff has been
set.
Check that each part has at least 1 each from comps, rMatrices,
and gdasrvs (if nGammaCat is > 1).
        If it is not a mixture model for a particular part, check that
        each node has a comp, rMatrix, and gdasrv. Check that all comps,
        rMatrices, gdasrvs are used on a node somewhere.
        Here relRate, ie the relative rate of each data partition, is
        adjusted based on the size of the data partitions.
        newRelRate_p = oldRelRate_p * (Sum_i[partLen_i] / Sum_i[oldRelRate_i * partLen_i])
        That ensures that Sum(newRelRate_i * partLen_i) = totalDataLength, ie
        that the weighted mean of the rates is 1.0.
        This method also tallies up the number of free params in the whole
        model, and sets self.model.nFreePrams.
"""
complaintHead = '\nTree.modelSanityCheck()'
gm = [complaintHead]
#print "\nTree.modelSanityCheck() here. self.model.nParts=%s" % self.model.nParts
#print "\nTree.modelSanityCheck() here. resetEmpiricalComps=%s" % resetEmpiricalComps
isBad = 0
complaints = []
if not self.data:
complaints.append(' No data.')
isBad = 1
if not self.model:
complaints.append(' No model.')
isBad = 1
# Set isHet.
for pNum in range(self.model.nParts):
mp = self.model.parts[pNum]
mp.isHet = 0
if mp.nComps > 1 or mp.nRMatrices > 1:
mp.isHet = 1
if mp.nGammaCat > 1 and mp.nGdasrvs > 1:
mp.isHet = 1
# Check that all parts have all the required stuff. Make a list
# of errors. If there is something missing or wrong, don't die
# right away, but add the problem to the list, and write it all
# out at the end. It gives the user a chance to fix more than one
# error at a time.
for pNum in range(self.model.nParts):
complaints.append(' Part %i' % pNum)
partIsBad = 0
mp = self.model.parts[pNum]
# Check if essential things have been set
if not mp.nComps:
complaints.append(' No comps in part %s' % pNum)
partIsBad = 1
if not mp.nRMatrices:
complaints.append(' No rMatrices in part %s' % pNum)
partIsBad = 1
if mp.nGammaCat > 1:
if not mp.nGdasrvs:
complaints.append(' No gdasrvs in part %s' % pNum)
partIsBad = 1
if mp.nGammaCat == 1:
if mp.nGdasrvs:
complaints.append(' There should be no gdasrvs in part %s, with nGammaCat=1' % pNum)
partIsBad = 1
if not mp.pInvar:
complaints.append(' No pInvar in part %s' % pNum)
partIsBad = 1
if partIsBad:
gm.append(" (Indices are zero-based.)")
gm += complaints
raise Glitch(gm)
# Check if comp values have been set.
for mt in mp.comps:
if mt.spec != 'empirical' or not resetEmpiricalComps:
if not mt.val:
complaints.append(' No composition val in part %s' % pNum)
partIsBad = 1
                    elif len(mt.val) != mp.dim:
complaints.append(' Composition val is wrong length (%i), but dim is %i' % (
len(mt.val), mp.dim))
partIsBad = 1
# We don't want multiple rMatrices or free rMatrices if mp.dim is 2
if mp.dim == 2:
if mp.nRMatrices > 1:
complaints.append(' Part %s is dim 2, but we have more than one rMatrix' % pNum)
partIsBad = 1
mt = mp.rMatrices[0] # hopefully only one
if mt.free:
complaints.append(' Part %s is dim 2, but rMatrix 0 is free' % pNum)
partIsBad = 1
# If isMixture, then it may not have nGdasrvs
if mp.isMixture:
if mp.nGdasrvs:
complaints.append(' If it isMixture, then gdasrv may not be on.')
partIsBad = 1
# If isMixture, then it cannot be isHet
if mp.isMixture:
for n in self.nodes:
n.parts[pNum].compNum = -1
if n != self.root:
n.br.parts[pNum].rMatrixNum = -1
if mp.nGammaCat > 1:
n.br.parts[pNum].gdasrvNum = -1
if mp.isMixture:
mt = mp.mixture
if not mt.freqs or not mt.rates:
complaints.append(' This week, you must specify mixture freqs and rates.')
partIsBad = 1
print('mt.freqs = %s' % mt.freqs)
print('mt.rates = %s' % mt.rates)
if len(mt.freqs) != len(mt.rates):
complaints.append(' Lengths of mixture freqs and rates differ.')
partIsBad = 1
mp.nCat = mp.nComps * mp.nRMatrices
#if nCompsTimesNRMatrices == 1:
# complaints.append(' nComps * nRMatrices = 1, no point in having a mixture.')
# partIsBad = 1
if len(mt.freqs) != mp.nCat:
complaints.append(' Wrong length of mixture freqs and rates. Should be %i' % mp.nCat)
partIsBad = 1
#print "Freqs = %s" % mt.freqs
#print "Rates = %s" % mt.rates
theSum = sum(mt.freqs)
if theSum != 1.0:
for i in range(len(mt.freqs)):
mt.freqs[i] /= theSum
theSum = 0.0
for i in range(len(mt.freqs)):
theSum += mt.freqs[i] * mt.rates[i]
#print "Mixture mean = %f (un-normalized)" % theSum
if theSum != 1.0:
for i in range(len(mt.freqs)):
mt.rates[i] /= theSum
if 1:
theSum = 0
for i in range(len(mt.freqs)):
theSum += mt.freqs[i] * mt.rates[i]
if theSum < 1.0 - 1.0e-9 or theSum > 1.0 + 1.0e-9:
gm.append("Failed to normalize mixture rates. Sum = %19.17f" % theSum)
raise Glitch(gm)
#else:
# print "...successfully normalized mixture rates."
#print "Freqs = %s" % mt.freqs
#print "Rates = %s" % mt.rates
else: # not isMixture
mp.nCat = mp.nGammaCat
# If the model part isHet, we need to check that all nodes
# have something assigned, and that all model things are
# used. If the model part is not het, we can skip that,
# but we need to check that all the
# node.parts[pNum].compNum are 0, and all the
# node.br.parts[pNum].rMatrixNum and
# node.br.parts[pNum].gdasrvNum are set to 0.
if not mp.isHet:
#print "model part %i is not het" % pNum
for n in self.nodes:
#print "pNum = %i, n.nodeNum=%i, len n.parts = %i" % (pNum, n.nodeNum, len(n.parts))
n.parts[pNum].compNum = 0
if n != self.root:
n.br.parts[pNum].rMatrixNum = 0
if mp.nGammaCat > 1:
n.br.parts[pNum].gdasrvNum = 0
else: # isHet
# If there is only one comp, rMatrix, or gdasrv, then simply set it.
if mp.nComps == 1:
for n in self.nodes:
n.parts[pNum].compNum = 0
if mp.nRMatrices == 1:
for n in self.nodes:
if n != self.root:
n.br.parts[pNum].rMatrixNum = 0
if mp.nGammaCat > 1 and mp.nGdasrvs == 1:
for n in self.nodes:
if n != self.root:
n.br.parts[pNum].gdasrvNum = 0
#print "model part %i is het" % pNum
# New ad hoc attribute 'isUsed', to keep track of whether any node uses it.
for mt in mp.comps:
mt.isUsed = 0
for mt in mp.rMatrices:
mt.isUsed = 0
for mt in mp.gdasrvs:
mt.isUsed = 0
# Does every node have all required things?
for n in self.nodes:
mtNum = n.parts[pNum].compNum
if mtNum >= 0 and mtNum < mp.nComps:
mt = mp.comps[mtNum]
mt.isUsed = 1
else:
complaints.append(' Part %s, node %s has no comp.' % (pNum, n.nodeNum))
partIsBad = 1
if n != self.root:
mtNum = n.br.parts[pNum].rMatrixNum
if mtNum >= 0 and mtNum < mp.nRMatrices:
mt = mp.rMatrices[n.br.parts[pNum].rMatrixNum]
mt.isUsed = 1
else:
complaints.append(' Part %s, node %s has no rMatrix.' % (pNum, n.nodeNum))
partIsBad = 1
if mp.nGammaCat > 1:
mtNum = n.br.parts[pNum].gdasrvNum
if mtNum >= 0 and mtNum < mp.nGdasrvs:
mt = mp.gdasrvs[n.br.parts[pNum].gdasrvNum]
mt.isUsed = 1
else:
complaints.append(' Part %s, node %s has no gdasrv. nGammaCat=%s' % (
pNum, n.nodeNum, mp.nGammaCat))
partIsBad = 1
if mp.nGammaCat == 1:
if n.br.parts[pNum].gdasrvNum != -1:
complaints.append(' Part %s, node %s has a gdasrv, but nGammaCat is 1.' % (
pNum, n.nodeNum))
partIsBad = 1
# Is every model thing used?
if not mp.rjComp:
for mt in mp.comps:
if not mt.isUsed:
complaints.append(' Part %s, comp %s is not used.' % (pNum, mt.num))
partIsBad = 1
if not mp.rjRMatrix:
for mt in mp.rMatrices:
if not mt.isUsed:
complaints.append(' Part %s, rMatrix %s is not used.' % (pNum, mt.num))
partIsBad = 1
for mt in mp.gdasrvs:
if not mt.isUsed:
complaints.append(' Part %s, gdasrv %s is not used.' % (pNum, mt.num))
partIsBad = 1
# Clean up ad hoc attr 'isUsed'
for mt in mp.comps:
del(mt.isUsed)
for mt in mp.rMatrices:
del(mt.isUsed)
for mt in mp.gdasrvs:
del(mt.isUsed)
if partIsBad:
isBad = 1
else:
complaints.append(' ok')
# ##################################
if resetEmpiricalComps:
self.setEmpiricalComps()
# self.model.isHet if any part isHet
self.model.isHet = 0
for pNum in range(self.model.nParts):
if self.model.parts[pNum].isHet:
self.model.isHet = 1
break
# relativeRates
self.model.doRelRates = 0
if self.model.nParts > 1:
for p in self.model.parts:
if p.relRate != 1.0: # This week, the default relRate is 1.0
self.model.doRelRates = 1
break
if self.model.relRatesAreFree:
self.model.doRelRates = 1
if self.model.doRelRates:
totDataLen = 0
for p in self.data.parts:
totDataLen += p.nChar
fact = 0.0
for i in range(self.model.nParts):
fact += (self.model.parts[i].relRate * self.data.parts[i].nChar)
fact = float(totDataLen) / fact
for p in self.model.parts:
p.relRate *= fact
if 0:
print("RelativeRates (adjusted for length)")
for i in range(self.model.nParts):
p = self.model.parts[i]
print(" part %s, nChar %5s, relRate %s" % (p.num, self.data.parts[i].nChar, p.relRate))
if 1:
total = 0.0
for i in range(self.model.nParts):
total += (self.model.parts[i].relRate * (float(self.data.parts[i].nChar) / float(totDataLen)))
if abs(total - 1.0) > 1.0e-12:
gm.append('Error in relativeRate calculation (total=%s).' % total)
raise Glitch(gm)
#print "modelSanityCheck. relRatesAreFree=%s, doRelRates=%s" % (self.model.relRatesAreFree, self.model.doRelRates)
# tSCovarion
for p in self.model.parts:
if p.tSCovarion:
if p.nComps > 1 or p.nRMatrices > 1 or p.nGammaCat > 1:
gm.append("When tSCovarion is on, there should be no heterogeneity in comps, rMatrices, and gdasrv.")
raise Glitch(gm)
if p.pInvar.val > 0.0 or p.pInvar.free:
gm.append("When tSCovarion is on, you can't use pInvar. Turn it off.")
raise Glitch(gm)
# model.nFreePrams
self.model.nFreePrams = 0
for mp in self.model.parts:
for mt in mp.comps:
if mt.free:
self.model.nFreePrams += mp.dim - 1
for mt in mp.rMatrices:
if mt.free:
if mt.spec == '2p':
self.model.nFreePrams += 1
else:
                        self.model.nFreePrams += (((mp.dim * mp.dim) - mp.dim) // 2) - 1
for mt in mp.gdasrvs:
if mt.free:
self.model.nFreePrams += 1
if mp.pInvar.free:
self.model.nFreePrams += 1
if mp.doTSCovarion and mp.tSCovarion.free:
self.model.nFreePrams += 2
if mp.isMixture and mp.mixture.free:
self.model.nFreePrams += 2 * (len(mp.mixture.freqs) - 1)
#print "Tree.modelSanityCheck(). Counted %i free params." % self.model.nFreePrams
if self.model.doRelRates and self.model.relRatesAreFree:
self.model.nFreePrams += self.model.nParts - 1
if isBad:
gm.append("(Indices are zero-based.)")
gm += complaints
raise Glitch(gm)
def setEmpiricalComps(self):
"""Set any empirical model comps to the comp of the data.
This is done by self.modelSanityCheck(), but sometimes you may
want to do it at other times. For example, do this after
exchanging Data objects, or after simulating. In those cases
there does not seem to be a reasonable way to do it
automatically."""
complaintHead = '\nTree.setEmpiricalComps()'
gm = [complaintHead]
if not self.model:
gm.append("This tree has no model.")
raise Glitch(gm)
if not self.data:
gm.append("This tree has no data.")
raise Glitch(gm)
for mp in self.model.parts:
for c in mp.comps:
if c.spec == 'empirical':
#print "got empirical comp, comp %s in part %s. (nComps=%i, isHet=%s)" % (
# c.num, mp.num, mp.nComps, mp.isHet)
if not mp.isHet:
seqNums = None
elif mp.nComps == 1:
seqNums = None
else:
seqNums = []
#for n in self.nodes:
# print "node %2i seqNum=%3i n.parts[%i].compNum=%3i" % (
# n.nodeNum, n.seqNum, mp.num, n.parts[mp.num].compNum)
for n in self.nodes:
if n.parts[mp.num].compNum == c.num: # Is the comp used by the node?
#print "comp %s is used by node %s" % (c.num, n.nodeNum)
if n.isLeaf:
nodeNums = [n.nodeNum]
else:
nodeNums = self.getNodeNumsAbove(n, leavesOnly=1)
#gm.append("nodeNums for %s = %s" % (n.nodeNum, nodeNums)
for i in nodeNums:
seqNum = self.nodes[i].seqNum
if seqNum not in seqNums:
seqNums.append(seqNum)
#print "setEmpiricalComps() got seqNums = %s" % seqNums
if not seqNums:
gm.append("Something is wrong here. part %i, comp %i." % (mp.num, c.num))
gm.append("This comp object has no sequences from which to get the empirical comp.")
gm.append("Maybe you need to yourTree.setModelThing() or ")
gm.append("yourTree.setModelThingsRandomly()")
gm.append("Or maybe its an extra comp in an RJ MCMC? -- If so, fix")
gm.append("the comp val to eg 'equal'.")
raise Glitch(gm)
c.val = self.data.parts[mp.num].composition(seqNums) # dim long, not dim - 1
#print " seqNums=%s, c.val=%s" % (seqNums, c.val)
needsNormalizing = 0
theSum = 0.0
for i in range(len(c.val)):
if c.val[i] < var.PIVEC_MIN:
c.val[i] = var.PIVEC_MIN + (0.2 * var.PIVEC_MIN) + (var.PIVEC_MIN * random.random())
needsNormalizing = 1
theSum += c.val[i]
#print "setEmpiricalComps(). Got theSum = %i" % theSum
# We may have asked for the comp of an empty sequence,
# in which case val is all zeros. Check for that.
if abs(1.0 - theSum) > 0.1:
gm.append("Something is very wrong here. Empirical comp vals should sum to 1.0")
gm.append("The sum of the comp vals for part %s, comp %s, is %s" % (mp.num, c.num, theSum))
gm.append("Probably the sequences from which the composition was taken were blank.")
raise Glitch(gm)
if needsNormalizing or abs(theSum - 1.0) > 1e-16:
for i in range(len(c.val)):
c.val[i] /= theSum
class RMatrix(object):
def __init__(self):
self.num = -1
self.partNum = None
        self.free = None
        self.spec = None
        self.symbol = None
        self.val = None
self.nNodes = 0
self.rj_isInPool = False
self.rj_f = 0.0
class Gdasrv(object):
def __init__(self):
self.num = -1
self.partNum = None
self.free = None
self.symbol = None
#self.val=None
self._val = numpy.zeros(1, numpy.float)
self.freqs = None
self.rates = None
self.nGammaCat = None
self.c = None # a p4_gdasrvStruct, if it exists.
self.nNodes = 0
def _setVal(self, theVal):
if theVal < 1.e-16:
gm = ["Gdasrv._setVal()"]
gm.append("Attempt to set Gdasrv.val (ie alpha) to %g" % theVal)
gm.append("However, we cannot calculate the discrete categories with a value so low.")
raise Glitch(gm)
self._val[0] = theVal
self.calcRates()
val = property(lambda self: self._val, _setVal)
def calcRates(self):
# Use either the p4_gdasrvStruct, or just use the NumPy
# array vals (np = NumPy).
#print "self.c = %s" % self.c
if self.c:
pf.gdasrvCalcRates(self.c)
else:
pf.gdasrvCalcRates_np(self.nGammaCat, self._val[0], self.freqs, self.rates)
#print 'xxx self.rates = %s, val=%s' % (self.rates, self._val[0])
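# Usage sketch for the val property above (illustrative; assumes freqs and
# rates are numpy arrays of length nGammaCat, as they are set up elsewhere
# in p4, and that no C-side p4_gdasrvStruct is attached):
#   g = Gdasrv()
#   g.nGammaCat = 4
#   g.freqs = numpy.ones(4, numpy.float) / 4.0
#   g.rates = numpy.zeros(4, numpy.float)
#   g.val = 0.5   # triggers calcRates() via the property setter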
class Comp(object):
def __init__(self):
self.num = -1
self.partNum = None
        self.free = None
        self.spec = None
        self.symbol = None
        self.val = None
self.nNodes = 0
self.rj_isInPool = False
self.rj_f = 0.0
class PInvar(object):
def __init__(self):
self.num = -1
self.partNum = None
        self.free = None
        self.val = None
class TSCovarion(object):
def __init__(self):
self.partNum = None
self.free = None
self.s1 = None
self.s2 = None
class Mixture(object):
def __init__(self):
self.partNum = None
self.free = None
self.freqs = None
self.rates = None
self.nMix = None
| gpl-2.0 |
charbeljc/OCB | addons/point_of_sale/wizard/pos_details.py | 225 | 2386 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv, fields
class pos_details(osv.osv_memory):
_name = 'pos.details'
_description = 'Sales Details'
_columns = {
'date_start': fields.date('Date Start', required=True),
'date_end': fields.date('Date End', required=True),
'user_ids': fields.many2many('res.users', 'pos_details_report_user_rel', 'user_id', 'wizard_id', 'Salespeople'),
}
_defaults = {
'date_start': fields.date.context_today,
'date_end': fields.date.context_today,
}
def print_report(self, cr, uid, ids, context=None):
"""
        Get the date range and salespeople from the wizard and return the
        report action.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return: the report action
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, ['date_start', 'date_end', 'user_ids'], context=context)
res = res and res[0] or {}
datas['form'] = res
if res.get('id',False):
datas['ids']=[res['id']]
return self.pool['report'].get_action(cr, uid, [], 'point_of_sale.report_detailsofsales', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iModels/metamds | setup.py | 1 | 1916 | """metaMDS """
from __future__ import print_function
import os
import sys
from setuptools import setup, find_packages
#####################################
VERSION = "0.1.0"
ISRELEASED = False
if ISRELEASED:
__version__ = VERSION
else:
__version__ = VERSION + '.dev0'
#####################################
with open('metamds/version.py', 'w') as version_file:
version_file.write('version="{0}"\n'.format(__version__))
with open('__conda_version__.txt', 'w') as conda_version:
conda_version.write(__version__)
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
with open('requirements.txt') as reqs_file:
reqs = [line.strip() for line in reqs_file]
setup(
name='metamds',
version=__version__,
description=__doc__.split('\n')[0],
long_description=__doc__,
author='Janos Sallai, Christoph Klein',
author_email='janos.sallai@vanderbilt.edu, christoph.klein@vanderbilt.edu',
url='https://github.com/imodels/metamds',
download_url='https://github.com/imodels/metamds/tarball/{}'.format(__version__),
packages=find_packages(),
package_dir={'metamds': 'metamds'},
include_package_data=True,
install_requires=reqs,
license="MIT",
zip_safe=False,
keywords='metamds',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Chemistry',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
test_suite='tests',
)
| mit |
rsivapr/scikit-learn | sklearn/metrics/pairwise.py | 2 | 37180 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects
exactly alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# License: BSD 3 clause
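# A small numeric illustration of distance-to-similarity conversion 1 above
# (illustrative values; gamma = 1 / num_features is only a heuristic, here
# 0.5 for two features):
#   >>> import numpy as np
#   >>> D = np.array([[0., 2.], [2., 0.]])
#   >>> np.exp(-D * 0.5)
#   array([[ 1.        ,  0.36787944],
#          [ 0.36787944,  1.        ]])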
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils import safe_asarray
from ..utils.extmath import safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = atleast2d_or_csr(X)
else:
X = atleast2d_or_csr(X)
Y = atleast2d_or_csr(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if not (X.dtype == Y.dtype == np.float32):
if Y is X:
X = Y = safe_asarray(X, dtype=np.float)
else:
X = safe_asarray(X, dtype=np.float)
Y = safe_asarray(Y, dtype=np.float)
return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two main advantages. First, it is computationally
efficient when dealing with sparse data. Second, if x varies but y
remains unchanged, then the right-most dot-product `dot(y, y)` can be
pre-computed.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared : array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if issparse(X):
XX = X.multiply(X).sum(axis=1)
else:
XX = np.sum(X * X, axis=1)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is None:
if issparse(Y):
# scipy.sparse matrices don't have element-wise scalar
# exponentiation, and tocsr has a copy kwarg only on CSR matrices.
YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
YY.data **= 2
YY = np.asarray(YY.sum(axis=1)).T
else:
YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
else:
YY = atleast2d_or_csr(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
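# Sketch of the precomputation mentioned in the docstring above: when Y is
# fixed and X varies, ``dot(y, y)`` can be computed once and reused
# (X1, X2, Y are assumed dense 2-D arrays; the names are illustrative):
#   >>> Y_sq = (Y ** 2).sum(axis=1)
#   >>> D1 = euclidean_distances(X1, Y, Y_norm_squared=Y_sq)
#   >>> D2 = euclidean_distances(X2, Y, Y_norm_squared=Y_sq)  # reuses Y_sq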
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
==========
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
=======
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
========
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
    if metric_kwargs is None:
        metric_kwargs = {}
    dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype='int32')
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
dist_chunk = np.dot(X_chunk, Y_chunk.T)
dist_chunk *= -2
dist_chunk += (X_chunk * X_chunk
).sum(axis=1)[:, np.newaxis]
dist_chunk += (Y_chunk * Y_chunk
).sum(axis=1)[np.newaxis, :]
np.maximum(dist_chunk, 0, dist_chunk)
else:
dist_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
dist_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = dist_chunk.argmin(axis=1)
min_values = dist_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x] = np.where(
flags, min_indices + chunk_y.start, indices[chunk_x])
values[chunk_x] = np.where(
flags, min_values, values[chunk_x])
if metric == "euclidean" and not metric_kwargs.get("squared", False):
values = np.sqrt(values)
return indices, values
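# Equivalence sketch for the function above (illustrative; X and Y are
# assumed small dense arrays): the chunked computation agrees with the
# naive one,
#   >>> ind, dist = pairwise_distances_argmin_min(X, Y)
#   >>> D = pairwise_distances(X, Y)
#   >>> np.all(ind == D.argmin(axis=1)) and np.allclose(dist, D.min(axis=1))
#   True
# but only batch_size * batch_size distances are held in memory at a time.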
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
==========
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
=======
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
========
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
size_threshold : int, default=5e8
Avoid creating temporary matrices bigger than size_threshold (in
bytes). If the problem size gets too big, the implementation then
breaks it down in smaller problems.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
if issparse(X) or issparse(Y):
raise ValueError("manhattan_distance does not support sparse"
" matrices.")
X, Y = check_pairwise_arrays(X, Y)
temporary_size = X.size * Y.shape[-1]
# Convert to bytes
temporary_size *= X.itemsize
if temporary_size > size_threshold and sum_over_features:
# Broadcasting the full thing would be too big: it's on the order
# of magnitude of the gigabyte
D = np.empty((X.shape[0], Y.shape[0]), dtype=X.dtype)
index = 0
increment = 1 + int(size_threshold / float(temporary_size) *
X.shape[0])
while index < X.shape[0]:
this_slice = slice(index, index + increment)
tmp = X[this_slice, np.newaxis, :] - Y[np.newaxis, :, :]
tmp = np.abs(tmp, tmp)
tmp = np.sum(tmp, axis=2)
D[this_slice] = tmp
index += increment
else:
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((-1, X.shape[1]))
return D
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
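# Quick numeric check of the formula above (illustrative):
#   >>> import numpy as np
#   >>> X = np.array([[1., 2.]])
#   >>> polynomial_kernel(X, X, degree=2, gamma=1, coef0=1)  # (1*5 + 1)**2
#   array([[ 36.]])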
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
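# Quick numeric check of the formula above (illustrative): the two rows are
# at squared distance 1, so the off-diagonal entries are exp(-1).
#   >>> import numpy as np
#   >>> X = np.array([[0., 0.], [1., 0.]])
#   >>> rbf_kernel(X, gamma=1.)
#   array([[ 1.        ,  0.36787944],
#          [ 0.36787944,  1.        ]])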
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
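# Quick numeric check (illustrative): for rows x1 = [1, 0] and x2 = [1, 1],
#   K(x1, x2) = <x1, x2> / (||x1|| * ||x2||) = 1 / (1 * sqrt(2)) ~= 0.70710678
# and every row with itself gives 1.0.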
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
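# Quick numeric check of the formula above (illustrative): for histogram
# rows x = [0.5, 0.5] and y = [1.0, 0.0],
#   Sum [(x - y)^2 / (x + y)] = 0.25/1.5 + 0.25/0.5 = 2/3
# so with gamma=0.5 the kernel value is exp(-0.5 * 2/3) = exp(-1/3) ~= 0.7165.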
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to
'euclidean', 'l2' and 'cosine'.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
# FIXME: np.zeros can be replaced by np.empty
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
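# Sketch of the callable-metric path above (illustrative; the metric name is
# made up for the example):
#   >>> import numpy as np
#   >>> def chebyshev_like(u, v):
#   ...     return float(np.abs(u - v).max())
#   >>> pairwise_distances(np.array([[0., 1.], [2., 4.]]), metric=chebyshev_like)
#   array([[ 0.,  3.],
#          [ 3.,  0.]])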
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'sigmoid', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
    Alternatively, if metric is a callable function, it is called on each
    pair of instances (rows) and the resulting value recorded. The callable
    should take two arrays from X as input and return a value indicating
    the kernel between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
| bsd-3-clause |
strogo/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/gfortran.py | 61 | 2302 | """SCons.Tool.gfortran
Tool-specific initialization for gfortran, the GNU Fortran 95/Fortran
2003 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gfortran.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
import fortran
def generate(env):
"""Add Builders and construction variables for gfortran to an
Environment."""
fortran.generate(env)
for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
env['%s' % dialect] = 'gfortran'
env['SH%s' % dialect] = '$%s' % dialect
if env['PLATFORM'] in ['cygwin', 'win32']:
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS' % dialect)
else:
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)
env['INC%sPREFIX' % dialect] = "-I"
env['INC%sSUFFIX' % dialect] = ""
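# For illustration (assuming a non-Windows platform), generate(env) sets,
# for example for the F90 dialect:
#   env['F90']          -> 'gfortran'
#   env['SHF90']        -> '$F90'
#   env['SHF90FLAGS']   -> CLVar('$F90FLAGS -fPIC')
#   env['INCF90PREFIX'] -> '-I'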
def exists(env):
return env.Detect('gfortran')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
sergiohgz/incubator-airflow | tests/contrib/sensors/test_ftp_sensor.py | 15 | 2332 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from ftplib import error_perm
from mock import MagicMock
from airflow.contrib.hooks.ftp_hook import FTPHook
from airflow.contrib.sensors.ftp_sensor import FTPSensor
class TestFTPSensor(unittest.TestCase):
def setUp(self):
super(TestFTPSensor, self).setUp()
self._create_hook_orig = FTPSensor._create_hook
self.hook_mock = MagicMock(spec=FTPHook)
def _create_hook_mock(sensor):
mock = MagicMock()
mock.__enter__ = lambda x: self.hook_mock
return mock
FTPSensor._create_hook = _create_hook_mock
def tearDown(self):
FTPSensor._create_hook = self._create_hook_orig
super(TestFTPSensor, self).tearDown()
def test_poke(self):
op = FTPSensor(path="foobar.json", ftp_conn_id="bob_ftp",
task_id="test_task")
self.hook_mock.get_mod_time.side_effect = \
[error_perm("550: Can't check for file existence"), None]
self.assertFalse(op.poke(None))
self.assertTrue(op.poke(None))
def test_poke_fails_due_error(self):
op = FTPSensor(path="foobar.json", ftp_conn_id="bob_ftp",
task_id="test_task")
self.hook_mock.get_mod_time.side_effect = \
error_perm("530: Login authentication failed")
with self.assertRaises(error_perm) as context:
op.execute(None)
self.assertTrue("530" in str(context.exception))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kaixinjxq/crosswalk-test-suite | apptools/apptools-windows-tests/apptools/init_manifest.py | 30 | 2793 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_init_manifest_defaultPlatforms(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-app manifest " + \
comm.XwalkPath + "org.xwalk.test"
os.system(cmd)
with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file:
data = json.load(json_file)
comm.clear("org.xwalk.test")
self.assertEquals(data['xwalk_target_platforms'][0].strip(os.linesep), "android")
def test_init_manifest_invalidPlatforms(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-app manifest " + \
comm.XwalkPath + "org.xwalk.test --platform=invalid"
return_code = os.system(cmd)
comm.clear("org.xwalk.test")
self.assertNotEquals(return_code, 0)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
vnsofthe/odoo-dev | addons/web_d3_chart/base.py | 1 | 5165 | # -*- coding: utf-8 -*-
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.base.ir.ir_actions import VIEW_TYPES
from logging import getLogger
from lxml import etree
_logger = getLogger(__name__)
VIEW_TYPE = ('chart-d3', _('Chart D3'))
VIEW_TYPES.append(VIEW_TYPE)
class iruiview(osv.Model):
_name = "ir.ui.view"
_inherit = ['ir.ui.view']
_columns={}
    def __init__(self, pool, cr):
        res = super(iruiview, self).__init__(pool, cr)
        # _logger.info(type(self._columns))
        select = [k for k, v in self._columns['type'].selection]
        if VIEW_TYPE[0] not in select:
            self._columns['type'].selection.append(VIEW_TYPE)
        return res
def valid_type_chart_d3_field_exist(self, cr, uid, model, field,
context=None):
domain = [
('model', '=', model),
('name', '=', field),
]
if not self.pool.get('ir.model.fields').search(cr, uid, domain,
context=context):
return False
return True
def valid_type_chart_d3_options(self, cr, uid, arch, context=None):
res = True
# TODO
return res
def valid_type_chart_d3_x_axis(self, cr, uid, arch, model, context=None):
axis = 'x-axis'
res = True
_axis = arch.xpath(axis)
if not _axis:
res = False
_logger.error("The %r node must have %r node" % (
VIEW_TYPE[0], axis))
elif len(_axis) > 1:
res = False
_logger.error(
"the %r node must only have 1 %r node" % (VIEW_TYPE[0], axis))
else:
field = _axis[0].attrib.get('field')
if not self.valid_type_chart_d3_field_exist(cr, uid, model, field,
context=context):
res = False
_logger.error(
"the field %r in the %r node doesn't exist" %
(_axis[0].text, axis))
return res
def valid_type_chart_d3_y_axis(self, cr, uid, arch, model, context=None):
axis = 'y-axis'
res = True
_axis = arch.xpath(axis)
if not _axis:
res = False
_logger.error("The %r node must have %r node" % (
VIEW_TYPE[0], axis))
elif len(_axis) > 1:
res = False
_logger.error(
"the %r node must only have 1 %r node" % (VIEW_TYPE[0], axis))
else:
fields = _axis[0].getchildren()
if not fields:
res = False
_logger.error("the %r.%r node must have %r nodes" % (
VIEW_TYPE[0], axis, 'field'))
for field in fields:
fname = field.attrib.get('name')
if not self.valid_type_chart_d3_field_exist(cr, uid, model,
fname,
context=context):
res = False
_logger.error(
"the field %r in the %r.%r node doesn't exist" %
(fname, axis, 'field'))
return res
def valid_type_chart_d3(self, cr, uid, arch, model, context=None):
res = True
if arch.tag == VIEW_TYPE[0] and not arch.attrib.get('type'):
res = False
_logger.error(
"The %r node must have 'type' attribute" % VIEW_TYPE[0])
if not self.valid_type_chart_d3_x_axis(cr, uid, arch, model,
context=context):
res = False
if not self.valid_type_chart_d3_y_axis(cr, uid, arch, model,
context=context):
res = False
if not self.valid_type_chart_d3_options(cr, uid, arch, context=context):
res = False
return res
def _check_xml_chart_d3(self, cr, uid, ids, context=None):
domain = [
('id', 'in', ids),
('type', '=', VIEW_TYPE[0]),
]
view_ids = self.search(cr, uid, domain, context=context)
for view in self.browse(cr, uid, view_ids, context=context):
fvg = self.pool.get(view.model).fields_view_get(
cr, uid, view_id=view.id, view_type=view.type, context=context)
view_arch_utf8 = fvg['arch']
view_docs = [etree.fromstring(view_arch_utf8)]
if view_docs[0].tag == 'data':
view_docs = view_docs[0]
for view_arch in view_docs:
if not self.valid_type_chart_d3(cr, uid, view_arch, view.model,
context=context):
return False
return True
_constraints = [
(
_check_xml_chart_d3,
            'Invalid XML for chart D3 view architecture',
['arch'],
),
]
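    # Illustrative example (assumed markup, inferred from the checks above):
    # a minimal <chart-d3> view architecture that passes the constraint is
    #
    #   <chart-d3 type="bar">
    #       <x-axis field="date"/>
    #       <y-axis>
    #           <field name="amount"/>
    #       </y-axis>
    #   </chart-d3>
    #
    # where 'date' and 'amount' must be existing fields on the view's model.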
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
timpalpant/calibre | src/calibre/library/catalogs/epub_mobi.py | 14 | 25308 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import datetime, os, time
from collections import namedtuple
from calibre import strftime
from calibre.customize import CatalogPlugin
from calibre.customize.conversion import OptionRecommendation, DummyReporter
from calibre.library import current_library_name
from calibre.library.catalogs import AuthorSortMismatchException, EmptyCatalogException
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.localization import calibre_langcode_to_name, canonicalize_lang, get_lang
Option = namedtuple('Option', 'option, default, dest, action, help')
class EPUB_MOBI(CatalogPlugin):
'ePub catalog generator'
name = 'Catalog_EPUB_MOBI'
description = 'AZW3/EPUB/MOBI catalog generator'
supported_platforms = ['windows', 'osx', 'linux']
minimum_calibre_version = (0, 7, 40)
author = 'Greg Riker'
version = (1, 0, 0)
file_types = set(['azw3', 'epub', 'mobi'])
THUMB_SMALLEST = "1.0"
THUMB_LARGEST = "2.0"
cli_options = [Option('--catalog-title', # {{{
default='My Books',
dest='catalog_title',
action=None,
help=_('Title of generated catalog used as title in metadata.\n'
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--cross-reference-authors',
default=False,
dest='cross_reference_authors',
action='store_true',
help=_("Create cross-references in Authors section for books with multiple authors.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--debug-pipeline',
default=None,
dest='debug_pipeline',
action=None,
help=_("Save the output from different stages of the conversion "
"pipeline to the specified "
"directory. Useful if you are unsure at which stage "
"of the conversion process a bug is occurring.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--exclude-genre',
default='\[.+\]|^\+$',
dest='exclude_genre',
action=None,
help=_("Regex describing tags to exclude as genres.\n"
"Default: '%default' excludes bracketed tags, e.g. '[Project Gutenberg]', and '+', the default tag for read books.\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--exclusion-rules',
default="(('Catalogs','Tags','Catalog'),)",
dest='exclusion_rules',
action=None,
help=_("Specifies the rules used to exclude books from the generated catalog.\n"
"The model for an exclusion rule is either\n('<rule name>','Tags','<comma-separated list of tags>') or\n"
"('<rule name>','<custom column>','<pattern>').\n"
"For example:\n"
"(('Archived books','#status','Archived'),)\n"
"will exclude a book with a value of 'Archived' in the custom column 'status'.\n"
"When multiple rules are defined, all rules will be applied.\n"
"Default: \n" + '"' + '%default' + '"' + "\n"
"Applies to AZW3, ePub, MOBI output formats")),
Option('--generate-authors',
default=False,
dest='generate_authors',
action='store_true',
help=_("Include 'Authors' section in catalog.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--generate-descriptions',
default=False,
dest='generate_descriptions',
action='store_true',
help=_("Include 'Descriptions' section in catalog.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--generate-genres',
default=False,
dest='generate_genres',
action='store_true',
help=_("Include 'Genres' section in catalog.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--generate-titles',
default=False,
dest='generate_titles',
action='store_true',
help=_("Include 'Titles' section in catalog.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--generate-series',
default=False,
dest='generate_series',
action='store_true',
help=_("Include 'Series' section in catalog.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--generate-recently-added',
default=False,
dest='generate_recently_added',
action='store_true',
help=_("Include 'Recently Added' section in catalog.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--genre-source-field',
default=_('Tags'),
dest='genre_source_field',
action=None,
help=_("Source field for Genres section.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--header-note-source-field',
default='',
dest='header_note_source_field',
action=None,
help=_("Custom field containing note text to insert in Description header.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--merge-comments-rule',
default='::',
dest='merge_comments_rule',
action=None,
help=_("#<custom field>:[before|after]:[True|False] specifying:\n"
" <custom field> Custom field containing notes to merge with Comments\n"
" [before|after] Placement of notes with respect to Comments\n"
" [True|False] - A horizontal rule is inserted between notes and Comments\n"
"Default: '%default'\n"
"Applies to AZW3, ePub, MOBI output formats")),
Option('--output-profile',
default=None,
dest='output_profile',
action=None,
help=_("Specifies the output profile. In some cases, an output profile is required to optimize the catalog for the device. For example, 'kindle' or 'kindle_dx' creates a structured Table of Contents with Sections and Articles.\n" # noqa
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--prefix-rules',
default="(('Read books','tags','+','\u2713'),('Wishlist item','tags','Wishlist','\u00d7'))",
dest='prefix_rules',
action=None,
help=_("Specifies the rules used to include prefixes indicating read books, wishlist items and other user-specified prefixes.\n"
"The model for a prefix rule is ('<rule name>','<source field>','<pattern>','<prefix>').\n"
"When multiple rules are defined, the first matching rule will be used.\n"
"Default:\n" + '"' + '%default' + '"' + "\n"
"Applies to AZW3, ePub, MOBI output formats")),
Option('--preset',
default=None,
dest='preset',
action=None,
help=_("Use a named preset created with the GUI Catalog builder.\n"
"A preset specifies all settings for building a catalog.\n"
"Default: '%default'\n"
"Applies to AZW3, ePub, MOBI output formats")),
Option('--use-existing-cover',
default=False,
dest='use_existing_cover',
action='store_true',
help=_("Replace existing cover when generating the catalog.\n"
"Default: '%default'\n"
"Applies to: AZW3, ePub, MOBI output formats")),
Option('--thumb-width',
default='1.0',
dest='thumb_width',
action=None,
help=_("Size hint (in inches) for book covers in catalog.\n"
"Range: 1.0 - 2.0\n"
"Default: '%default'\n"
"Applies to AZW3, ePub, MOBI output formats")),
]
# }}}
def run(self, path_to_output, opts, db, notification=DummyReporter()):
from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder
from calibre.utils.logging import default_log as log
from calibre.utils.config import JSONConfig
# If preset specified from the cli, insert stored options from JSON file
if hasattr(opts, 'preset') and opts.preset:
available_presets = JSONConfig("catalog_presets")
            if opts.preset not in available_presets:
if available_presets:
                    print(_('Error: Preset "%s" not found.') % opts.preset)
                    print(_('Stored presets: %s') % ', '.join(sorted(available_presets.keys())))
else:
print(_('Error: No stored presets.'))
return 1
# Copy the relevant preset values to the opts object
for item in available_presets[opts.preset]:
                if item not in ['exclusion_rules_tw', 'format', 'prefix_rules_tw']:
setattr(opts, item, available_presets[opts.preset][item])
# Provide an unconnected device
opts.connected_device = {
'is_device_connected': False,
'kind': None,
'name': None,
'save_template': None,
'serial': None,
'storage': None,
}
# Convert prefix_rules and exclusion_rules from JSON lists to tuples
prs = []
for rule in opts.prefix_rules:
prs.append(tuple(rule))
opts.prefix_rules = tuple(prs)
ers = []
for rule in opts.exclusion_rules:
ers.append(tuple(rule))
opts.exclusion_rules = tuple(ers)
opts.log = log
opts.fmt = self.fmt = path_to_output.rpartition('.')[2]
# Add local options
opts.creator = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y'))
opts.creator_sort_as = '%s %s' % ('calibre', strftime('%Y-%m-%d'))
opts.connected_kindle = False
# Finalize output_profile
op = opts.output_profile
if op is None:
op = 'default'
if opts.connected_device['name'] and 'kindle' in opts.connected_device['name'].lower():
opts.connected_kindle = True
if opts.connected_device['serial'] and \
opts.connected_device['serial'][:4] in ['B004', 'B005']:
op = "kindle_dx"
else:
op = "kindle"
opts.description_clip = 380 if op.endswith('dx') or 'kindle' not in op else 100
opts.author_clip = 100 if op.endswith('dx') or 'kindle' not in op else 60
opts.output_profile = op
opts.basename = "Catalog"
opts.cli_environment = not hasattr(opts, 'sync')
# Hard-wired to always sort descriptions by author, with series after non-series
opts.sort_descriptions_by_author = True
build_log = []
build_log.append(u"%s('%s'): Generating %s %sin %s environment, locale: '%s'" %
(self.name,
current_library_name(),
self.fmt,
'for %s ' % opts.output_profile if opts.output_profile else '',
'CLI' if opts.cli_environment else 'GUI',
calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False))
)
# If exclude_genre is blank, assume user wants all tags as genres
if opts.exclude_genre.strip() == '':
#opts.exclude_genre = '\[^.\]'
#build_log.append(" converting empty exclude_genre to '\[^.\]'")
opts.exclude_genre = 'a^'
build_log.append(" converting empty exclude_genre to 'a^'")
if opts.connected_device['is_device_connected'] and \
opts.connected_device['kind'] == 'device':
if opts.connected_device['serial']:
build_log.append(u" connected_device: '%s' #%s%s " %
(opts.connected_device['name'],
opts.connected_device['serial'][0:4],
'x' * (len(opts.connected_device['serial']) - 4)))
for storage in opts.connected_device['storage']:
if storage:
build_log.append(u" mount point: %s" % storage)
else:
build_log.append(u" connected_device: '%s'" % opts.connected_device['name'])
try:
for storage in opts.connected_device['storage']:
if storage:
build_log.append(u" mount point: %s" % storage)
except:
build_log.append(u" (no mount points)")
else:
build_log.append(u" connected_device: '%s'" % opts.connected_device['name'])
opts_dict = vars(opts)
if opts_dict['ids']:
build_log.append(" book count: %d" % len(opts_dict['ids']))
sections_list = []
if opts.generate_authors:
sections_list.append('Authors')
if opts.generate_titles:
sections_list.append('Titles')
if opts.generate_series:
sections_list.append('Series')
if opts.generate_genres:
sections_list.append('Genres')
if opts.generate_recently_added:
sections_list.append('Recently Added')
if opts.generate_descriptions:
sections_list.append('Descriptions')
if not sections_list:
if opts.cli_environment:
opts.log.warn('*** No Section switches specified, enabling all Sections ***')
opts.generate_authors = True
opts.generate_titles = True
opts.generate_series = True
opts.generate_genres = True
opts.generate_recently_added = True
opts.generate_descriptions = True
sections_list = ['Authors', 'Titles', 'Series', 'Genres', 'Recently Added', 'Descriptions']
else:
opts.log.warn('\n*** No enabled Sections, terminating catalog generation ***')
return ["No Included Sections", "No enabled Sections.\nCheck E-book options tab\n'Included sections'\n"]
if opts.fmt == 'mobi' and sections_list == ['Descriptions']:
warning = _("\n*** Adding 'By Authors' Section required for MOBI output ***")
opts.log.warn(warning)
sections_list.insert(0, 'Authors')
opts.generate_authors = True
opts.log(u" Sections: %s" % ', '.join(sections_list))
opts.section_list = sections_list
# Limit thumb_width to 1.0" - 2.0"
try:
if float(opts.thumb_width) < float(self.THUMB_SMALLEST):
log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST))
opts.thumb_width = self.THUMB_SMALLEST
if float(opts.thumb_width) > float(self.THUMB_LARGEST):
log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_LARGEST))
opts.thumb_width = self.THUMB_LARGEST
opts.thumb_width = "%.2f" % float(opts.thumb_width)
except:
log.error("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST))
opts.thumb_width = "1.0"
# eval prefix_rules if passed from command line
if type(opts.prefix_rules) is not tuple:
try:
opts.prefix_rules = eval(opts.prefix_rules)
except:
log.error("malformed --prefix-rules: %s" % opts.prefix_rules)
raise
for rule in opts.prefix_rules:
if len(rule) != 4:
log.error("incorrect number of args for --prefix-rules: %s" % repr(rule))
# eval exclusion_rules if passed from command line
if type(opts.exclusion_rules) is not tuple:
try:
opts.exclusion_rules = eval(opts.exclusion_rules)
except:
log.error("malformed --exclusion-rules: %s" % opts.exclusion_rules)
raise
for rule in opts.exclusion_rules:
if len(rule) != 3:
log.error("incorrect number of args for --exclusion-rules: %s" % repr(rule))
# Display opts
keys = sorted(opts_dict.keys())
build_log.append(" opts:")
for key in keys:
if key in ['catalog_title', 'author_clip', 'connected_kindle', 'creator',
'cross_reference_authors', 'description_clip', 'exclude_book_marker',
'exclude_genre', 'exclude_tags', 'exclusion_rules', 'fmt',
'genre_source_field', 'header_note_source_field', 'merge_comments_rule',
'output_profile', 'prefix_rules', 'preset', 'read_book_marker',
'search_text', 'sort_by', 'sort_descriptions_by_author', 'sync',
'thumb_width', 'use_existing_cover', 'wishlist_tag']:
build_log.append(" %s: %s" % (key, repr(opts_dict[key])))
if opts.verbose:
log('\n'.join(line for line in build_log))
# Capture start_time
opts.start_time = time.time()
self.opts = opts
if opts.verbose:
log.info(" Begin catalog source generation (%s)" %
str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))
# Launch the Catalog builder
catalog = CatalogBuilder(db, opts, self, report_progress=notification)
try:
catalog.build_sources()
if opts.verbose:
log.info(" Completed catalog source generation (%s)\n" %
str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))
except (AuthorSortMismatchException, EmptyCatalogException), e:
log.error(" *** Terminated catalog generation: %s ***" % e)
except:
log.error(" unhandled exception in catalog generator")
raise
else:
recommendations = []
recommendations.append(('remove_fake_margins', False,
OptionRecommendation.HIGH))
recommendations.append(('comments', '', OptionRecommendation.HIGH))
"""
>>> Use to debug generated catalog code before pipeline conversion <<<
"""
GENERATE_DEBUG_EPUB = False
if GENERATE_DEBUG_EPUB:
catalog_debug_path = os.path.join(os.path.expanduser('~'), 'Desktop', 'Catalog debug')
setattr(opts, 'debug_pipeline', os.path.expanduser(catalog_debug_path))
dp = getattr(opts, 'debug_pipeline', None)
if dp is not None:
recommendations.append(('debug_pipeline', dp,
OptionRecommendation.HIGH))
if opts.output_profile and opts.output_profile.startswith("kindle"):
recommendations.append(('output_profile', opts.output_profile,
OptionRecommendation.HIGH))
recommendations.append(('book_producer', opts.output_profile,
OptionRecommendation.HIGH))
if opts.fmt == 'mobi':
recommendations.append(('no_inline_toc', True,
OptionRecommendation.HIGH))
recommendations.append(('verbose', 2,
OptionRecommendation.HIGH))
# Use existing cover or generate new cover
cpath = None
existing_cover = False
try:
search_text = 'title:"%s" author:%s' % (
opts.catalog_title.replace('"', '\\"'), 'calibre')
matches = db.search(search_text, return_matches=True, sort_results=False)
if matches:
cpath = db.cover(matches[0], index_is_id=True, as_path=True)
if cpath and os.path.exists(cpath):
existing_cover = True
except:
pass
if self.opts.use_existing_cover and not existing_cover:
log.warning("no existing catalog cover found")
if self.opts.use_existing_cover and existing_cover:
recommendations.append(('cover', cpath, OptionRecommendation.HIGH))
log.info("using existing catalog cover")
else:
from calibre.ebooks.covers import calibre_cover2
log.info("replacing catalog cover")
new_cover_path = PersistentTemporaryFile(suffix='.jpg')
new_cover = calibre_cover2(opts.catalog_title, 'calibre')
new_cover_path.write(new_cover)
new_cover_path.close()
recommendations.append(('cover', new_cover_path.name, OptionRecommendation.HIGH))
# Run ebook-convert
from calibre.ebooks.conversion.plumber import Plumber
plumber = Plumber(os.path.join(catalog.catalog_path, opts.basename + '.opf'),
path_to_output, log, report_progress=notification,
abort_after_input_dump=False)
plumber.merge_ui_recommendations(recommendations)
plumber.run()
try:
os.remove(cpath)
except:
pass
if GENERATE_DEBUG_EPUB:
from calibre.ebooks.epub import initialize_container
from calibre.ebooks.tweak import zip_rebuilder
from calibre.utils.zipfile import ZipFile
input_path = os.path.join(catalog_debug_path, 'input')
epub_shell = os.path.join(catalog_debug_path, 'epub_shell.zip')
initialize_container(epub_shell, opf_name='content.opf')
with ZipFile(epub_shell, 'r') as zf:
zf.extractall(path=input_path)
os.remove(epub_shell)
zip_rebuilder(input_path, os.path.join(catalog_debug_path, 'input.epub'))
if opts.verbose:
log.info(" Catalog creation complete (%s)\n" %
str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))
# returns to gui2.actions.catalog:catalog_generated()
return catalog.error
| gpl-3.0 |
ForensicArtifacts/artifacts | artifacts/registry.py | 4 | 7303 | # -*- coding: utf-8 -*-
"""The artifact definitions registry."""
from __future__ import unicode_literals
from artifacts import definitions
from artifacts import errors
from artifacts import source_type
class ArtifactDefinitionsRegistry(object):
"""Artifact definitions registry."""
_source_type_classes = {
definitions.TYPE_INDICATOR_ARTIFACT_GROUP:
source_type.ArtifactGroupSourceType,
definitions.TYPE_INDICATOR_COMMAND: source_type.CommandSourceType,
definitions.TYPE_INDICATOR_DIRECTORY: source_type.DirectorySourceType,
definitions.TYPE_INDICATOR_FILE: source_type.FileSourceType,
definitions.TYPE_INDICATOR_PATH: source_type.PathSourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
source_type.WindowsRegistryKeySourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE:
source_type.WindowsRegistryValueSourceType,
definitions.TYPE_INDICATOR_WMI_QUERY: source_type.WMIQuerySourceType,
}
def __init__(self):
"""Initializes an artifact definitions registry."""
super(ArtifactDefinitionsRegistry, self).__init__()
self._artifact_definitions = {}
self._artifact_name_references = set()
self._defined_artifact_names = set()
@classmethod
def CreateSourceType(cls, type_indicator, attributes):
"""Creates a source type object.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
"""
if type_indicator not in cls._source_type_classes:
raise errors.FormatError(
'Unsupported type indicator: {0:s}.'.format(type_indicator))
return cls._source_type_classes[type_indicator](**attributes)
def DeregisterDefinition(self, artifact_definition):
"""Deregisters an artifact definition.
Artifact definitions are identified based on their lower case name.
Args:
artifact_definition (ArtifactDefinition): an artifact definition.
Raises:
KeyError: if an artifact definition is not set for the corresponding name.
"""
artifact_definition_name = artifact_definition.name.lower()
if artifact_definition_name not in self._artifact_definitions:
raise KeyError(
'Artifact definition not set for name: {0:s}.'.format(
artifact_definition.name))
del self._artifact_definitions[artifact_definition_name]
@classmethod
def DeregisterSourceType(cls, source_type_class):
"""Deregisters a source type.
Source types are identified based on their type indicator.
Args:
source_type_class (type): source type.
Raises:
KeyError: if a source type is not set for the corresponding type
indicator.
"""
if source_type_class.TYPE_INDICATOR not in cls._source_type_classes:
raise KeyError(
'Source type not set for type: {0:s}.'.format(
source_type_class.TYPE_INDICATOR))
del cls._source_type_classes[source_type_class.TYPE_INDICATOR]
def GetDefinitionByName(self, name):
"""Retrieves a specific artifact definition by name.
Args:
name (str): name of the artifact definition.
Returns:
ArtifactDefinition: an artifact definition or None if not available.
"""
if not name:
return None
return self._artifact_definitions.get(name.lower(), None)
def GetDefinitions(self):
"""Retrieves the artifact definitions.
Returns:
list[ArtifactDefinition]: artifact definitions.
"""
return self._artifact_definitions.values()
def GetUndefinedArtifacts(self):
"""Retrieves the names of undefined artifacts used by artifact groups.
Returns:
set[str]: undefined artifacts names.
"""
return self._artifact_name_references - self._defined_artifact_names
def RegisterDefinition(self, artifact_definition):
"""Registers an artifact definition.
Artifact definitions are identified based on their lower case name.
Args:
artifact_definition (ArtifactDefinition): an artifact definition.
Raises:
KeyError: if artifact definition is already set for the corresponding
name.
"""
artifact_definition_name = artifact_definition.name.lower()
if artifact_definition_name in self._artifact_definitions:
raise KeyError(
'Artifact definition already set for name: {0:s}.'.format(
artifact_definition.name))
self._artifact_definitions[artifact_definition_name] = artifact_definition
self._defined_artifact_names.add(artifact_definition.name)
for source in artifact_definition.sources:
if source.type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP:
self._artifact_name_references.update(source.names)
@classmethod
def RegisterSourceType(cls, source_type_class):
"""Registers a source type.
Source types are identified based on their type indicator.
Args:
source_type_class (type): source type.
Raises:
KeyError: if source types is already set for the corresponding
type indicator.
"""
if source_type_class.TYPE_INDICATOR in cls._source_type_classes:
raise KeyError(
'Source type already set for type: {0:s}.'.format(
source_type_class.TYPE_INDICATOR))
cls._source_type_classes[source_type_class.TYPE_INDICATOR] = (
source_type_class)
@classmethod
def RegisterSourceTypes(cls, source_type_classes):
"""Registers source types.
Source types are identified based on their type indicator.
Args:
source_type_classes (list[type]): source types.
"""
for source_type_class in source_type_classes:
cls.RegisterSourceType(source_type_class)
def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'):
"""Reads artifact definitions into the registry from files in a directory.
    This function does not recurse into subdirectories.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
path (str): path of the directory to read from.
extension (Optional[str]): extension of the filenames to read.
Raises:
KeyError: if a duplicate artifact definition is encountered.
"""
for artifact_definition in artifacts_reader.ReadDirectory(
path, extension=extension):
self.RegisterDefinition(artifact_definition)
def ReadFromFile(self, artifacts_reader, filename):
"""Reads artifact definitions into the registry from a file.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
filename (str): name of the file to read from.
"""
for artifact_definition in artifacts_reader.ReadFile(filename):
self.RegisterDefinition(artifact_definition)
def ReadFileObject(self, artifacts_reader, file_object):
"""Reads artifact definitions into the registry from a file-like object.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
file_object (file): file-like object to read from.
"""
for artifact_definition in artifacts_reader.ReadFileObject(file_object):
self.RegisterDefinition(artifact_definition)
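# Illustrative usage (the YamlArtifactsReader import is assumed, consistent
# with the ArtifactsReader parameter documented above):
#
#   from artifacts import reader
#
#   registry = ArtifactDefinitionsRegistry()
#   registry.ReadFromFile(reader.YamlArtifactsReader(), 'definitions.yaml')
#   definition = registry.GetDefinitionByName('SomeArtifactName')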
| apache-2.0 |
ssonal/StringMatching | Algorithms/ZSearch.py | 1 | 2300 |
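# The Z computations below call a match_length helper that is missing from
# this file. A minimal sketch of the standard definition is added here
# (assumed, based on the usual Z-algorithm formulation): it returns the
# length of the longest common prefix of the substrings of S starting at
# idx1 and idx2.
def match_length(S, idx1, idx2):
    if idx1 == idx2:
        return len(S) - idx1  # identical start index: matches to end of S
    count = 0
    while idx1 < len(S) and idx2 < len(S) and S[idx1] == S[idx2]:
        count += 1
        idx1 += 1
        idx2 += 1
    return count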
class ZSearch(object):
"""
Returns Z, the Fundamental Preprocessing of S. Z[i] is the length of the substring
beginning at i which is also a prefix of S. This pre-processing is done in O(n) time,
where n is the length of S.
"""
def fundamental_preprocess(self, S):
if len(S) == 0: # Handles case of empty string
return []
if len(S) == 1: # Handles case of single-character string
return [1]
z = [0 for x in S]
z[0] = len(S)
z[1] = match_length(S, 0, 1)
for i in range(2, 1 + z[1]):
z[i] = z[1] - i + 1
# Defines lower and upper limits of z-box
l = 0
r = 0
for i in range(2 + z[1], len(S)):
if i <= r: # i falls within existing z-box
k = i - l
b = z[k]
a = r - i + 1
if b < a: # b ends within existing z-box
z[i] = b
elif b > a: # Optimization from exercise 1-6
z[i] = min(b, len(S) - i)
l = i
r = i + z[i] - 1
else: # b ends exactly at end of existing z-box
z[i] = b + match_length(S, a, r + 1)
l = i
r = i + z[i] - 1
else: # i does not reside within existing z-box
z[i] = match_length(S, 0, i)
if z[i] > 0:
l = i
r = i + z[i] - 1
return z
"""
Searches for all instances of P in T, using the Z algorithm. By calculating the fundamental
preprocess of the string P$T, where $ is some character assumed not to be in P and T, matches
of P are easily found. The Z-values corresponding to T in P$T are considered, and if their
value is equivalent to the length of P then a match is found at that index.
"""
def search(self, string, substring):
if len(substring) == 0 or len(string) < len(substring):
return
S = substring + '$' + string
Z = self.fundamental_preprocess(S)
for i in range(len(substring) + 1, len(S)):
if Z[i] == len(substring):
yield i - len(substring) - 1
def __str__(self):
return 'Z'
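# Illustrative usage (not part of the original module): the generator yields
# the 0-based start index of every occurrence of the pattern, so this prints
# 1 and 3.
if __name__ == '__main__':
    for index in ZSearch().search("banana", "ana"):
        print(index)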
| mit |
byt3bl33d3r/mitmproxy | test/test_utils.py | 11 | 4057 | import json
from libmproxy import utils
from netlib import odict
import tutils
utils.CERT_SLEEP_TIME = 0
def test_format_timestamp():
assert utils.format_timestamp(utils.timestamp())
def test_format_timestamp_with_milli():
assert utils.format_timestamp_with_milli(utils.timestamp())
def test_isBin():
assert not utils.isBin("testing\n\r")
assert utils.isBin("testing\x01")
assert utils.isBin("testing\x0e")
assert utils.isBin("testing\x7f")
def test_isXml():
assert not utils.isXML("foo")
assert utils.isXML("<foo")
assert utils.isXML(" \n<foo")
def test_clean_hanging_newline():
s = "foo\n"
assert utils.clean_hanging_newline(s) == "foo"
assert utils.clean_hanging_newline("foo") == "foo"
def test_pkg_data():
assert utils.pkg_data.path("console")
tutils.raises("does not exist", utils.pkg_data.path, "nonexistent")
def test_pretty_json():
s = json.dumps({"foo": 1})
assert utils.pretty_json(s)
assert not utils.pretty_json("moo")
def test_urldecode():
s = "one=two&three=four"
assert len(utils.urldecode(s)) == 2
def test_multipartdecode():
boundary = 'somefancyboundary'
headers = odict.ODict(
[('content-type', ('multipart/form-data; boundary=%s' % boundary))])
content = "--{0}\n" \
"Content-Disposition: form-data; name=\"field1\"\n\n" \
"value1\n" \
"--{0}\n" \
"Content-Disposition: form-data; name=\"field2\"\n\n" \
"value2\n" \
"--{0}--".format(boundary)
form = utils.multipartdecode(headers, content)
assert len(form) == 2
assert form[0] == ('field1', 'value1')
assert form[1] == ('field2', 'value2')
def test_pretty_duration():
assert utils.pretty_duration(0.00001) == "0ms"
assert utils.pretty_duration(0.0001) == "0ms"
assert utils.pretty_duration(0.001) == "1ms"
assert utils.pretty_duration(0.01) == "10ms"
assert utils.pretty_duration(0.1) == "100ms"
assert utils.pretty_duration(1) == "1.00s"
assert utils.pretty_duration(10) == "10.0s"
assert utils.pretty_duration(100) == "100s"
assert utils.pretty_duration(1000) == "1000s"
assert utils.pretty_duration(10000) == "10000s"
assert utils.pretty_duration(1.123) == "1.12s"
assert utils.pretty_duration(0.123) == "123ms"
def test_LRUCache():
cache = utils.LRUCache(2)
class Foo:
ran = False
def gen(self, x):
self.ran = True
return x
f = Foo()
assert not f.ran
assert cache.get(f.gen, 1) == 1
assert f.ran
f.ran = False
assert cache.get(f.gen, 1) == 1
assert not f.ran
f.ran = False
assert cache.get(f.gen, 1) == 1
assert not f.ran
assert cache.get(f.gen, 2) == 2
assert cache.get(f.gen, 3) == 3
assert f.ran
f.ran = False
assert cache.get(f.gen, 1) == 1
assert f.ran
assert len(cache.cacheList) == 2
assert len(cache.cache) == 2
def test_unparse_url():
assert utils.unparse_url("http", "foo.com", 99, "") == "http://foo.com:99"
assert utils.unparse_url("http", "foo.com", 80, "") == "http://foo.com"
assert utils.unparse_url("https", "foo.com", 80, "") == "https://foo.com:80"
assert utils.unparse_url("https", "foo.com", 443, "") == "https://foo.com"
def test_parse_size():
assert not utils.parse_size("")
assert utils.parse_size("1") == 1
assert utils.parse_size("1k") == 1024
assert utils.parse_size("1m") == 1024**2
assert utils.parse_size("1g") == 1024**3
tutils.raises(ValueError, utils.parse_size, "1f")
tutils.raises(ValueError, utils.parse_size, "ak")
def test_parse_content_type():
p = utils.parse_content_type
assert p("text/html") == ("text", "html", {})
assert p("text") is None
v = p("text/html; charset=UTF-8")
assert v == ('text', 'html', {'charset': 'UTF-8'})
def test_safe_subn():
assert utils.safe_subn("foo", u"bar", "\xc2foo")
def test_urlencode():
assert utils.urlencode([('foo', 'bar')])
| mit |
nathanial/lettuce | tests/integration/lib/Django-1.2.5/django/db/__init__.py | 44 | 4272 | from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import ConnectionHandler, ConnectionRouter, load_backend, DEFAULT_DB_ALIAS, \
DatabaseError, IntegrityError
from django.utils.functional import curry
__all__ = ('backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'DEFAULT_DB_ALIAS')
# For backwards compatibility - Port any old database settings over to
# the new values.
if not settings.DATABASES:
import warnings
warnings.warn(
"settings.DATABASE_* is deprecated; use settings.DATABASES instead.",
PendingDeprecationWarning
)
settings.DATABASES[DEFAULT_DB_ALIAS] = {
'ENGINE': settings.DATABASE_ENGINE,
'HOST': settings.DATABASE_HOST,
'NAME': settings.DATABASE_NAME,
'OPTIONS': settings.DATABASE_OPTIONS,
'PASSWORD': settings.DATABASE_PASSWORD,
'PORT': settings.DATABASE_PORT,
'USER': settings.DATABASE_USER,
'TEST_CHARSET': settings.TEST_DATABASE_CHARSET,
'TEST_COLLATION': settings.TEST_DATABASE_COLLATION,
'TEST_NAME': settings.TEST_DATABASE_NAME,
}
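    # For reference, the new-style equivalent that projects should define
    # directly (values here are illustrative):
    #
    # DATABASES = {
    #     'default': {
    #         'ENGINE': 'django.db.backends.sqlite3',
    #         'NAME': 'mydatabase',
    #     }
    # }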
if DEFAULT_DB_ALIAS not in settings.DATABASES:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
for alias, database in settings.DATABASES.items():
if 'ENGINE' not in database:
raise ImproperlyConfigured("You must specify a 'ENGINE' for database '%s'" % alias)
if database['ENGINE'] in ("postgresql", "postgresql_psycopg2", "sqlite3", "mysql", "oracle"):
import warnings
if 'django.contrib.gis' in settings.INSTALLED_APPS:
warnings.warn(
"django.contrib.gis is now implemented as a full database backend. "
"Modify ENGINE in the %s database configuration to select "
"a backend from 'django.contrib.gis.db.backends'" % alias,
PendingDeprecationWarning
)
if database['ENGINE'] == 'postgresql_psycopg2':
full_engine = 'django.contrib.gis.db.backends.postgis'
elif database['ENGINE'] == 'sqlite3':
full_engine = 'django.contrib.gis.db.backends.spatialite'
else:
full_engine = 'django.contrib.gis.db.backends.%s' % database['ENGINE']
else:
warnings.warn(
"Short names for ENGINE in database configurations are deprecated. "
"Prepend %s.ENGINE with 'django.db.backends.'" % alias,
PendingDeprecationWarning
)
full_engine = "django.db.backends.%s" % database['ENGINE']
database['ENGINE'] = full_engine
connections = ConnectionHandler(settings.DATABASES)
router = ConnectionRouter(settings.DATABASE_ROUTERS)
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
# we manually create the dictionary from the settings, passing only the
# settings that the database backends care about. Note that TIME_ZONE is used
# by the PostgreSQL backends.
# we load all these up for backwards compatibility, you should use
# connections['default'] instead.
connection = connections[DEFAULT_DB_ALIAS]
backend = load_backend(connection.settings_dict['ENGINE'])
# Register an event that closes the database connection
# when a Django request is finished.
def close_connection(**kwargs):
for conn in connections.all():
conn.close()
signals.request_finished.connect(close_connection)
# Register an event that resets connection.queries
# when a Django request is started.
def reset_queries(**kwargs):
for conn in connections.all():
conn.queries = []
signals.request_started.connect(reset_queries)
# Register an event that rolls back the connections
# when a Django request has an exception.
def _rollback_on_exception(**kwargs):
from django.db import transaction
for conn in connections:
try:
transaction.rollback_unless_managed(using=conn)
except DatabaseError:
pass
signals.got_request_exception.connect(_rollback_on_exception)
| gpl-3.0 |
sonic2kk/dolphin | Externals/fmt/support/compute-powers.py | 50 | 1590 | #!/usr/bin/env python
# Compute 10 ** exp with exp in the range [min_exponent, max_exponent] and print
# normalized (with most-significant bit equal to 1) significands in hexadecimal.
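# Each cached power 10**exp is represented as f * 2**e, where f is a 64-bit
# significand with its most-significant bit set. For instance, the first
# entry (10**-348) should come out as f = 0xfa8fd5a0081c0288 with e = -1220;
# these values are quoted from memory of fmt's tables, so treat them as
# illustrative.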
from __future__ import print_function
min_exponent = -348
max_exponent = 340
step = 8
significand_size = 64
exp_offset = 2000
class fp:
pass
powers = []
for i, exp in enumerate(range(min_exponent, max_exponent + 1, step)):
result = fp()
    n = 10 ** exp if exp >= 0 else 2 ** exp_offset // 10 ** -exp
k = significand_size + 1
# Convert to binary and round.
binary = '{:b}'.format(n)
    result.f = (int('{:0<{}}'.format(binary[:k], k), 2) + 1) // 2
result.e = len(binary) - (exp_offset if exp < 0 else 0) - significand_size
powers.append(result)
# Sanity check.
exp_offset10 = 400
actual = result.f * 10 ** exp_offset10
if result.e > 0:
actual *= 2 ** result.e
else:
for j in range(-result.e):
actual /= 2
expected = 10 ** (exp_offset10 + exp)
precision = len('{}'.format(expected)) - len('{}'.format(actual - expected))
if precision < 19:
print('low precision:', precision)
exit(1)
print('Significands:', end='')
for i, fp in enumerate(powers):
if i % 3 == 0:
print(end='\n ')
print(' {:0<#16x}'.format(fp.f, ), end=',')
print('\n\nExponents:', end='')
for i, fp in enumerate(powers):
if i % 11 == 0:
print(end='\n ')
print(' {:5}'.format(fp.e), end=',')
print('\n\nMax exponent difference:',
max([x.e - powers[i - 1].e for i, x in enumerate(powers)][1:]))
| gpl-2.0 |
lsaffre/timtools | timtools/sdoc/environment.py | 1 | 10262 | ## Copyright 2003-2009 Luc Saffre
## This file is part of the TimTools project.
## TimTools is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## TimTools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with TimTools; if not, see <http://www.gnu.org/licenses/>.
import types
import bookland
from timtools.misc.etc import isnumber
from timtools.misc import debug
from timtools.sdoc.memo import parseMemo
class ParseError(RuntimeError):
pass
"""
"""
class Writer:
def __init__(self):
self._txt = None
self._autoPara = True
def flush(self):
if self._txt is not None:
self.endPara()
def write(self,txt):
if self._txt is None:
self.beginPara()
self._txt += txt
if self._autoPara:
while True:
i = self._txt.find("\n\n")
if i == -1:
break
suite = self._txt[i+2:]
self._txt = self._txt[:i]
self.endPara()
self.beginPara()
self._txt = suite
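    # For example, with _autoPara enabled, write("foo\n\nbar") emits "foo" as
    # a finished paragraph via endPara() and leaves "bar" buffered as the
    # start of the next paragraph.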
def beginPara(self,style=None):
if self._txt is not None:
self.endPara()
if style is None:
style = self.getDefaultParaStyle()
self._paraStyle = style
self._txt = ""
# self._wrap = wrap
def endPara(self,nextParaStyle=None):
if self._txt is not None:
self.p(self._txt, self._paraStyle)
# self.addElement(elem)
self._txt = None
if nextParaStyle is not None:
self._paraStyle = nextParaStyle
class BaseEnvironment(Writer):
# abstract class. Base class for Document and Environment
"""
maintains the "current paragraph style"
"""
def __init__(self,document,paraStyle):
# debug.hello(self,"__init__()")
self.document = document
self._paraStyle = paraStyle.child()
self._paraStyle.setName("%s.defaultParaStyle" % \
self.__class__.__name__)
Writer.__init__(self)
def p(self,txt,style=None,wrap=True):
if type(txt) == types.UnicodeType:
txt=txt.encode('utf8')
else:
assert type(txt) == types.StringType, \
"%s is not a string" % repr(txt)
if style is None:
style = self.getDefaultParaStyle()
txt = self.document.feeder(txt)
# self.getRenderer().renderPara(txt, style)
# print "p() : style is %s" % str(style)
elem = self.document.renderer.compilePara(txt,style,wrap)
self.toStory(elem)
""" note that the toStory() called from p() is perhaps not the
one defined above, because TableInstance overrides it. """
def memo(self,txt):
parseMemo(self.document,txt)
def header(self,lvl,txt,**kw):
self.p(txt,
style=self.document.stylesheet["Heading"+str(lvl)],
**kw)
def h1(self,txt,**kw): self.header(1,txt,**kw)
def h2(self,txt,**kw): self.header(2,txt,**kw)
def h3(self,txt,**kw): self.header(3,txt,**kw)
def pre(self,txt,style=None):
if style is None:
style = self.document.stylesheet.Code
self.p(txt,style)
def img(self,filename,
width=None,height=None,
style=None):
assert self._txt is None
if style is None:
style = self.getDefaultParaStyle()
elem = self.document.renderer.compileImage(filename,
width,height,
style)
self.toStory(elem)
def barcode(self,txt,coding="EAN13",style=None):
if style is None:
style = self.getDefaultParaStyle()
if coding == 'EAN13':
barCodeSymbol = bookland.EAN13Symbol(txt)
else:
            raise ParseError(coding + " : coding not supported")
elem = self.document.renderer.compileBarcode(barCodeSymbol,
style)
self.toStory(elem)
def addBackgroundPainter(self,func):
assert self._txt is None
elem = self.document.renderer.compileBackgroundPainter(func)
self.toStory(elem)
## def getenv(self):
## if self._currentEnv is None:
## return self
## assert self._currentEnv is not self
## return self._currentEnv.getenv()
## """ during compileBeginEnvironment the current environment is still
## the old one. These elements will be added to the story of current
## environment where they introduce the new environment."""
## def toStory(self,elems):
## if self._currentEnv is None:
## print "Rendering %d elements..." % len(elems)
## self._renderer.render(elems)
## else:
## self._currentEnv.toStory(elems)
def getDefaultParaStyle(self):
return self._paraStyle
# def getRenderer(self):
# raise NotImplementedError
def formatParagraph(self,**kw):
# parent paragraph styles don't get updated by manual
# formattings
#self._paraStyle = self._paraStyle.child(**kw)
s = self.getDefaultParaStyle()
# print s
# s is now usually equal to self._paraStyle, but not for example
# in a TableRow where it could be the column's style
for k,v in kw.items():
setattr(s,k,v)
#def getTextWidth(self):
# raise NotImplementedError
def onBegin(self):
return None
def onEnd(self):
# print 'onEnd() : ' + str(self)
return None
## def append(self,elem):
## parent = self
## while True:
## if parent is None:
## raise "No story"
## if isinstance(parent,ElementContainer):
## parent.addElement(elem)
## return
## parent = parent._parent
class ChildEnvironment(BaseEnvironment):
def __init__(self,
doc,
parent,
width=None,
flowStyle=None,
paraStyle=None):
"""
- parent : the environment to which this environment belongs
- width : the outer width of this environment as a flowable.
- flowStyle (or outer style) is the paragraph style of this
Environment as a flowable inside its parent.
- paraStyle (or inner style) is the default paragraph style for
elements in this environment. Forwarded to BaseEnvironment.
        A ChildEnvironment dynamically inherits attributes from its
        parent: if an attribute is requested from a ChildEnvironment
        that it does not define, the lookup is forwarded to its parent.
"""
if paraStyle is None:
paraStyle = parent.getDefaultParaStyle()
BaseEnvironment.__init__(self,doc,paraStyle)
self._parent = parent
if flowStyle is None:
flowStyle = parent.getDefaultParaStyle()
self._flowStyle = flowStyle.child()
self._flowStyle.setName("%s.flowStyle" % \
self.__class__.__name__)
if width is None:
width = parent.getTextWidth() \
- self._flowStyle.leftIndent \
- self._flowStyle.rightIndent # 20030417 - 20
assert isnumber(width)
self.width = width
def getFlowStyle(self):
return self._flowStyle
def getTextWidth(self):
return self.width
## return self.width \
## - self._flowStyle.leftIndent \
## - self._flowStyle.rightIndent
# def getRenderer(self):
# # overridden by Story
# return self._parent.getRenderer()
def getParent(self):
return self._parent
## parent = self._parent
## while True:
## if parent is None or parent.__class__ == self.__class__:
## return parent
## parent = parent.getParent()
def __getattr__(self,name):
return getattr(self._parent,name)
class Body(BaseEnvironment):
"""
Body is the main environment of a Document
"""
def __init__(self,doc):
BaseEnvironment.__init__(self, doc, doc.stylesheet.Normal)
def close(self):
pass
def getParent(self):
return None
def getTextWidth(self):
return self.document.getDocumentWidth()
def toStory(self,elem):
self.document.renderer.render(elem)
class ElementContainer:
"""
a Mixin for Environment
Collects elements of a "story". Each element is a Flowable.
"""
def __init__(self):
self._elements = []
def __str__(self):
s = self.__class__.__name__
s += '(elements=%s)' % str(self._elements)
return s
def getStory(self,flush=True):
elems = self._elements
if flush:
self._elements = []
return elems
def getElemCount(self):
return len(self._elements)
def toStory(self,elem):
# debug.hello(self,'toStory(%s)' % str(elem))
# print "ElementContainer.toStory()"
if elem is None:
return
# print '%s.toStory(%s)' % (self.__class__.__name__,str(elem))
if type(elem) is types.ListType:
self._elements += elem
else:
self._elements.append(elem)
class Story(BaseEnvironment,ElementContainer):
def __init__(self,doc,textWidth):
BaseEnvironment.__init__(self, doc, doc.stylesheet.Normal)
ElementContainer.__init__(self)
self._textWidth = textWidth
def getTextWidth(self):
return self._textWidth
def getParent(self):
return None
## ## def formatParagraph(self,**kw):
## ## return self._currentEnv.formatParagraph(**kw)
## ## def getDefaultParaStyle(self):
## ## return self._doc.getDefaultParaStyle()
## def onEnd(self):
## raise """don't call onEnd() replace by getElements?"""
| bsd-2-clause |
mantlepro/radcast | radcast/settings.py | 1 | 1887 | #!/usr/bin/env python2.7
"""settings.py: Project settings, preferences, and presets"""
__copyright__ = "Copyright 2017, Josh Wheeler"
__license__ = "GPL-3.0"
__status__ = "Development"
# radcast: radical podcast automation
# Copyright (C) 2017 Josh Wheeler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
presets = {
'720p x264': {
'vcodec': 'libx264',
'b:v': '7500k',
'preset': 'slow',
'acodec': 'aac',
'b:a': '320k',
'ffmpeg_params': ['movflags=faststart'],
},
'1080p x264': {
'vcodec': 'libx264',
'b:v': '15000k',
'preset': 'slow',
'acodec': 'aac',
'b:a': '320k',
'ffmpeg_params': ['movflags=faststart'],
},
'720p webm': {
'vcodec': 'libvpx',
'b:v': '2.5M',
'preset': 'slow',
'acodec': 'libvorbis',
'b:a': '320k',
'ffmpeg_params': ['crf=10'],
},
'1080p webm': {
'vcodec': 'libvpx',
'b:v': '7M',
'preset': 'slow',
'acodec': 'libvorbis',
'b:a': '320k',
'ffmpeg_params': ['crf=10'],
},
}
cfg = {
"mlt_profile": "atsc_720p_2997",
"preset": "720p x264",
"fps": 29.97,
"transition_length": .25,
"padding": True,
}
| gpl-3.0 |
RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/databases/ensembl/plants.py | 1 | 1573 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import attr
from rnacentral_pipeline.databases.ensembl.genomes import parser
from rnacentral_pipeline.databases.ensembl.genomes.data import Context
from rnacentral_pipeline.databases.helpers import publications as pubs
def as_tair_entry(entry):
database = "TAIR"
xrefs = dict(entry.xref_data)
if database in xrefs:
del xrefs[database]
return attr.evolve(
entry,
accession="%s:%s" % (database, entry.primary_id),
database=database,
xref_data=xrefs,
)
def inferred_entries(entry):
if entry.ncbi_tax_id != 3702 or not entry.primary_id.startswith("AT"):
return
yield as_tair_entry(entry)
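# For example, an Arabidopsis thaliana entry (ncbi_tax_id 3702) whose
# primary_id is 'AT1G01010' yields one additional entry with database 'TAIR'
# and accession 'TAIR:AT1G01010', with any existing TAIR xref dropped.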
def parse(handle, gff_file, **kwargs):
context = Context.build(
"ENSEMBL_PLANTS",
[pubs.reference(29092050)],
gff_file,
)
for entry in parser.parse(context, handle):
yield entry
for entry in inferred_entries(entry):
yield entry
| apache-2.0 |
diegocortassa/TACTIC | src/context/client/tactic-api-python-4.0.api04/Lib/pty.py | 18 | 5043 | """Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
__all__ = ["openpty","fork","spawn"]
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
CHILD = 0
def openpty():
"""openpty() -> (master_fd, slave_fd)
Open a pty master/slave pair, using os.openpty() if possible."""
try:
return os.openpty()
except (AttributeError, OSError):
pass
master_fd, slave_name = _open_terminal()
slave_fd = slave_open(slave_name)
return master_fd, slave_fd
def master_open():
"""master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal()
def _open_terminal():
"""Open pty master and return (master_fd, tty_name).
SGI and generic BSD version, for when openpty() fails."""
try:
import sgi
except ImportError:
pass
else:
try:
tty_name, master_fd = sgi._getpty(os.O_RDWR, 0666, 0)
except IOError, msg:
raise os.error, msg
return master_fd, tty_name
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except os.error:
continue
return (fd, '/dev/tty' + x + y)
raise os.error, 'out of pty devices'
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except IOError:
pass
return result
def fork():
"""fork() -> (pid, master_fd)
Fork and make the child a session leader with a controlling terminal."""
try:
pid, fd = os.forkpty()
except (AttributeError, OSError):
pass
else:
if pid == CHILD:
try:
os.setsid()
except OSError:
# os.forkpty() already set us session leader
pass
return pid, fd
master_fd, slave_fd = openpty()
pid = os.fork()
if pid == CHILD:
# Establish a new session.
os.setsid()
os.close(master_fd)
# Slave becomes stdin/stdout/stderr of child.
os.dup2(slave_fd, STDIN_FILENO)
os.dup2(slave_fd, STDOUT_FILENO)
os.dup2(slave_fd, STDERR_FILENO)
if (slave_fd > STDERR_FILENO):
os.close (slave_fd)
# Explicitly open the tty to make it become a controlling tty.
tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
os.close(tmp_fd)
else:
os.close(slave_fd)
# Parent and child process.
return pid, master_fd
def _writen(fd, data):
"""Write all the data to a descriptor."""
while data != '':
n = os.write(fd, data)
data = data[n:]
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
while 1:
rfds, wfds, xfds = select(
[master_fd, STDIN_FILENO], [], [])
if master_fd in rfds:
data = master_read(master_fd)
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
_writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
"""Create a spawned process."""
if type(argv) == type(''):
argv = (argv,)
pid, master_fd = fork()
if pid == CHILD:
os.execlp(argv[0], *argv)
try:
mode = tty.tcgetattr(STDIN_FILENO)
tty.setraw(STDIN_FILENO)
restore = 1
except tty.error: # This is the same as termios.error
restore = 0
try:
_copy(master_fd, master_read, stdin_read)
except (IOError, OSError):
if restore:
tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
os.close(master_fd)
| epl-1.0 |
matrixise/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Change.py | 293 | 4692 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from lib.logreport import *
from lib.rpc import *
from ServerParameter import *
database="test"
class Change( unohelper.Base, XJobExecutor ):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
desktop=getDesktop()
log_detail(self)
self.logobj=Logger()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.protocol = {
'XML-RPC': 'http://',
'XML-RPC secure': 'https://',
'NET-RPC': 'socket://',
}
host=port=protocol=''
if docinfo.getUserFieldValue(0):
m = re.match('^(http[s]?://|socket://)([\w.\-]+):(\d{1,5})$', docinfo.getUserFieldValue(0) or '')
host = m.group(2)
port = m.group(3)
protocol = m.group(1)
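            # e.g. 'http://localhost:8069' parses into protocol 'http://',
            # host 'localhost' and port '8069'.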
if protocol:
for (key, value) in self.protocol.iteritems():
if value==protocol:
protocol=key
break
else:
protocol='XML-RPC'
self.win=DBModalDialog(60, 50, 120, 90, "Connect to Odoo Server")
self.win.addFixedText("lblVariable", 38, 12, 25, 15, "Server ")
self.win.addEdit("txtHost",-2,9,60,15, host or 'localhost')
self.win.addFixedText("lblReportName",45 , 31, 15, 15, "Port ")
self.win.addEdit("txtPort",-2,28,60,15, port or "8069")
self.win.addFixedText("lblLoginName", 2, 51, 60, 15, "Protocol Connection")
self.win.addComboListBox("lstProtocol", -2, 48, 60, 15, True)
self.lstProtocol = self.win.getControl( "lstProtocol" )
self.win.addButton( 'btnNext', -2, -5, 30, 15, 'Next', actionListenerProc = self.btnNext_clicked )
self.win.addButton( 'btnCancel', -2 - 30 - 5 ,-5, 30, 15, 'Cancel', actionListenerProc = self.btnCancel_clicked )
for i in self.protocol.keys():
self.lstProtocol.addItem(i,self.lstProtocol.getItemCount() )
self.win.doModalDialog( "lstProtocol", protocol)
def btnNext_clicked(self, oActionEvent):
global url
aVal=''
        # aVal: placeholder value passed through to ServerParameter
try:
url = self.protocol[self.win.getListBoxSelectedItem("lstProtocol")]+self.win.getEditText("txtHost")+":"+self.win.getEditText("txtPort")
self.sock=RPCSession(url)
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
docinfo.setUserFieldValue(0,url)
res=self.sock.listdb()
self.win.endExecute()
ServerParameter(aVal,url)
        except:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ServerParameter', LOG_ERROR, info)
ErrorDialog("Connection to server is fail. Please check your Server Parameter.", "", "Error!")
self.win.endExecute()
def btnCancel_clicked(self,oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
Change(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( Change, "org.openoffice.openerp.report.change", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
erikr/django | tests/transactions/tests.py | 6 | 20788 | from __future__ import unicode_literals
import os
import signal
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipIf(sys.platform.startswith('win'), "Windows doesn't have signals.")
def test_rollback_on_keyboardinterrupt(self):
try:
with transaction.atomic():
Reporter.objects.create(first_name='Tintin')
# Send SIGINT (simulate Ctrl-C). One call isn't enough.
os.kill(os.getpid(), signal.SIGINT)
os.kill(os.getpid(), signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertEqual(Reporter.objects.all().count(), 0)
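# A minimal sketch of the savepoint behavior exercised above, assuming a
# configured Django project with the Reporter model: nesting atomic()
# creates a savepoint, so an inner failure rolls back to the savepoint
# without aborting the outer transaction.
def _nested_atomic_sketch():
    with transaction.atomic():
        Reporter.objects.create(first_name="Tintin")
        try:
            with transaction.atomic():
                raise Exception("Oops")
        except Exception:
            pass
        # The outer transaction is still usable; "Tintin" commits on exit.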
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
# The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
            # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
other_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.create(id=1, first_name="Tintin")
other_thread_ready.set()
# We cannot synchronize the two threads with an event here
# because the main thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see below for 1)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
other_thread_ready.wait()
with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
# 1) This line locks... (see above for 2)
Reporter.objects.create(id=1, first_name="Tintin")
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
"""#20028 -- Atomic must support wrapping callable instances."""
class Callable(object):
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature('can_release_savepoints')
def test_atomic_does_not_leak_savepoints_on_failure(self):
"""#23074 -- Savepoints must be released after rollback."""
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with self.assertRaisesMessage(Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class NonAutocommitTests(TransactionTestCase):
available_apps = []
def test_orm_query_after_error_and_rollback(self):
"""
ORM queries are allowed after an error and a rollback in non-autocommit
mode (#27504).
"""
transaction.set_autocommit(False)
r1 = Reporter.objects.create(first_name='Archibald', last_name='Haddock')
r2 = Reporter(first_name='Cuthbert', last_name='Calculus', id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
transaction.rollback()
Reporter.objects.last()
def test_orm_query_without_autocommit(self):
"""#24921 -- ORM queries must be possible after set_autocommit(False)."""
transaction.set_autocommit(False)
try:
Reporter.objects.create(first_name="Tintin")
finally:
transaction.rollback()
transaction.set_autocommit(True)
| bsd-3-clause |
JioCloud/nova | nova/tests/unit/cells/fakes.py | 60 | 7427 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Cells tests.
"""
from oslo_config import cfg
from nova.cells import driver
from nova.cells import manager as cells_manager
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
import nova.db
from nova.db import base
from nova import exception
from nova import objects
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
# Fake Cell Hierarchy
FAKE_TOP_LEVEL_CELL_NAME = 'api-cell'
FAKE_CELL_LAYOUT = [{'child-cell1': []},
{'child-cell2': [{'grandchild-cell1': []}]},
{'child-cell3': [{'grandchild-cell2': []},
{'grandchild-cell3': []}]},
{'child-cell4': []}]
# build_cell_stub_infos() below will take the above layout and create
# a fake view of the DB from the perspective of each of the cells.
# For each cell, a CellStubInfo will be created with this info.
CELL_NAME_TO_STUB_INFO = {}
class FakeDBApi(object):
"""Cells uses a different DB in each cell. This means in order to
stub out things differently per cell, I need to create a fake DBApi
object that is instantiated by each fake cell.
"""
def __init__(self, cell_db_entries):
self.cell_db_entries = cell_db_entries
def __getattr__(self, key):
return getattr(nova.db, key)
def cell_get_all(self, ctxt):
return self.cell_db_entries
def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
return []
def instance_get_by_uuid(self, ctxt, instance_uuid):
raise exception.InstanceNotFound(instance_id=instance_uuid)
class FakeCellsDriver(driver.BaseCellsDriver):
pass
class FakeCellState(cells_state.CellState):
def send_message(self, message):
message_runner = get_message_runner(self.name)
orig_ctxt = message.ctxt
json_message = message.to_json()
message = message_runner.message_from_json(json_message)
# Restore this so we can use mox and verify same context
message.ctxt = orig_ctxt
message.process()
class FakeCellStateManager(cells_state.CellStateManagerDB):
def __init__(self, *args, **kwargs):
super(FakeCellStateManager, self).__init__(*args,
cell_state_cls=FakeCellState, **kwargs)
class FakeCellsManager(cells_manager.CellsManager):
def __init__(self, *args, **kwargs):
super(FakeCellsManager, self).__init__(*args,
cell_state_manager=FakeCellStateManager,
**kwargs)
class CellStubInfo(object):
def __init__(self, test_case, cell_name, db_entries):
self.test_case = test_case
self.cell_name = cell_name
self.db_entries = db_entries
def fake_base_init(_self, *args, **kwargs):
_self.db = FakeDBApi(db_entries)
@staticmethod
def _fake_compute_node_get_all(context):
return []
@staticmethod
def _fake_service_get_by_binary(context, binary):
return []
test_case.stubs.Set(base.Base, '__init__', fake_base_init)
test_case.stubs.Set(objects.ComputeNodeList, 'get_all',
_fake_compute_node_get_all)
test_case.stubs.Set(objects.ServiceList, 'get_by_binary',
_fake_service_get_by_binary)
self.cells_manager = FakeCellsManager()
# Fix the cell name, as it normally uses CONF.cells.name
msg_runner = self.cells_manager.msg_runner
msg_runner.our_name = self.cell_name
self.cells_manager.state_manager.my_cell_state.name = self.cell_name
def _build_cell_transport_url(cur_db_id):
username = 'username%s' % cur_db_id
password = 'password%s' % cur_db_id
hostname = 'rpc_host%s' % cur_db_id
port = 3090 + cur_db_id
virtual_host = 'rpc_vhost%s' % cur_db_id
return 'rabbit://%s:%s@%s:%s/%s' % (username, password, hostname, port,
virtual_host)
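# For example, _build_cell_transport_url(1) returns
# 'rabbit://username1:password1@rpc_host1:3091/rpc_vhost1'.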
def _build_cell_stub_info(test_case, our_name, parent_path, children):
cell_db_entries = []
cur_db_id = 1
sep_char = cells_utils.PATH_CELL_SEP
if parent_path:
cell_db_entries.append(
dict(id=cur_db_id,
name=parent_path.split(sep_char)[-1],
is_parent=True,
transport_url=_build_cell_transport_url(cur_db_id)))
cur_db_id += 1
our_path = parent_path + sep_char + our_name
else:
our_path = our_name
for child in children:
for child_name, grandchildren in child.items():
_build_cell_stub_info(test_case, child_name, our_path,
grandchildren)
cell_entry = dict(id=cur_db_id,
name=child_name,
transport_url=_build_cell_transport_url(
cur_db_id),
is_parent=False)
cell_db_entries.append(cell_entry)
cur_db_id += 1
stub_info = CellStubInfo(test_case, our_name, cell_db_entries)
CELL_NAME_TO_STUB_INFO[our_name] = stub_info
def _build_cell_stub_infos(test_case):
_build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '',
FAKE_CELL_LAYOUT)
def init(test_case):
global CELL_NAME_TO_STUB_INFO
test_case.flags(driver='nova.tests.unit.cells.fakes.FakeCellsDriver',
group='cells')
CELL_NAME_TO_STUB_INFO = {}
_build_cell_stub_infos(test_case)
def _get_cell_stub_info(cell_name):
return CELL_NAME_TO_STUB_INFO[cell_name]
def get_state_manager(cell_name):
return _get_cell_stub_info(cell_name).cells_manager.state_manager
def get_cell_state(cur_cell_name, tgt_cell_name):
state_manager = get_state_manager(cur_cell_name)
cell = state_manager.child_cells.get(tgt_cell_name)
if cell is None:
cell = state_manager.parent_cells.get(tgt_cell_name)
return cell
def get_cells_manager(cell_name):
return _get_cell_stub_info(cell_name).cells_manager
def get_message_runner(cell_name):
return _get_cell_stub_info(cell_name).cells_manager.msg_runner
def stub_tgt_method(test_case, cell_name, method_name, method):
msg_runner = get_message_runner(cell_name)
tgt_msg_methods = msg_runner.methods_by_type['targeted']
setattr(tgt_msg_methods, method_name, method)
def stub_bcast_method(test_case, cell_name, method_name, method):
msg_runner = get_message_runner(cell_name)
tgt_msg_methods = msg_runner.methods_by_type['broadcast']
setattr(tgt_msg_methods, method_name, method)
def stub_bcast_methods(test_case, method_name, method):
for cell_name in CELL_NAME_TO_STUB_INFO.keys():
stub_bcast_method(test_case, cell_name, method_name, method)
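# Typical use from a cells test case, assuming the nova test base classes
# provide flags() and stubs (all names below are defined in this module):
#
#   init(test_case)
#   msg_runner = get_message_runner('api-cell')
#   cells_manager = get_cells_manager('child-cell1')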
| apache-2.0 |
youprofit/zato | code/zato-web-admin/src/zato/admin/web/views/notif/cloud/openstack/swift.py | 6 | 2604 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
# Zato
from zato.admin.web.forms.notif.cloud.openstack.swift import CreateForm, EditForm
from zato.admin.web.views import CreateEdit, Delete as _Delete, Index as _Index
from zato.common.odb.model import NotificationOpenStackSwift as NotifOSS
logger = logging.getLogger(__name__)
common_required = ('name', 'is_active', 'def_id', 'containers', 'interval', 'name_pattern', 'name_pattern_neg', 'get_data',
'get_data_patt_neg', 'service_name')
common_optional = ('get_data_patt',)
class Index(_Index):
method_allowed = 'GET'
url_name = 'notif-cloud-openstack-swift'
template = 'zato/notif/cloud/openstack/swift.html'
service_name = 'zato.notif.cloud.openstack.swift.get-list'
output_class = NotifOSS
class SimpleIO(_Index.SimpleIO):
input_required = ('cluster_id',)
output_required = ('id', 'def_name',) + common_required
output_optional = common_optional
output_repeated = True
def handle(self):
def_list = []
if self.req.zato.cluster_id:
service_name = 'zato.cloud.openstack.swift.get-list'
response = self.req.zato.client.invoke(service_name, {'cluster_id':self.req.zato.cluster_id})
if response.has_data:
def_list = response.data
return {
'create_form': CreateForm(def_list, req=self.req),
'edit_form': EditForm(def_list, prefix='edit', req=self.req),
}
class _CreateEdit(CreateEdit):
method_allowed = 'POST'
class SimpleIO(CreateEdit.SimpleIO):
input_required = ('cluster_id',) + common_required
input_optional = common_optional
output_required = ('id', 'name', 'def_name')
def success_message(self, item):
return 'Successfully {0} the OpenStack Swift notification [{1}]'.format(self.verb, item.name)
class Create(_CreateEdit):
url_name = 'notif-cloud-openstack-swift-create'
service_name = 'zato.notif.cloud.openstack.swift.create'
class Edit(_CreateEdit):
url_name = 'notif-cloud-openstack-swift-edit'
form_prefix = 'edit-'
service_name = 'zato.notif.cloud.openstack.swift.edit'
class Delete(_Delete):
url_name = 'notif-cloud-openstack-swift-delete'
error_message = 'Could not delete the OpenStack Swift notification'
service_name = 'zato.notif.cloud.openstack.swift.delete'
| gpl-3.0 |
qPCR4vir/orange3 | Orange/widgets/data/contexthandlers.py | 20 | 1303 | from Orange.widgets.settings import DomainContextHandler
class SelectAttributesDomainContextHandler(DomainContextHandler):
"""Select Columns widget has context settings in a specific format.
This context handler modifies match and clone_context to account for that.
"""
def match_value(self, setting, value, attrs, metas):
if setting.name == 'domain_role_hints':
value = self.decode_setting(setting, value)
matched = available = 0
for item, category in value.items():
role, role_idx = category
if role != 'available':
available += 1
if self._var_exists(setting, item, attrs, metas):
matched += 1
return matched, available
return super().match_value(setting, value, attrs, metas)
def filter_value(self, setting, data, domain, attrs, metas):
value = data.get(setting.name, None)
value = self.decode_setting(setting, value)
if isinstance(value, dict):
for item, category in list(value.items()):
if not self._var_exists(setting, item, attrs, metas):
del value[item]
else:
super().filter_value(setting, data, domain, attrs, metas)
| bsd-2-clause |
jwheare/digest | lib/reportlab/graphics/widgets/grids.py | 2 | 17880 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgets/grids.py
__version__=''' $Id: grids.py 3107 2007-06-30 11:43:41Z rgbecker $ '''
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isColorOrNone, isBoolean, isListOfNumbers, OneOf, isListOfColors, isNumberOrNone
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.graphics.shapes import Drawing, Group, Line, Rect, LineShape, definePath, EmptyClipPath
from reportlab.graphics.widgetbase import Widget
def frange(start, end=None, inc=None):
"A range function, that does accept float increments..."
if end == None:
end = start + 0.0
start = 0.0
if inc == None:
inc = 1.0
L = []
    end = end - inc*0.0001 #to avoid numerical problems
while 1:
next = start + len(L) * inc
if inc > 0 and next >= end:
break
elif inc < 0 and next <= end:
break
L.append(next)
return L
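# e.g. frange(0, 1, 0.25) -> [0.0, 0.25, 0.5, 0.75]; like the builtin
# range, the end point is excluded.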
def makeDistancesList(list):
"""Returns a list of distances between adjacent numbers in some input list.
E.g. [1, 1, 2, 3, 5, 7] -> [0, 1, 1, 2, 2]
"""
d = []
for i in range(len(list[:-1])):
d.append(list[i+1] - list[i])
return d
class Grid(Widget):
"""This makes a rectangular grid of equidistant stripes.
The grid contains an outer border rectangle, and stripes
inside which can be drawn with lines and/or as solid tiles.
The drawing order is: outer rectangle, then lines and tiles.
The stripes' width is indicated as 'delta'. The sequence of
stripes can have an offset named 'delta0'. Both values need
to be positive!
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="The grid's lower-left x position."),
y = AttrMapValue(isNumber, desc="The grid's lower-left y position."),
width = AttrMapValue(isNumber, desc="The grid's width."),
height = AttrMapValue(isNumber, desc="The grid's height."),
orientation = AttrMapValue(OneOf(('vertical', 'horizontal')),
desc='Determines if stripes are vertical or horizontal.'),
useLines = AttrMapValue(OneOf((0, 1)),
desc='Determines if stripes are drawn with lines.'),
useRects = AttrMapValue(OneOf((0, 1)),
desc='Determines if stripes are drawn with solid rectangles.'),
delta = AttrMapValue(isNumber,
desc='Determines the width/height of the stripes.'),
delta0 = AttrMapValue(isNumber,
desc='Determines the stripes initial width/height offset.'),
deltaSteps = AttrMapValue(isListOfNumbers,
desc='List of deltas to be used cyclically.'),
stripeColors = AttrMapValue(isListOfColors,
desc='Colors applied cyclically in the right or upper direction.'),
fillColor = AttrMapValue(isColorOrNone,
desc='Background color for entire rectangle.'),
strokeColor = AttrMapValue(isColorOrNone,
desc='Color used for lines.'),
strokeWidth = AttrMapValue(isNumber,
desc='Width used for lines.'),
rectStrokeColor = AttrMapValue(isColorOrNone, desc='Color for outer rect stroke.'),
rectStrokeWidth = AttrMapValue(isNumberOrNone, desc='Width for outer rect stroke.'),
)
def __init__(self):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.orientation = 'vertical'
self.useLines = 0
self.useRects = 1
self.delta = 20
self.delta0 = 0
self.deltaSteps = []
self.fillColor = colors.white
self.stripeColors = [colors.red, colors.green, colors.blue]
self.strokeColor = colors.black
self.strokeWidth = 2
def demo(self):
D = Drawing(100, 100)
g = Grid()
D.add(g)
return D
def makeOuterRect(self):
strokeColor = getattr(self,'rectStrokeColor',self.strokeColor)
strokeWidth = getattr(self,'rectStrokeWidth',self.strokeWidth)
if self.fillColor or (strokeColor and strokeWidth):
rect = Rect(self.x, self.y, self.width, self.height)
rect.fillColor = self.fillColor
rect.strokeColor = strokeColor
rect.strokeWidth = strokeWidth
return rect
else:
return None
def makeLinePosList(self, start, isX=0):
"Returns a list of positions where to place lines."
w, h = self.width, self.height
if isX:
length = w
else:
length = h
if self.deltaSteps:
r = [start + self.delta0]
i = 0
while 1:
if r[-1] > start + length:
del r[-1]
break
r.append(r[-1] + self.deltaSteps[i % len(self.deltaSteps)])
i = i + 1
else:
r = frange(start + self.delta0, start + length, self.delta)
r.append(start + length)
if self.delta0 != 0:
r.insert(0, start)
#print 'Grid.makeLinePosList() -> %s' % r
return r
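    # e.g. for the defaults (start=0, length=100, delta=20, delta0=0)
    # the result is [0, 20, 40, 60, 80, 100]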
def makeInnerLines(self):
# inner grid lines
group = Group()
w, h = self.width, self.height
if self.useLines == 1:
if self.orientation == 'vertical':
r = self.makeLinePosList(self.x, isX=1)
for x in r:
line = Line(x, self.y, x, self.y + h)
line.strokeColor = self.strokeColor
line.strokeWidth = self.strokeWidth
group.add(line)
elif self.orientation == 'horizontal':
r = self.makeLinePosList(self.y, isX=0)
for y in r:
line = Line(self.x, y, self.x + w, y)
line.strokeColor = self.strokeColor
line.strokeWidth = self.strokeWidth
group.add(line)
return group
def makeInnerTiles(self):
# inner grid lines
group = Group()
w, h = self.width, self.height
# inner grid stripes (solid rectangles)
if self.useRects == 1:
cols = self.stripeColors
if self.orientation == 'vertical':
r = self.makeLinePosList(self.x, isX=1)
elif self.orientation == 'horizontal':
r = self.makeLinePosList(self.y, isX=0)
dist = makeDistancesList(r)
i = 0
for j in range(len(dist)):
if self.orientation == 'vertical':
x = r[j]
stripe = Rect(x, self.y, dist[j], h)
elif self.orientation == 'horizontal':
y = r[j]
stripe = Rect(self.x, y, w, dist[j])
stripe.fillColor = cols[i % len(cols)]
stripe.strokeColor = None
group.add(stripe)
i = i + 1
return group
def draw(self):
# general widget bits
group = Group()
group.add(self.makeOuterRect())
group.add(self.makeInnerTiles())
group.add(self.makeInnerLines(),name='_gridLines')
return group
class DoubleGrid(Widget):
"""This combines two ordinary Grid objects orthogonal to each other.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="The grid's lower-left x position."),
y = AttrMapValue(isNumber, desc="The grid's lower-left y position."),
width = AttrMapValue(isNumber, desc="The grid's width."),
height = AttrMapValue(isNumber, desc="The grid's height."),
grid0 = AttrMapValue(None, desc="The first grid component."),
grid1 = AttrMapValue(None, desc="The second grid component."),
)
def __init__(self):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
g0 = Grid()
g0.x = self.x
g0.y = self.y
g0.width = self.width
g0.height = self.height
g0.orientation = 'vertical'
g0.useLines = 1
g0.useRects = 0
g0.delta = 20
g0.delta0 = 0
g0.deltaSteps = []
g0.fillColor = colors.white
g0.stripeColors = [colors.red, colors.green, colors.blue]
g0.strokeColor = colors.black
g0.strokeWidth = 1
g1 = Grid()
g1.x = self.x
g1.y = self.y
g1.width = self.width
g1.height = self.height
g1.orientation = 'horizontal'
g1.useLines = 1
g1.useRects = 0
g1.delta = 20
g1.delta0 = 0
g1.deltaSteps = []
g1.fillColor = colors.white
g1.stripeColors = [colors.red, colors.green, colors.blue]
g1.strokeColor = colors.black
g1.strokeWidth = 1
self.grid0 = g0
self.grid1 = g1
## # This gives an AttributeError:
## # DoubleGrid instance has no attribute 'grid0'
## def __setattr__(self, name, value):
## if name in ('x', 'y', 'width', 'height'):
## setattr(self.grid0, name, value)
## setattr(self.grid1, name, value)
def demo(self):
D = Drawing(100, 100)
g = DoubleGrid()
D.add(g)
return D
def draw(self):
group = Group()
g0, g1 = self.grid0, self.grid1
# Order groups to make sure both v and h lines
# are visible (works only when there is only
# one kind of stripes, v or h).
G = g0.useRects == 1 and g1.useRects == 0 and (g0,g1) or (g1,g0)
for g in G:
group.add(g.makeOuterRect())
for g in G:
group.add(g.makeInnerTiles())
group.add(g.makeInnerLines(),name='_gridLines')
return group
class ShadedRect(Widget):
"""This makes a rectangle with shaded colors between two colors.
Colors are interpolated linearly between 'fillColorStart'
and 'fillColorEnd', both of which appear at the margins.
If 'numShades' is set to one, though, only 'fillColorStart'
is used.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="The grid's lower-left x position."),
y = AttrMapValue(isNumber, desc="The grid's lower-left y position."),
width = AttrMapValue(isNumber, desc="The grid's width."),
height = AttrMapValue(isNumber, desc="The grid's height."),
orientation = AttrMapValue(OneOf(('vertical', 'horizontal')), desc='Determines if stripes are vertical or horizontal.'),
numShades = AttrMapValue(isNumber, desc='The number of interpolating colors.'),
fillColorStart = AttrMapValue(isColorOrNone, desc='Start value of the color shade.'),
fillColorEnd = AttrMapValue(isColorOrNone, desc='End value of the color shade.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color used for border line.'),
strokeWidth = AttrMapValue(isNumber, desc='Width used for lines.'),
cylinderMode = AttrMapValue(isBoolean, desc='True if shading reverses in middle.'),
)
def __init__(self,**kw):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.orientation = 'vertical'
self.numShades = 20
self.fillColorStart = colors.pink
self.fillColorEnd = colors.black
self.strokeColor = colors.black
self.strokeWidth = 2
self.cylinderMode = 0
self.setProperties(kw)
def demo(self):
D = Drawing(100, 100)
g = ShadedRect()
D.add(g)
return D
def _flipRectCorners(self):
"Flip rectangle's corners if width or height is negative."
x, y, width, height, fillColorStart, fillColorEnd = self.x, self.y, self.width, self.height, self.fillColorStart, self.fillColorEnd
if width < 0 and height > 0:
x = x + width
width = -width
if self.orientation=='vertical': fillColorStart, fillColorEnd = fillColorEnd, fillColorStart
elif height<0 and width>0:
y = y + height
height = -height
if self.orientation=='horizontal': fillColorStart, fillColorEnd = fillColorEnd, fillColorStart
        elif width < 0 and height < 0:
x = x + width
width = -width
y = y + height
height = -height
return x, y, width, height, fillColorStart, fillColorEnd
def draw(self):
# general widget bits
group = Group()
x, y, w, h, c0, c1 = self._flipRectCorners()
numShades = self.numShades
if self.cylinderMode:
if not numShades%2: numShades = numShades+1
halfNumShades = (numShades-1)/2 + 1
num = float(numShades) # must make it float!
vertical = self.orientation == 'vertical'
if vertical:
if numShades == 1:
V = [x]
else:
V = frange(x, x + w, w/num)
else:
if numShades == 1:
V = [y]
else:
V = frange(y, y + h, h/num)
for v in V:
stripe = vertical and Rect(v, y, w/num, h) or Rect(x, v, w, h/num)
if self.cylinderMode:
if V.index(v)>=halfNumShades:
col = colors.linearlyInterpolatedColor(c1,c0,V[halfNumShades],V[-1], v)
else:
col = colors.linearlyInterpolatedColor(c0,c1,V[0],V[halfNumShades], v)
else:
col = colors.linearlyInterpolatedColor(c0,c1,V[0],V[-1], v)
stripe.fillColor = col
stripe.strokeColor = col
stripe.strokeWidth = 1
group.add(stripe)
if self.strokeColor and self.strokeWidth>=0:
rect = Rect(x, y, w, h)
rect.strokeColor = self.strokeColor
rect.strokeWidth = self.strokeWidth
rect.fillColor = None
group.add(rect)
return group
def colorRange(c0, c1, n):
"Return a range of intermediate colors between c0 and c1"
if n==1: return [c0]
C = []
if n>1:
lim = n-1
for i in range(n):
C.append(colors.linearlyInterpolatedColor(c0,c1,0,lim, i))
return C
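# e.g. colorRange(colors.red, colors.blue, 3) gives red, an even red/blue
# blend, and blue.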
def centroid(P):
'''compute average point of a set of points'''
return reduce(lambda x,y, fn=float(len(P)): (x[0]+y[0]/fn,x[1]+y[1]/fn),P,(0,0))
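# e.g. centroid([(0, 0), (2, 0), (2, 2), (0, 2)]) -> (1.0, 1.0)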
def rotatedEnclosingRect(P, angle, rect):
'''
    given a sequence P of x,y coordinate pairs and an angle in degrees,
    find the centroid of P and the axis at angle theta through it,
    find the extreme points of P wrt axis-parallel distance and
    axis-orthogonal distance, then compute the least rectangle that will
    still enclose P when rotated by angle.
'''
from math import pi, cos, sin, tan
x0, y0 = centroid(P)
theta = (angle/180.)*pi
s,c=sin(theta),cos(theta)
def parallelAxisDist((x,y),s=s,c=c,x0=x0,y0=y0):
return (s*(y-y0)+c*(x-x0))
def orthogonalAxisDist((x,y),s=s,c=c,x0=x0,y0=y0):
        return (c*(y-y0)-s*(x-x0))
L = map(parallelAxisDist,P)
L.sort()
a0, a1 = L[0], L[-1]
L = map(orthogonalAxisDist,P)
L.sort()
b0, b1 = L[0], L[-1]
rect.x, rect.width = a0, a1-a0
rect.y, rect.height = b0, b1-b0
g = Group(transform=(c,s,-s,c,x0,y0))
g.add(rect)
return g
class ShadedPolygon(Widget,LineShape):
_attrMap = AttrMap(BASE=LineShape,
angle = AttrMapValue(isNumber,desc="Shading angle"),
fillColorStart = AttrMapValue(isColorOrNone),
fillColorEnd = AttrMapValue(isColorOrNone),
numShades = AttrMapValue(isNumber, desc='The number of interpolating colors.'),
cylinderMode = AttrMapValue(isBoolean, desc='True if shading reverses in middle.'),
points = AttrMapValue(isListOfNumbers),
)
def __init__(self,**kw):
self.angle = 90
self.fillColorStart = colors.red
self.fillColorEnd = colors.green
self.cylinderMode = 0
self.numShades = 50
self.points = [-1,-1,2,2,3,-1]
LineShape.__init__(self,kw)
def draw(self):
P = self.points
P = map(lambda i, P=P:(P[i],P[i+1]),xrange(0,len(P),2))
path = definePath([('moveTo',)+P[0]]+map(lambda x: ('lineTo',)+x,P[1:])+['closePath'],
fillColor=None, strokeColor=None)
path.isClipPath = 1
g = Group()
g.add(path)
rect = ShadedRect(strokeWidth=0,strokeColor=None)
for k in 'fillColorStart', 'fillColorEnd', 'numShades', 'cylinderMode':
setattr(rect,k,getattr(self,k))
g.add(rotatedEnclosingRect(P, self.angle, rect))
g.add(EmptyClipPath)
path = path.copy()
path.isClipPath = 0
path.strokeColor = self.strokeColor
path.strokeWidth = self.strokeWidth
g.add(path)
return g
if __name__=='__main__': #noruntests
from reportlab.lib.colors import blue
from reportlab.graphics.shapes import Drawing
angle=45
D = Drawing(120,120)
D.add(ShadedPolygon(points=(10,10,60,60,110,10),strokeColor=None,strokeWidth=1,angle=90,numShades=50,cylinderMode=0))
D.save(formats=['gif'],fnRoot='shobj',outDir='/tmp')
| bsd-3-clause |
sobercoder/gem5 | src/mem/slicc/__init__.py | 92 | 1545 | # Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
ratanraj/salesforce-python-toolkit | sforce/base.py | 16 | 19979 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Written by: David Lanstein ( dlanstein gmail com )
import string
import sys
import os.path
from suds.client import Client
try:
# suds 0.3.8 and prior
from suds.transport.cache import FileCache
except:
# suds 0.3.9+
from suds.cache import FileCache
import suds.sudsobject
from suds.sax.element import Element
class SforceBaseClient(object):
_sforce = None
_sessionId = None
_location = None
_product = 'Python Toolkit'
_version = (0, 1, 3)
_objectNamespace = None
_strictResultTyping = False
_allowFieldTruncationHeader = None
_assignmentRuleHeader = None
_callOptions = None
_assignmentRuleHeader = None
_emailHeader = None
_localeOptions = None
_loginScopeHeader = None
_mruHeader = None
_packageVersionHeader = None
_queryOptions = None
_sessionHeader = None
_userTerritoryDeleteHeader = None
def __init__(self, wsdl, cacheDuration = 0, **kwargs):
'''
Connect to Salesforce
'wsdl' : Location of WSDL
'cacheDuration' : Duration of HTTP GET cache in seconds, or 0 for no cache
'proxy' : Dict of pair of 'protocol' and 'location'
e.g. {'http': 'my.insecure.proxy.example.com:80'}
'username' : Username for HTTP auth when using a proxy ONLY
'password' : Password for HTTP auth when using a proxy ONLY
'''
# Suds can only accept WSDL locations with a protocol prepended
if '://' not in wsdl:
# TODO windows users???
# check if file exists, else let bubble up to suds as is
# definitely don't want to assume http or https
if os.path.isfile(wsdl):
wsdl = 'file://' + os.path.abspath(wsdl)
if cacheDuration > 0:
cache = FileCache()
cache.setduration(seconds = cacheDuration)
else:
cache = None
self._sforce = Client(wsdl, cache = cache)
# Set HTTP headers
headers = {'User-Agent': 'Salesforce/' + self._product + '/' + '.'.join(str(x) for x in self._version)}
# This HTTP header will not work until Suds gunzips/inflates the content
# 'Accept-Encoding': 'gzip, deflate'
self._sforce.set_options(headers = headers)
if kwargs.has_key('proxy'):
# urllib2 cannot handle HTTPS proxies yet (see bottom of README)
if kwargs['proxy'].has_key('https'):
raise NotImplementedError('Connecting to a proxy over HTTPS not yet implemented due to a \
limitation in the underlying urllib2 proxy implementation. However, traffic from a proxy to \
Salesforce will use HTTPS.')
self._sforce.set_options(proxy = kwargs['proxy'])
if kwargs.has_key('username'):
self._sforce.set_options(username = kwargs['username'])
if kwargs.has_key('password'):
self._sforce.set_options(password = kwargs['password'])
# Toolkit-specific methods
def generateHeader(self, sObjectType):
'''
Generate a SOAP header as defined in:
http://www.salesforce.com/us/developer/docs/api/Content/soap_headers.htm
'''
try:
return self._sforce.factory.create(sObjectType)
except:
      print 'There is no SOAP header of type %s' % sObjectType
def generateObject(self, sObjectType):
'''
Generate a Salesforce object, such as a Lead or Contact
'''
obj = self._sforce.factory.create('ens:sObject')
obj.type = sObjectType
return obj
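  # A typical call sequence, assuming a logged-in client (the field names
  # below are illustrative, not prescribed by this toolkit):
  #
  #   lead = client.generateObject('Lead')
  #   lead.LastName = 'Smith'
  #   lead.Company = 'Acme'
  #   result = client.create(lead)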
def _handleResultTyping(self, result):
'''
If any of the following calls return a single result, and self._strictResultTyping is true,
return the single result, rather than [(SaveResult) {...}]:
convertLead()
create()
delete()
emptyRecycleBin()
invalidateSessions()
merge()
process()
retrieve()
undelete()
update()
upsert()
describeSObjects()
sendEmail()
'''
if self._strictResultTyping == False and len(result) == 1:
return result[0]
else:
return result
def _marshallSObjects(self, sObjects, tag = 'sObjects'):
'''
Marshall generic sObjects into a list of SAX elements
This code is going away ASAP
tag param is for nested objects (e.g. MergeRequest) where
key: object must be in <key/>, not <sObjects/>
'''
if not isinstance(sObjects, (tuple, list)):
sObjects = (sObjects, )
if sObjects[0].type in ['LeadConvert', 'SingleEmailMessage', 'MassEmailMessage']:
nsPrefix = 'tns:'
else:
nsPrefix = 'ens:'
li = []
for obj in sObjects:
el = Element(tag)
el.set('xsi:type', nsPrefix + obj.type)
for k, v in obj:
if k == 'type':
continue
# This is here to avoid 'duplicate values' error when setting a field in fieldsToNull
# Even a tag like <FieldName/> will trigger it
if v == None:
# not going to win any awards for variable-naming scheme here
tmp = Element(k)
tmp.set('xsi:nil', 'true')
el.append(tmp)
elif isinstance(v, (list, tuple)):
for value in v:
el.append(Element(k).setText(value))
elif isinstance(v, suds.sudsobject.Object):
el.append(self._marshallSObjects(v, k))
else:
el.append(Element(k).setText(v))
li.append(el)
return li
def _setEndpoint(self, location):
'''
    Set the endpoint once Salesforce returns the URL after a successful login()
'''
# suds 0.3.7+ supports multiple wsdl services, but breaks setlocation :(
# see https://fedorahosted.org/suds/ticket/261
try:
self._sforce.set_options(location = location)
except:
self._sforce.wsdl.service.setlocation(location)
self._location = location
def _setHeaders(self, call = None):
'''
Attach particular SOAP headers to the request depending on the method call made
'''
# All calls, including utility calls, set the session header
headers = {'SessionHeader': self._sessionHeader}
if call in ('convertLead',
'create',
'merge',
'process',
'undelete',
'update',
'upsert'):
if self._allowFieldTruncationHeader is not None:
headers['AllowFieldTruncationHeader'] = self._allowFieldTruncationHeader
if call in ('create',
'merge',
'update',
'upsert'):
if self._assignmentRuleHeader is not None:
headers['AssignmentRuleHeader'] = self._assignmentRuleHeader
# CallOptions will only ever be set by the SforcePartnerClient
if self._callOptions is not None:
if call in ('create',
'merge',
'queryAll',
'query',
'queryMore',
'retrieve',
'search',
'update',
'upsert',
'convertLead',
'login',
'delete',
'describeGlobal',
'describeLayout',
'describeTabs',
'describeSObject',
'describeSObjects',
'getDeleted',
'getUpdated',
'process',
'undelete',
'getServerTimestamp',
'getUserInfo',
'setPassword',
'resetPassword'):
headers['CallOptions'] = self._callOptions
if call in ('create',
'delete',
'resetPassword',
'update',
'upsert'):
if self._emailHeader is not None:
headers['EmailHeader'] = self._emailHeader
if call in ('describeSObject',
'describeSObjects'):
if self._localeOptions is not None:
headers['LocaleOptions'] = self._localeOptions
if call == 'login':
if self._loginScopeHeader is not None:
headers['LoginScopeHeader'] = self._loginScopeHeader
if call in ('create',
'merge',
'query',
'retrieve',
'update',
'upsert'):
if self._mruHeader is not None:
headers['MruHeader'] = self._mruHeader
if call in ('convertLead',
'create',
'delete',
'describeGlobal',
'describeLayout',
'describeSObject',
'describeSObjects',
'describeTabs',
'merge',
'process',
'query',
'retrieve',
'search',
'undelete',
'update',
'upsert'):
if self._packageVersionHeader is not None:
headers['PackageVersionHeader'] = self._packageVersionHeader
if call in ('query',
'queryAll',
'queryMore',
'retrieve'):
if self._queryOptions is not None:
headers['QueryOptions'] = self._queryOptions
if call == 'delete':
if self._userTerritoryDeleteHeader is not None:
headers['UserTerritoryDeleteHeader'] = self._userTerritoryDeleteHeader
self._sforce.set_options(soapheaders = headers)
def setStrictResultTyping(self, strictResultTyping):
'''
Set whether single results from any of the following calls return the result wrapped in a list,
or simply the single result object:
convertLead()
create()
delete()
emptyRecycleBin()
invalidateSessions()
merge()
process()
retrieve()
undelete()
update()
upsert()
describeSObjects()
sendEmail()
'''
self._strictResultTyping = strictResultTyping
def getSessionId(self):
return self._sessionId
def getLocation(self):
return self._location
def getConnection(self):
return self._sforce
def getLastRequest(self):
return str(self._sforce.last_sent())
def getLastResponse(self):
return str(self._sforce.last_received())
# Core calls
def convertLead(self, leadConverts):
'''
Converts a Lead into an Account, Contact, or (optionally) an Opportunity.
'''
self._setHeaders('convertLead')
return self._handleResultTyping(self._sforce.service.convertLead(leadConverts))
def create(self, sObjects):
self._setHeaders('create')
return self._handleResultTyping(self._sforce.service.create(sObjects))
def delete(self, ids):
'''
Deletes one or more objects
'''
self._setHeaders('delete')
return self._handleResultTyping(self._sforce.service.delete(ids))
def emptyRecycleBin(self, ids):
'''
Permanently deletes one or more objects
'''
self._setHeaders('emptyRecycleBin')
return self._handleResultTyping(self._sforce.service.emptyRecycleBin(ids))
def getDeleted(self, sObjectType, startDate, endDate):
'''
Retrieves the list of individual objects that have been deleted within the
given timespan for the specified object.
'''
self._setHeaders('getDeleted')
return self._sforce.service.getDeleted(sObjectType, startDate, endDate)
def getUpdated(self, sObjectType, startDate, endDate):
'''
Retrieves the list of individual objects that have been updated (added or
changed) within the given timespan for the specified object.
'''
self._setHeaders('getUpdated')
return self._sforce.service.getUpdated(sObjectType, startDate, endDate)
def invalidateSessions(self, sessionIds):
'''
Invalidate a Salesforce session
This should be used with extreme caution, for the following (undocumented) reason:
All API connections for a given user share a single session ID
This will call logout() WHICH LOGS OUT THAT USER FROM EVERY CONCURRENT SESSION
return invalidateSessionsResult
'''
self._setHeaders('invalidateSessions')
return self._handleResultTyping(self._sforce.service.invalidateSessions(sessionIds))
def login(self, username, password, token):
'''
Login to Salesforce.com and starts a client session.
Unlike other toolkits, token is a separate parameter, because
Salesforce doesn't explicitly tell you to append it when it gives
you a login error. Folks that are new to the API may not know this.
'username' : Username
'password' : Password
'token' : Token
return LoginResult
'''
self._setHeaders('login')
result = self._sforce.service.login(username, password + token)
# set session header
header = self.generateHeader('SessionHeader')
header.sessionId = result['sessionId']
self.setSessionHeader(header)
self._sessionId = result['sessionId']
# change URL to point from test.salesforce.com to something like cs2-api.salesforce.com
self._setEndpoint(result['serverUrl'])
# na0.salesforce.com (a.k.a. ssl.salesforce.com) requires ISO-8859-1 instead of UTF-8
if 'ssl.salesforce.com' in result['serverUrl'] or 'na0.salesforce.com' in result['serverUrl']:
# currently, UTF-8 is hard-coded in Suds, can't implement this yet
pass
return result
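  # A typical session bootstrap, assuming valid credentials and a security
  # token (values are illustrative):
  #
  #   client = SforcePartnerClient('partner.wsdl')
  #   client.login('user@example.com', 'password', 'token')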
def logout(self):
'''
Logout from Salesforce.com
This should be used with extreme caution, for the following (undocumented) reason:
All API connections for a given user share a single session ID
Calling logout() LOGS OUT THAT USER FROM EVERY CONCURRENT SESSION
return LogoutResult
'''
self._setHeaders('logout')
return self._sforce.service.logout()
def merge(self, mergeRequests):
self._setHeaders('merge')
return self._handleResultTyping(self._sforce.service.merge(mergeRequests))
def process(self, processRequests):
self._setHeaders('process')
return self._handleResultTyping(self._sforce.service.process(processRequests))
def query(self, queryString):
'''
Executes a query against the specified object and returns data that matches
the specified criteria.
'''
self._setHeaders('query')
return self._sforce.service.query(queryString)
def queryAll(self, queryString):
'''
Retrieves data from specified objects, whether or not they have been deleted.
'''
self._setHeaders('queryAll')
return self._sforce.service.queryAll(queryString)
def queryMore(self, queryLocator):
'''
Retrieves the next batch of objects from a query.
'''
self._setHeaders('queryMore')
return self._sforce.service.queryMore(queryLocator)
def retrieve(self, fieldList, sObjectType, ids):
'''
Retrieves one or more objects based on the specified object IDs.
'''
self._setHeaders('retrieve')
return self._handleResultTyping(self._sforce.service.retrieve(fieldList, sObjectType, ids))
def search(self, searchString):
'''
Executes a text search in your organization's data.
'''
self._setHeaders('search')
return self._sforce.service.search(searchString)
def undelete(self, ids):
'''
Undeletes one or more objects
'''
self._setHeaders('undelete')
return self._handleResultTyping(self._sforce.service.undelete(ids))
def update(self, sObjects):
self._setHeaders('update')
return self._handleResultTyping(self._sforce.service.update(sObjects))
def upsert(self, externalIdFieldName, sObjects):
self._setHeaders('upsert')
return self._handleResultTyping(self._sforce.service.upsert(externalIdFieldName, sObjects))
# Describe calls
def describeGlobal(self):
'''
Retrieves a list of available objects in your organization
'''
self._setHeaders('describeGlobal')
return self._sforce.service.describeGlobal()
def describeLayout(self, sObjectType, recordTypeIds = None):
'''
Use describeLayout to retrieve information about the layout (presentation
of data to users) for a given object type. The describeLayout call returns
metadata about a given page layout, including layouts for edit and
display-only views and record type mappings. Note that field-level security
and layout editability affects which fields appear in a layout.
'''
self._setHeaders('describeLayout')
return self._sforce.service.describeLayout(sObjectType, recordTypeIds)
    def describeSObject(self, sObjectType):
'''
Describes metadata (field list and object properties) for the specified
object.
'''
self._setHeaders('describeSObject')
        return self._sforce.service.describeSObject(sObjectType)
def describeSObjects(self, sObjectTypes):
'''
An array-based version of describeSObject; describes metadata (field list
and object properties) for the specified object or array of objects.
'''
self._setHeaders('describeSObjects')
return self._handleResultTyping(self._sforce.service.describeSObjects(sObjectTypes))
# describeSoftphoneLayout not implemented
# From the docs: "Use this call to obtain information about the layout of a SoftPhone.
# Use only in the context of Salesforce CRM Call Center; do not call directly from client programs."
def describeTabs(self):
'''
The describeTabs call returns information about the standard apps and
custom apps, if any, available for the user who sends the call, including
the list of tabs defined for each app.
'''
self._setHeaders('describeTabs')
return self._sforce.service.describeTabs()
# Utility calls
def getServerTimestamp(self):
'''
Retrieves the current system timestamp (GMT) from the Web service.
'''
self._setHeaders('getServerTimestamp')
return self._sforce.service.getServerTimestamp()
def getUserInfo(self):
self._setHeaders('getUserInfo')
return self._sforce.service.getUserInfo()
def resetPassword(self, userId):
'''
Changes a user's password to a system-generated value.
'''
self._setHeaders('resetPassword')
return self._sforce.service.resetPassword(userId)
def sendEmail(self, emails):
self._setHeaders('sendEmail')
return self._handleResultTyping(self._sforce.service.sendEmail(emails))
def setPassword(self, userId, password):
'''
Sets the specified user's password to the specified value.
'''
self._setHeaders('setPassword')
return self._sforce.service.setPassword(userId, password)
# SOAP header-related calls
def setAllowFieldTruncationHeader(self, header):
self._allowFieldTruncationHeader = header
def setAssignmentRuleHeader(self, header):
self._assignmentRuleHeader = header
# setCallOptions() is only implemented in SforcePartnerClient
# http://www.salesforce.com/us/developer/docs/api/Content/sforce_api_header_calloptions.htm
def setEmailHeader(self, header):
self._emailHeader = header
def setLocaleOptions(self, header):
self._localeOptions = header
def setLoginScopeHeader(self, header):
self._loginScopeHeader = header
def setMruHeader(self, header):
self._mruHeader = header
def setPackageVersionHeader(self, header):
self._packageVersionHeader = header
def setQueryOptions(self, header):
self._queryOptions = header
def setSessionHeader(self, header):
self._sessionHeader = header
def setUserTerritoryDeleteHeader(self, header):
self._userTerritoryDeleteHeader = header
| lgpl-3.0 |
The-Compiler/qutebrowser | tests/unit/browser/webkit/network/test_pac.py | 1 | 8467 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import http.server
import threading
import logging
import pytest
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import (QNetworkProxy, QNetworkProxyQuery, QHostInfo,
QHostAddress)
from qutebrowser.browser.network import pac
pytestmark = pytest.mark.usefixtures('qapp')
def _pac_common_test(test_str):
fun_str_f = """
function FindProxyForURL(domain, host) {{
{}
return "DIRECT; PROXY 127.0.0.1:8080; SOCKS 192.168.1.1:4444";
}}
"""
fun_str = fun_str_f.format(test_str)
res = pac.PACResolver(fun_str)
proxies = res.resolve(QNetworkProxyQuery(QUrl("https://example.com/test")))
assert len(proxies) == 3
assert proxies[0].type() == QNetworkProxy.NoProxy
assert proxies[1].type() == QNetworkProxy.HttpProxy
assert proxies[1].hostName() == "127.0.0.1"
assert proxies[1].port() == 8080
assert proxies[2].type() == QNetworkProxy.Socks5Proxy
assert proxies[2].hostName() == "192.168.1.1"
assert proxies[2].port() == 4444
def _pac_equality_test(call, expected):
test_str_f = """
var res = ({0});
var expected = ({1});
if(res !== expected) {{
throw new Error("failed test {0}: got '" + res + "', expected '" + expected + "'");
}}
"""
_pac_common_test(test_str_f.format(call, expected))
def _pac_except_test(caplog, call):
test_str_f = """
var thrown = false;
try {{
var res = ({0});
}} catch(e) {{
thrown = true;
}}
if(!thrown) {{
throw new Error("failed test {0}: got '" + res + "', expected exception");
}}
"""
with caplog.at_level(logging.ERROR):
_pac_common_test(test_str_f.format(call))
def _pac_noexcept_test(call):
test_str_f = """
var res = ({0});
"""
_pac_common_test(test_str_f.format(call))
# pylint: disable=invalid-name
@pytest.mark.parametrize("domain, expected", [
("known.domain", "'1.2.3.4'"),
("bogus.domain.foobar", "null")
])
def test_dnsResolve(monkeypatch, domain, expected):
def mock_fromName(host):
info = QHostInfo()
if host == "known.domain":
info.setAddresses([QHostAddress("1.2.3.4")])
return info
monkeypatch.setattr(QHostInfo, 'fromName', mock_fromName)
_pac_equality_test("dnsResolve('{}')".format(domain), expected)
def test_myIpAddress():
_pac_equality_test("isResolvable(myIpAddress())", "true")
@pytest.mark.parametrize("host, expected", [
("example", "true"),
("example.com", "false"),
("www.example.com", "false"),
])
def test_isPlainHostName(host, expected):
_pac_equality_test("isPlainHostName('{}')".format(host), expected)
def test_proxyBindings():
_pac_equality_test("JSON.stringify(ProxyConfig.bindings)", "'{}'")
def test_invalid_port():
test_str = """
function FindProxyForURL(domain, host) {
return "PROXY 127.0.0.1:FOO";
}
"""
res = pac.PACResolver(test_str)
with pytest.raises(pac.ParseProxyError):
res.resolve(QNetworkProxyQuery(QUrl("https://example.com/test")))
@pytest.mark.parametrize('string', ["", "{"])
def test_wrong_pac_string(string):
with pytest.raises(pac.EvalProxyError):
pac.PACResolver(string)
@pytest.mark.parametrize("value", [
"",
"DIRECT FOO",
"PROXY",
"SOCKS",
"FOOBAR",
])
def test_fail_parse(value):
test_str_f = """
function FindProxyForURL(domain, host) {{
return "{}";
}}
"""
res = pac.PACResolver(test_str_f.format(value))
with pytest.raises(pac.ParseProxyError):
res.resolve(QNetworkProxyQuery(QUrl("https://example.com/test")))
def test_fail_return():
test_str = """
function FindProxyForURL(domain, host) {
return null;
}
"""
res = pac.PACResolver(test_str)
with pytest.raises(pac.EvalProxyError):
res.resolve(QNetworkProxyQuery(QUrl("https://example.com/test")))
@pytest.mark.parametrize('url, has_secret', [
('http://example.com/secret', True), # path passed with HTTP
('http://example.com?secret=yes', True), # query passed with HTTP
('http://secret@example.com', False), # user stripped with HTTP
('http://user:secret@example.com', False), # password stripped with HTTP
('https://example.com/secret', False), # path stripped with HTTPS
('https://example.com?secret=yes', False), # query stripped with HTTPS
('https://secret@example.com', False), # user stripped with HTTPS
('https://user:secret@example.com', False), # password stripped with HTTPS
])
@pytest.mark.parametrize('from_file', [True, False])
def test_secret_url(url, has_secret, from_file):
"""Make sure secret parts in a URL are stripped correctly.
The following parts are considered secret:
- If the PAC info is loaded from a local file, nothing.
    - If the URL to resolve is an HTTP URL, the username/password.
    - If the URL to resolve is an HTTPS URL, the username/password, query
and path.
"""
test_str = """
function FindProxyForURL(domain, host) {{
has_secret = domain.indexOf("secret") !== -1;
expected_secret = {};
if (has_secret !== expected_secret) {{
throw new Error("Expected secret: " + expected_secret + ", found: " + has_secret + " in " + domain);
}}
return "DIRECT";
}}
""".format('true' if (has_secret or from_file) else 'false')
res = pac.PACResolver(test_str)
res.resolve(QNetworkProxyQuery(QUrl(url)), from_file=from_file)
def test_logging(qtlog):
"""Make sure console.log() works for PAC files."""
test_str = """
function FindProxyForURL(domain, host) {
console.log("logging test");
return "DIRECT";
}
"""
res = pac.PACResolver(test_str)
res.resolve(QNetworkProxyQuery(QUrl("https://example.com/test")))
assert len(qtlog.records) == 1
assert qtlog.records[0].message == 'logging test'
def fetcher_test(test_str):
class PACHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'application/x-ns-proxy-autoconfig')
self.end_headers()
self.wfile.write(test_str.encode("ascii"))
ready_event = threading.Event()
def serve():
httpd = http.server.HTTPServer(("127.0.0.1", 8081), PACHandler)
ready_event.set()
httpd.handle_request()
httpd.server_close()
serve_thread = threading.Thread(target=serve, daemon=True)
serve_thread.start()
try:
ready_event.wait()
fetcher = pac.PACFetcher(QUrl("pac+http://127.0.0.1:8081"))
fetcher.fetch()
assert fetcher.fetch_error() is None
finally:
serve_thread.join()
return fetcher
def test_fetch_success():
test_str = """
function FindProxyForURL(domain, host) {
return "DIRECT; PROXY 127.0.0.1:8080; SOCKS 192.168.1.1:4444";
}
"""
res = fetcher_test(test_str)
proxies = res.resolve(QNetworkProxyQuery(QUrl("https://example.com/test")))
assert len(proxies) == 3
def test_fetch_evalerror(caplog):
test_str = """
function FindProxyForURL(domain, host) {
return "FOO";
}
"""
res = fetcher_test(test_str)
with caplog.at_level(logging.ERROR):
proxies = res.resolve(QNetworkProxyQuery(QUrl("https://example.com/test")))
assert len(proxies) == 1
assert proxies[0].port() == 9
| gpl-3.0 |
40223244/cdb-1 | static/Brython3.1.1-20150328-091302/Lib/unittest/loader.py | 739 | 13883 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import functools
from fnmatch import fnmatch
from . import case, suite, util
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
def _jython_aware_splitext(path):
if path.lower().endswith('$py.class'):
return path[:-9]
return os.path.splitext(path)[0]
class TestLoader(object):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = staticmethod(util.three_way_cmp)
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite." \
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, case.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
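        For example (illustrative; assumes a module 'pkg.tests' that defines
        'SomeTest.test_feature'):
            suite = loader.loadTestsFromName('pkg.tests.SomeTest.test_feature')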
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, case.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.FunctionType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
name = parts[-1]
inst = parent(name)
# static methods follow a different path
if not isinstance(getattr(inst, name), types.FunctionType):
return self.suiteClass([inst])
elif isinstance(obj, suite.TestSuite):
return obj
if callable(obj):
test = obj()
if isinstance(test, suite.TestSuite):
return test
elif isinstance(test, case.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
callable(getattr(testCaseClass, attrname))
testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
if self.sortTestMethodsUsing:
testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them and return all
tests found within them. Only test files that match the pattern will
be loaded. (Using shell style pattern matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
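        Example (illustrative; assumes tests live under './tests' inside the
        project root):
            suite = loader.discover('tests', pattern='test_*.py')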
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = self._get_directory_containing_module(top_part)
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_directory_containing_module(self, module_name):
module = sys.modules[module_name]
full_path = os.path.abspath(module.__file__)
if os.path.basename(full_path).lower().startswith('__init__.py'):
return os.path.dirname(os.path.dirname(full_path))
else:
# here we have been given a module rather than a package - so
# all we can do is search the *same* directory the module is in
            # should an exception be raised instead?
return os.path.dirname(full_path)
def _get_name_from_path(self, path):
path = _jython_aware_splitext(os.path.normpath(path))
_relpath = os.path.relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = _jython_aware_splitext(os.path.realpath(mod_file))
fullpath_noext = _jython_aware_splitext(os.path.realpath(full_path))
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = _jython_aware_splitext(os.path.basename(full_path))
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception as e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=util.three_way_cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=util.three_way_cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(
testCaseClass)
def findTestCases(module, prefix='test', sortUsing=util.three_way_cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(\
module)
| gpl-3.0 |
tudennis/LeetCode---kamyu104-11-24-2015 | Python/rotate-array.py | 2 | 2999 | from __future__ import print_function
# Time: O(n)
# Space: O(1)
# Rotate an array of n elements to the right by k steps.
#
# For example, with n = 7 and k = 3, the array [1,2,3,4,5,6,7] is rotated to [5,6,7,1,2,3,4].
#
# Note:
# Try to come up with as many solutions as you can; there are at least 3 different ways to solve this problem.
class Solution(object):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
def rotate(self, nums, k):
k %= len(nums)
self.reverse(nums, 0, len(nums))
self.reverse(nums, 0, k)
self.reverse(nums, k, len(nums))
def reverse(self, nums, start, end):
while start < end:
nums[start], nums[end - 1] = nums[end - 1], nums[start]
start += 1
end -= 1
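# Illustration of the triple-reverse trick (k = 3 on [1,2,3,4,5,6,7]):
# reverse all      -> [7,6,5,4,3,2,1]
# reverse first k  -> [5,6,7,4,3,2,1]
# reverse the rest -> [5,6,7,1,2,3,4]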
# Time: O(n)
# Space: O(1)
from fractions import gcd
class Solution2(object):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
def rotate(self, nums, k):
def apply_cycle_permutation(k, offset, cycle_len, nums):
tmp = nums[offset]
for i in xrange(1, cycle_len):
nums[(offset + i * k) % len(nums)], tmp = tmp, nums[(offset + i * k) % len(nums)]
nums[offset] = tmp
k %= len(nums)
num_cycles = gcd(len(nums), k)
cycle_len = len(nums) / num_cycles
for i in xrange(num_cycles):
apply_cycle_permutation(k, i, cycle_len, nums)
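# Worked example for the cycle decomposition (illustrative): len(nums) = 6 and
# k = 2 give gcd = 2, i.e. two cycles of length 3; the cycle starting at
# offset 0 visits indices 0 -> 2 -> 4, the one at offset 1 visits 1 -> 3 -> 5.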
# Time: O(n)
# Space: O(1)
class Solution3(object):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
def rotate(self, nums, k):
count = 0
start = 0
while count < len(nums):
curr = start
prev = nums[curr]
while True:
idx = (curr + k) % len(nums)
nums[idx], prev = prev, nums[idx]
curr = idx
count += 1
if start == curr:
break
start += 1
# Time: O(n)
# Space: O(n)
class Solution4(object):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
nums[:] = nums[len(nums) - k:] + nums[:len(nums) - k]
# Time: O(k * n)
# Space: O(1)
class Solution5(object):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
def rotate(self, nums, k):
while k > 0:
nums.insert(0, nums.pop())
k -= 1
if __name__ == '__main__':
nums = [1, 2, 3, 4, 5, 6, 7]
Solution().rotate(nums, 3)
print(nums)
| mit |
wweiradio/django | tests/model_formsets_regress/models.py | 281 | 1226 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class User(models.Model):
username = models.CharField(max_length=12, unique=True)
serial = models.IntegerField()
class UserSite(models.Model):
user = models.ForeignKey(User, models.CASCADE, to_field="username")
data = models.IntegerField()
class UserProfile(models.Model):
user = models.ForeignKey(User, models.CASCADE, unique=True, to_field="username")
about = models.TextField()
class ProfileNetwork(models.Model):
profile = models.ForeignKey(UserProfile, models.CASCADE, to_field="user")
network = models.IntegerField()
identifier = models.IntegerField()
class Place(models.Model):
name = models.CharField(max_length=50)
class Restaurant(Place):
pass
class Manager(models.Model):
restaurant = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
class Network(models.Model):
name = models.CharField(max_length=15)
@python_2_unicode_compatible
class Host(models.Model):
network = models.ForeignKey(Network, models.CASCADE)
hostname = models.CharField(max_length=25)
def __str__(self):
return self.hostname
| bsd-3-clause |
windyuuy/opera | chromium/src/third_party/mesa/src/src/mapi/glapi/gen/gl_x86_asm.py | 33 | 8741 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGenericStubs(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "gl_x86_asm.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004, 2005""", "BRIAN PAUL, IBM")
return
def get_stack_size(self, f):
size = 0
for p in f.parameterIterator():
if p.is_padding:
continue
size += p.get_stack_size()
return size
def printRealHeader(self):
print '#include "x86/assyntax.h"'
print '#include "glapi/glapioffsets.h"'
print ''
print '#if defined(STDCALL_API)'
print '# if defined(USE_MGL_NAMESPACE)'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n2))'
print '# else'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n2))'
print '# endif'
print '#else'
print '# if defined(USE_MGL_NAMESPACE)'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n))'
print '# define _glapi_Dispatch _mglapi_Dispatch'
print '# else'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n))'
print '# endif'
print '#endif'
print ''
print '#define GL_OFFSET(x) CODEPTR(REGOFF(4 * x, EAX))'
print ''
print '#if defined(GNU_ASSEMBLER) && !defined(__DJGPP__) && !defined(__MINGW32__) && !defined(__APPLE__)'
print '#define GLOBL_FN(x) GLOBL x ; .type x, function'
print '#else'
print '#define GLOBL_FN(x) GLOBL x'
print '#endif'
print ''
print '#if defined(PTHREADS) || defined(WIN32_THREADS) || defined(BEOS_THREADS)'
print '# define THREADS'
print '#endif'
print ''
print '#ifdef GLX_USE_TLS'
print ''
print '#ifdef GLX_X86_READONLY_TEXT'
print '# define CTX_INSNS MOV_L(GS:(EAX), EAX)'
print '#else'
print '# define CTX_INSNS NOP /* Pad for init_glapi_relocs() */'
print '#endif'
print ''
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tCALL(_x86_get_dispatch) ;\t\t\t\\'
print '\tCTX_INSNS ; \\'
print '\tJMP(GL_OFFSET(off))'
print ''
print '#elif defined(PTHREADS)'
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
print '\tJE(1f) ;\t\t\t\t\t\\'
print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
print '1:\tCALL(_x86_get_dispatch) ;\t\t\t\\'
print '\tJMP(GL_OFFSET(off))'
print '#elif defined(THREADS)'
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
print '\tJE(1f) ;\t\t\t\t\t\\'
print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
print '1:\tCALL(_glapi_get_dispatch) ;\t\t\t\\'
print '\tJMP(GL_OFFSET(off))'
print '#else /* Non-threaded version. */'
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
print '\tJMP(GL_OFFSET(off))'
print '#endif'
print ''
print '#ifdef HAVE_ALIAS'
print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
print '\t.globl\tGL_PREFIX(fn, fn_alt) ;\t\t\t\\'
print '\t.set\tGL_PREFIX(fn, fn_alt), GL_PREFIX(alias, alias_alt)'
print '#else'
print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
print ' GL_STUB(fn, off, fn_alt)'
print '#endif'
print ''
print 'SEG_TEXT'
print ''
print '#ifdef GLX_USE_TLS'
print ''
print '\tGLOBL\tGLNAME(_x86_get_dispatch)'
print '\tHIDDEN(GLNAME(_x86_get_dispatch))'
print 'ALIGNTEXT16'
print 'GLNAME(_x86_get_dispatch):'
print '\tcall 1f'
print '1:\tpopl %eax'
print '\taddl $_GLOBAL_OFFSET_TABLE_+[.-1b], %eax'
print '\tmovl _glapi_tls_Dispatch@GOTNTPOFF(%eax), %eax'
print '\tret'
print ''
print '#elif defined(PTHREADS)'
print 'EXTERN GLNAME(_glapi_Dispatch)'
print 'EXTERN GLNAME(_gl_DispatchTSD)'
print 'EXTERN GLNAME(pthread_getspecific)'
print ''
print 'ALIGNTEXT16'
print 'GLNAME(_x86_get_dispatch):'
print '\tSUB_L(CONST(24), ESP)'
print '\tPUSH_L(GLNAME(_gl_DispatchTSD))'
print '\tCALL(GLNAME(pthread_getspecific))'
print '\tADD_L(CONST(28), ESP)'
print '\tRET'
print '#elif defined(THREADS)'
print 'EXTERN GLNAME(_glapi_get_dispatch)'
print '#endif'
print ''
print '#if defined( GLX_USE_TLS ) && !defined( GLX_X86_READONLY_TEXT )'
print '\t\t.section\twtext, "awx", @progbits'
print '#endif /* defined( GLX_USE_TLS ) */'
print ''
print '\t\tALIGNTEXT16'
print '\t\tGLOBL GLNAME(gl_dispatch_functions_start)'
print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_start))'
print 'GLNAME(gl_dispatch_functions_start):'
print ''
return
def printRealFooter(self):
print ''
print '\t\tGLOBL\tGLNAME(gl_dispatch_functions_end)'
print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_end))'
print '\t\tALIGNTEXT16'
print 'GLNAME(gl_dispatch_functions_end):'
print ''
print '#if defined(GLX_USE_TLS) && defined(__linux__)'
print ' .section ".note.ABI-tag", "a"'
print ' .p2align 2'
print ' .long 1f - 0f /* name length */'
print ' .long 3f - 2f /* data length */'
print ' .long 1 /* note length */'
print '0: .asciz "GNU" /* vendor name */'
print '1: .p2align 2'
print '2: .long 0 /* note data: the ABI tag */'
print ' .long 2,4,20 /* Minimum kernel version w/TLS */'
print '3: .p2align 2 /* pad out section */'
print '#endif /* GLX_USE_TLS */'
print ''
print '#if defined (__ELF__) && defined (__linux__)'
print ' .section .note.GNU-stack,"",%progbits'
print '#endif'
return
def printBody(self, api):
for f in api.functionIterateByOffset():
name = f.dispatch_name()
stack = self.get_stack_size(f)
alt = "%s@%u" % (name, stack)
print '\tGL_STUB(%s, _gloffset_%s, %s)' % (name, f.name, alt)
if not f.is_static_entry_point(f.name):
print '\tHIDDEN(GL_PREFIX(%s, %s))' % (name, alt)
for f in api.functionIterateByOffset():
name = f.dispatch_name()
stack = self.get_stack_size(f)
alt = "%s@%u" % (name, stack)
for n in f.entry_points:
if f.is_static_entry_point(n):
if n != f.name:
alt2 = "%s@%u" % (n, stack)
text = '\tGL_STUB_ALIAS(%s, _gloffset_%s, %s, %s, %s)' % (n, f.name, alt2, name, alt)
if f.has_different_protocol(n):
print '#ifndef GLX_INDIRECT_RENDERING'
print text
print '#endif'
else:
print text
return
def show_usage():
print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
mode = "generic"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
except Exception,e:
show_usage()
for (arg,val) in args:
if arg == '-m':
mode = val
elif arg == "-f":
file_name = val
if mode == "generic":
printer = PrintGenericStubs()
else:
print "ERROR: Invalid mode \"%s\" specified." % mode
show_usage()
api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
printer.Print(api)
| bsd-3-clause |
NetApp/manila | manila/api/views/share_networks.py | 1 | 2764 | # Copyright 2014 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila.api import common
class ViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = 'share_networks'
_detail_version_modifiers = ["add_gateway", "add_mtu"]
def build_share_network(self, request, share_network):
"""View of a share network."""
return {'share_network': self._build_share_network_view(
request, share_network)}
def build_share_networks(self, request, share_networks, is_detail=True):
return {'share_networks':
[self._build_share_network_view(
request, share_network, is_detail)
for share_network in share_networks]}
def _build_share_network_view(self, request, share_network,
is_detail=True):
sn = {
'id': share_network.get('id'),
'name': share_network.get('name'),
}
if is_detail:
sn.update({
'project_id': share_network.get('project_id'),
'created_at': share_network.get('created_at'),
'updated_at': share_network.get('updated_at'),
'neutron_net_id': share_network.get('neutron_net_id'),
'neutron_subnet_id': share_network.get('neutron_subnet_id'),
'nova_net_id': share_network.get('nova_net_id'),
'network_type': share_network.get('network_type'),
'segmentation_id': share_network.get('segmentation_id'),
'cidr': share_network.get('cidr'),
'ip_version': share_network.get('ip_version'),
'description': share_network.get('description'),
})
self.update_versioned_resource_dict(request, sn, share_network)
return sn
@common.ViewBuilder.versioned_method("2.18")
def add_gateway(self, context, network_dict, network):
network_dict['gateway'] = network.get('gateway')
@common.ViewBuilder.versioned_method("2.20")
def add_mtu(self, context, network_dict, network):
network_dict['mtu'] = network.get('mtu')
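    # Illustrative response shape: for API microversions >= 2.20 the detail
    # view also carries 'gateway' and 'mtu', e.g.
    # {'share_network': {'id': '...', 'name': '...', 'gateway': '10.0.0.1',
    #                    'mtu': 1500, ...}}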
| apache-2.0 |
holyangel/LGG3 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
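# Example (illustrative): with values {1: "RD", 2: "WR"} and delim "|" defined
# for a field, flag_str(event, field, 3) renders "RD | WR".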
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
jr7/pypiv | pypiv/piv/peak_detection.py | 1 | 4526 | import numpy as np
import numpy.linalg as nl
def find_peak(corr, method='gaussian'):
"""Peak detection algorithm switch
    After loading the correlation window a maximum finder is invoked.
    The correlation window is cut down to the necessary 9 points around the maximum.
    Afterwards the maximum is checked not to be close to the border of the correlation frame.
    This cropped window is used along with the chosen method to interpolate the sub pixel shift.
    Each interpolation method returns a tuple with the sub pixel shift in x and y direction.
    The maximum's position and the sub pixel shift are added and returned.
    If an error occurs during the sub pixel interpolation the shift is set to nan.
    Also if the interpolation method is unknown an exception is thrown.
:param corr: correlation window
:param method: peak finder algorithm (gaussian, centroid, parabolic, 9point)
:raises: Sub pixel interpolation method not found
:returns: shift in interrogation window
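    Usage sketch (illustrative; ``corr`` is any 2D numpy array whose maximum
    lies away from the border):
        di, dj = find_peak(corr, method='gaussian')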
"""
i, j = np.unravel_index(corr.argmax(), corr.shape)
if check_peak_position(corr, i, j) is False:
return np.nan, np.nan
window = corr[i-1:i+2, j-1:j+2]
if method == 'gaussian':
subpixel_interpolation = gaussian
elif method == 'centroid':
subpixel_interpolation = centroid
elif method == 'parabolic':
subpixel_interpolation = parabolic
elif method == '9point':
subpixel_interpolation = gaussian2D
else:
raise Exception('Sub pixel interpolation method not found!')
try:
dx, dy = subpixel_interpolation(window)
except:
return np.nan, np.nan
else:
return (i + dx, j + dy)
def check_peak_position(corr, i, j):
"""Checking weather the maximum is at the boarder of the correlation window
:param corr: correlation window
:param i: first index position of the maximum
:param j: second index position of the maximum
:returns: true if maximum is inside the correlation window
"""
dist = 3
li, lj = corr.shape
i_inside = (i >= dist) & (i < li - dist)
j_inside = (j >= dist) & (j < lj - dist)
if i_inside and j_inside:
return True
else:
return False
def gaussian(window):
"""Gaussian interpolation for sub pixel shift"""
ip = lambda x : (np.log(x[0]) - np.log(x[2]))\
/(2*np.log(x[2]) - 4*np.log(x[1]) + 2*np.log(x[0]))
return ip(window[:, 1]), ip(window[1])
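# Derivation note (sketch): fitting a Gaussian exp(-(x - dx)**2 / s) through
# the three points (-1, x0), (0, x1), (1, x2) in log space yields
#     dx = (log(x0) - log(x2)) / (2*log(x0) - 4*log(x1) + 2*log(x2)),
# which is the expression applied above to each axis of the 3x3 window.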
def centroid(window):
"""Centroid interpolation for sub pixel shift"""
ip = lambda x : (x[2] - x[0])/(x[0] + x[1] + x[2])
return ip(window[:, 1]), ip(window[1])
def parabolic(window):
"""Parabolic interpolation for sub pixel shift"""
ip = lambda x : (x[0] - x[2])/(2*x[0] - 4*x[1] + 2*x[2])
return ip(window[:, 1]), ip(window[1])
def gaussian2D(window):
"""Real 2D Gaussian interpolation for sub pixel shift"""
    # weighted least-squares 2D Gaussian fit; see the reference in the paper
w = np.ones((3, 3))*(1./9)
rhs = np.zeros(6)
M = np.zeros((6,6))
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
rhs = rhs + np.array([i*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
j*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
i*j*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
i*i*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
j*j*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
w[i+1, j+1]*np.log(np.abs(window[i+1, j+1]))], dtype='float')
M = M + w[i+1, j+1]*np.array([[ i*i, i*j, i*i*j, i*i*i, i*j*j, i],
[ i*j, j*j, i*j*j, i*i*j, j*j*j, j],
[i*i*j, i*j*j, i*i*j*j, i*i*i*j, i*j*j*j, i*j],
[i*i*i, i*i*j, i*i*i*j, i*i*i*i, i*i*j*j, i*i],
[i*j*j, j*j*j, i*j*j*j, i*i*j*j, j*j*j*j, j*j],
[ i, j, i*j, i*i, j*j, 1]], dtype='float')
solution = nl.solve(M, rhs)
dx = ( solution[2]*solution[1] - 2.0*solution[0]*solution[4])/ \
(4.0*solution[3]*solution[4] - solution[2]*solution[2])
dy = ( solution[2]*solution[0] - 2.0*solution[1]*solution[3])/ \
(4.0*solution[3]*solution[4] - solution[2]*solution[2])
return dx, dy
| bsd-3-clause |
mcus/SickRage | lib/sqlalchemy/orm/util.py | 78 | 35759 | # orm/util.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import sql, util, event, exc as sa_exc, inspection
from ..sql import expression, util as sql_util, operators
from .interfaces import PropComparator, MapperProperty
from . import attributes
import re
from .base import instance_str, state_str, state_class_str, attribute_str, \
state_attribute_str, object_mapper, object_state, _none_set
from .base import class_mapper, _class_to_mapper
from .base import _InspectionAttr
from .path_registry import PathRegistry
all_cascades = frozenset(("delete", "delete-orphan", "all", "merge",
"expunge", "save-update", "refresh-expire",
"none"))
class CascadeOptions(frozenset):
"""Keeps track of the options sent to relationship().cascade"""
_add_w_all_cascades = all_cascades.difference([
'all', 'none', 'delete-orphan'])
_allowed_cascades = all_cascades
def __new__(cls, arg):
values = set([
c for c
            in re.split(r'\s*,\s*', arg or "")
if c
])
if values.difference(cls._allowed_cascades):
raise sa_exc.ArgumentError(
"Invalid cascade option(s): %s" %
", ".join([repr(x) for x in
sorted(
values.difference(cls._allowed_cascades)
)])
)
if "all" in values:
values.update(cls._add_w_all_cascades)
if "none" in values:
values.clear()
values.discard('all')
self = frozenset.__new__(CascadeOptions, values)
self.save_update = 'save-update' in values
self.delete = 'delete' in values
self.refresh_expire = 'refresh-expire' in values
self.merge = 'merge' in values
self.expunge = 'expunge' in values
self.delete_orphan = "delete-orphan" in values
if self.delete_orphan and not self.delete:
util.warn("The 'delete-orphan' cascade "
"option requires 'delete'.")
return self
def __repr__(self):
return "CascadeOptions(%r)" % (
",".join([x for x in sorted(self)])
)
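# Example (illustrative): CascadeOptions("all, delete-orphan") expands "all"
# into save-update, merge, refresh-expire, expunge and delete, and keeps
# delete-orphan, so flags such as .delete and .delete_orphan are all True.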
def _validator_events(desc, key, validator, include_removes, include_backrefs):
"""Runs a validation method on an attribute value to be set or appended."""
if not include_backrefs:
def detect_is_backref(state, initiator):
impl = state.manager[key].impl
return initiator.impl is not impl
if include_removes:
def append(state, value, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value, False)
else:
return value
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value, False)
else:
return value
def remove(state, value, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
validator(state.obj(), key, value, True)
else:
def append(state, value, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value)
else:
return value
def set_(state, value, oldvalue, initiator):
if include_backrefs or not detect_is_backref(state, initiator):
return validator(state.obj(), key, value)
else:
return value
event.listen(desc, 'append', append, raw=True, retval=True)
event.listen(desc, 'set', set_, raw=True, retval=True)
if include_removes:
event.listen(desc, "remove", remove, raw=True, retval=True)
def polymorphic_union(table_map, typecolname,
aliasname='p_union', cast_nulls=True):
"""Create a ``UNION`` statement used by a polymorphic mapper.
See :ref:`concrete_inheritance` for an example of how
this is used.
:param table_map: mapping of polymorphic identities to
:class:`.Table` objects.
:param typecolname: string name of a "discriminator" column, which will be
derived from the query, producing the polymorphic identity for
each row. If ``None``, no polymorphic discriminator is generated.
:param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()`
construct generated.
:param cast_nulls: if True, non-existent columns, which are represented
as labeled NULLs, will be passed into CAST. This is a legacy behavior
that is problematic on some backends such as Oracle - in which case it
can be set to False.
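    Usage sketch (illustrative; ``engineer_table`` and ``manager_table`` are
    assumed concrete-inheritance tables sharing a common column set)::

        pjoin = polymorphic_union(
            {'engineer': engineer_table, 'manager': manager_table},
            'type', 'pjoin')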
"""
colnames = util.OrderedSet()
colnamemaps = {}
types = {}
for key in table_map:
table = table_map[key]
        # MySQL doesn't like selecting from a select;
# make it an alias of the select
if isinstance(table, sql.Select):
table = table.alias()
table_map[key] = table
m = {}
for c in table.c:
colnames.add(c.key)
m[c.key] = c
types[c.key] = c.type
colnamemaps[table] = m
def col(name, table):
try:
return colnamemaps[table][name]
except KeyError:
if cast_nulls:
return sql.cast(sql.null(), types[name]).label(name)
else:
return sql.type_coerce(sql.null(), types[name]).label(name)
result = []
for type, table in table_map.items():
if typecolname is not None:
result.append(
sql.select([col(name, table) for name in colnames] +
[sql.literal_column(sql_util._quote_ddl_expr(type)).
label(typecolname)],
from_obj=[table]))
else:
result.append(sql.select([col(name, table) for name in colnames],
from_obj=[table]))
return sql.union_all(*result).alias(aliasname)
def identity_key(*args, **kwargs):
"""Generate "identity key" tuples, as are used as keys in the
:attr:`.Session.identity_map` dictionary.
This function has several call styles:
* ``identity_key(class, ident)``
This form receives a mapped class and a primary key scalar or
tuple as an argument.
E.g.::
>>> identity_key(MyClass, (1, 2))
(<class '__main__.MyClass'>, (1, 2))
:param class: mapped class (must be a positional argument)
:param ident: primary key, may be a scalar or tuple argument.
* ``identity_key(instance=instance)``
This form will produce the identity key for a given instance. The
instance need not be persistent, only that its primary key attributes
are populated (else the key will contain ``None`` for those missing
values).
E.g.::
>>> instance = MyClass(1, 2)
>>> identity_key(instance=instance)
(<class '__main__.MyClass'>, (1, 2))
In this form, the given instance is ultimately run though
:meth:`.Mapper.identity_key_from_instance`, which will have the
effect of performing a database check for the corresponding row
if the object is expired.
:param instance: object instance (must be given as a keyword arg)
* ``identity_key(class, row=row)``
This form is similar to the class/tuple form, except is passed a
database result row as a :class:`.RowProxy` object.
E.g.::
>>> row = engine.execute("select * from table where a=1 and b=2").first()
>>> identity_key(MyClass, row=row)
(<class '__main__.MyClass'>, (1, 2))
:param class: mapped class (must be a positional argument)
:param row: :class:`.RowProxy` row returned by a :class:`.ResultProxy`
(must be given as a keyword arg)
"""
if args:
if len(args) == 1:
class_ = args[0]
try:
row = kwargs.pop("row")
except KeyError:
ident = kwargs.pop("ident")
elif len(args) == 2:
class_, ident = args
elif len(args) == 3:
class_, ident = args
else:
raise sa_exc.ArgumentError("expected up to three "
"positional arguments, got %s" % len(args))
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs))
mapper = class_mapper(class_)
if "ident" in locals():
return mapper.identity_key_from_primary_key(util.to_list(ident))
return mapper.identity_key_from_row(row)
instance = kwargs.pop("instance")
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs.keys))
mapper = object_mapper(instance)
return mapper.identity_key_from_instance(instance)
class ORMAdapter(sql_util.ColumnAdapter):
"""Extends ColumnAdapter to accept ORM entities.
The selectable is extracted from the given entity,
and the AliasedClass if any is referenced.
"""
def __init__(self, entity, equivalents=None, adapt_required=False,
chain_to=None):
info = inspection.inspect(entity)
self.mapper = info.mapper
selectable = info.selectable
is_aliased_class = info.is_aliased_class
if is_aliased_class:
self.aliased_class = entity
else:
self.aliased_class = None
sql_util.ColumnAdapter.__init__(self, selectable,
equivalents, chain_to,
adapt_required=adapt_required)
def replace(self, elem):
entity = elem._annotations.get('parentmapper', None)
if not entity or entity.isa(self.mapper):
return sql_util.ColumnAdapter.replace(self, elem)
else:
return None
class AliasedClass(object):
"""Represents an "aliased" form of a mapped class for usage with Query.
The ORM equivalent of a :func:`sqlalchemy.sql.expression.alias`
construct, this object mimics the mapped class using a
__getattr__ scheme and maintains a reference to a
real :class:`~sqlalchemy.sql.expression.Alias` object.
Usage is via the :func:`.orm.aliased` function, or alternatively
via the :func:`.orm.with_polymorphic` function.
Usage example::
# find all pairs of users with the same name
user_alias = aliased(User)
session.query(User, user_alias).\\
join((user_alias, User.id > user_alias.id)).\\
filter(User.name==user_alias.name)
The resulting object is an instance of :class:`.AliasedClass`.
This object implements an attribute scheme which produces the
same attribute and method interface as the original mapped
class, allowing :class:`.AliasedClass` to be compatible
with any attribute technique which works on the original class,
including hybrid attributes (see :ref:`hybrids_toplevel`).
The :class:`.AliasedClass` can be inspected for its underlying
:class:`.Mapper`, aliased selectable, and other information
using :func:`.inspect`::
from sqlalchemy import inspect
my_alias = aliased(MyClass)
insp = inspect(my_alias)
The resulting inspection object is an instance of :class:`.AliasedInsp`.
See :func:`.aliased` and :func:`.with_polymorphic` for construction
argument descriptions.
"""
def __init__(self, cls, alias=None,
name=None,
flat=False,
adapt_on_names=False,
# TODO: None for default here?
with_polymorphic_mappers=(),
with_polymorphic_discriminator=None,
base_alias=None,
use_mapper_path=False):
mapper = _class_to_mapper(cls)
if alias is None:
alias = mapper._with_polymorphic_selectable.alias(
name=name, flat=flat)
self._aliased_insp = AliasedInsp(
self,
mapper,
alias,
name,
with_polymorphic_mappers
if with_polymorphic_mappers
else mapper.with_polymorphic_mappers,
with_polymorphic_discriminator
if with_polymorphic_discriminator is not None
else mapper.polymorphic_on,
base_alias,
use_mapper_path,
adapt_on_names
)
self.__name__ = 'AliasedClass_%s' % mapper.class_.__name__
def __getattr__(self, key):
try:
_aliased_insp = self.__dict__['_aliased_insp']
except KeyError:
raise AttributeError()
else:
for base in _aliased_insp._target.__mro__:
try:
attr = object.__getattribute__(base, key)
except AttributeError:
continue
else:
break
else:
raise AttributeError(key)
if isinstance(attr, PropComparator):
ret = attr.adapt_to_entity(_aliased_insp)
setattr(self, key, ret)
return ret
elif hasattr(attr, 'func_code'):
is_method = getattr(_aliased_insp._target, key, None)
if is_method and is_method.__self__ is not None:
return util.types.MethodType(attr.__func__, self, self)
else:
return None
elif hasattr(attr, '__get__'):
ret = attr.__get__(None, self)
if isinstance(ret, PropComparator):
return ret.adapt_to_entity(_aliased_insp)
else:
return ret
else:
return attr
def __repr__(self):
return '<AliasedClass at 0x%x; %s>' % (
id(self), self._aliased_insp._target.__name__)
class AliasedInsp(_InspectionAttr):
"""Provide an inspection interface for an
:class:`.AliasedClass` object.
The :class:`.AliasedInsp` object is returned
given an :class:`.AliasedClass` using the
:func:`.inspect` function::
from sqlalchemy import inspect
from sqlalchemy.orm import aliased
my_alias = aliased(MyMappedClass)
insp = inspect(my_alias)
Attributes on :class:`.AliasedInsp`
include:
* ``entity`` - the :class:`.AliasedClass` represented.
* ``mapper`` - the :class:`.Mapper` mapping the underlying class.
* ``selectable`` - the :class:`.Alias` construct which ultimately
represents an aliased :class:`.Table` or :class:`.Select`
construct.
* ``name`` - the name of the alias. Also is used as the attribute
name when returned in a result tuple from :class:`.Query`.
* ``with_polymorphic_mappers`` - collection of :class:`.Mapper` objects
indicating all those mappers expressed in the select construct
for the :class:`.AliasedClass`.
* ``polymorphic_on`` - an alternate column or SQL expression which
will be used as the "discriminator" for a polymorphic load.
.. seealso::
:ref:`inspection_toplevel`
"""
def __init__(self, entity, mapper, selectable, name,
with_polymorphic_mappers, polymorphic_on,
_base_alias, _use_mapper_path, adapt_on_names):
self.entity = entity
self.mapper = mapper
self.selectable = selectable
self.name = name
self.with_polymorphic_mappers = with_polymorphic_mappers
self.polymorphic_on = polymorphic_on
self._base_alias = _base_alias or self
self._use_mapper_path = _use_mapper_path
self._adapter = sql_util.ClauseAdapter(selectable,
equivalents=mapper._equivalent_columns,
adapt_on_names=adapt_on_names)
self._adapt_on_names = adapt_on_names
self._target = mapper.class_
for poly in self.with_polymorphic_mappers:
if poly is not mapper:
setattr(self.entity, poly.class_.__name__,
AliasedClass(poly.class_, selectable, base_alias=self,
adapt_on_names=adapt_on_names,
use_mapper_path=_use_mapper_path))
is_aliased_class = True
"always returns True"
@property
def class_(self):
"""Return the mapped class ultimately represented by this
:class:`.AliasedInsp`."""
return self.mapper.class_
@util.memoized_property
def _path_registry(self):
if self._use_mapper_path:
return self.mapper._path_registry
else:
return PathRegistry.per_mapper(self)
def __getstate__(self):
return {
'entity': self.entity,
'mapper': self.mapper,
'alias': self.selectable,
'name': self.name,
'adapt_on_names': self._adapt_on_names,
'with_polymorphic_mappers':
self.with_polymorphic_mappers,
'with_polymorphic_discriminator':
self.polymorphic_on,
'base_alias': self._base_alias,
'use_mapper_path': self._use_mapper_path
}
def __setstate__(self, state):
self.__init__(
state['entity'],
state['mapper'],
state['alias'],
state['name'],
state['with_polymorphic_mappers'],
state['with_polymorphic_discriminator'],
state['base_alias'],
state['use_mapper_path'],
state['adapt_on_names']
)
def _adapt_element(self, elem):
return self._adapter.traverse(elem).\
_annotate({
'parententity': self.entity,
'parentmapper': self.mapper}
)
def _entity_for_mapper(self, mapper):
self_poly = self.with_polymorphic_mappers
if mapper in self_poly:
return getattr(self.entity, mapper.class_.__name__)._aliased_insp
elif mapper.isa(self.mapper):
return self
else:
assert False, "mapper %s doesn't correspond to %s" % (mapper, self)
def __repr__(self):
return '<AliasedInsp at 0x%x; %s>' % (
id(self), self.class_.__name__)
inspection._inspects(AliasedClass)(lambda target: target._aliased_insp)
inspection._inspects(AliasedInsp)(lambda target: target)
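# A comment-only sketch of the inspection hooks registered above, assuming a
# hypothetical mapped class ``User``:
#
#     from sqlalchemy import inspect
#     from sqlalchemy.orm import aliased
#
#     user_alias = aliased(User, name='u1')
#     insp = inspect(user_alias)      # -> the AliasedInsp for user_alias
#     insp.mapper is inspect(User)    # True: same underlying Mapper
#     insp.selectable                 # the anonymous Alias of the users table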
def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False):
"""Produce an alias of the given element, usually an :class:`.AliasedClass`
instance.
E.g.::
my_alias = aliased(MyClass)
session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id)
The :func:`.aliased` function is used to create an ad-hoc mapping
of a mapped class to a new selectable. By default, a selectable
is generated from the normally mapped selectable (typically a
:class:`.Table`) using the :meth:`.FromClause.alias` method.
However, :func:`.aliased` can also be used to link the class to
a new :func:`.select` statement. Also, the :func:`.with_polymorphic`
function is a variant of :func:`.aliased` that is intended to specify
a so-called "polymorphic selectable", that corresponds to the union
of several joined-inheritance subclasses at once.
For convenience, the :func:`.aliased` function also accepts plain
:class:`.FromClause` constructs, such as a :class:`.Table` or
:func:`.select` construct. In those cases, the :meth:`.FromClause.alias`
method is called on the object and the new :class:`.Alias` object
returned. The returned :class:`.Alias` is not ORM-mapped in this case.
:param element: element to be aliased. Is normally a mapped class,
but for convenience can also be a :class:`.FromClause` element.
:param alias: Optional selectable unit to map the element to. This should
normally be a :class:`.Alias` object corresponding to the :class:`.Table`
to which the class is mapped, or to a :func:`.select` construct that
is compatible with the mapping. By default, a simple anonymous
alias of the mapped table is generated.
:param name: optional string name to use for the alias, if not specified
by the ``alias`` parameter. The name, among other things, forms the
attribute name that will be accessible via tuples returned by a
:class:`.Query` object.
:param flat: Boolean, will be passed through to the :meth:`.FromClause.alias`
call so that aliases of :class:`.Join` objects don't include an enclosing
SELECT. This can lead to more efficient queries in many circumstances.
A JOIN against a nested JOIN will be rewritten as a JOIN against an aliased
SELECT subquery on backends that don't support this syntax.
.. versionadded:: 0.9.0
.. seealso:: :meth:`.Join.alias`
:param adapt_on_names: if True, more liberal "matching" will be used when
mapping the mapped columns of the ORM entity to those of the
given selectable - a name-based match will be performed if the
given selectable doesn't otherwise have a column that corresponds
to one on the entity. The use case for this is when associating
an entity with some derived selectable such as one that uses
aggregate functions::
class UnitPrice(Base):
__tablename__ = 'unit_price'
...
unit_id = Column(Integer)
price = Column(Numeric)
aggregated_unit_price = Session.query(
func.sum(UnitPrice.price).label('price')
).group_by(UnitPrice.unit_id).subquery()
aggregated_unit_price = aliased(UnitPrice,
alias=aggregated_unit_price, adapt_on_names=True)
Above, functions on ``aggregated_unit_price`` which refer to
``.price`` will return the
    ``func.sum(UnitPrice.price).label('price')`` column, as it is
matched on the name "price". Ordinarily, the "price" function
wouldn't have any "column correspondence" to the actual
``UnitPrice.price`` column as it is not a proxy of the original.
.. versionadded:: 0.7.3
"""
if isinstance(element, expression.FromClause):
if adapt_on_names:
raise sa_exc.ArgumentError(
"adapt_on_names only applies to ORM elements"
)
return element.alias(name, flat=flat)
else:
return AliasedClass(element, alias=alias, flat=flat,
name=name, adapt_on_names=adapt_on_names)
def with_polymorphic(base, classes, selectable=False,
flat=False,
polymorphic_on=None, aliased=False,
innerjoin=False, _use_mapper_path=False):
"""Produce an :class:`.AliasedClass` construct which specifies
columns for descendant mappers of the given base.
.. versionadded:: 0.8
:func:`.orm.with_polymorphic` is in addition to the existing
:class:`.Query` method :meth:`.Query.with_polymorphic`,
which has the same purpose but is not as flexible in its usage.
Using this method will ensure that each descendant mapper's
tables are included in the FROM clause, and will allow filter()
criterion to be used against those tables. The resulting
instances will also have those columns already loaded so that
no "post fetch" of those columns will be required.
See the examples at :ref:`with_polymorphic`.
:param base: Base class to be aliased.
:param classes: a single class or mapper, or list of
class/mappers, which inherit from the base class.
Alternatively, it may also be the string ``'*'``, in which case
all descending mapped classes will be added to the FROM clause.
:param aliased: when True, the selectable will be wrapped in an
alias, that is ``(SELECT * FROM <fromclauses>) AS anon_1``.
This can be important when using the with_polymorphic()
to create the target of a JOIN on a backend that does not
support parenthesized joins, such as SQLite and older
versions of MySQL.
:param flat: Boolean, will be passed through to the :meth:`.FromClause.alias`
call so that aliases of :class:`.Join` objects don't include an enclosing
SELECT. This can lead to more efficient queries in many circumstances.
A JOIN against a nested JOIN will be rewritten as a JOIN against an aliased
SELECT subquery on backends that don't support this syntax.
Setting ``flat`` to ``True`` implies the ``aliased`` flag is
also ``True``.
.. versionadded:: 0.9.0
.. seealso:: :meth:`.Join.alias`
:param selectable: a table or select() statement that will
be used in place of the generated FROM clause. This argument is
required if any of the desired classes use concrete table
inheritance, since SQLAlchemy currently cannot generate UNIONs
among tables automatically. If used, the ``selectable`` argument
must represent the full set of tables and columns mapped by every
mapped class. Otherwise, the unaccounted mapped columns will
result in their table being appended directly to the FROM clause
which will usually lead to incorrect results.
:param polymorphic_on: a column to be used as the "discriminator"
column for the given selectable. If not given, the polymorphic_on
attribute of the base classes' mapper will be used, if any. This
is useful for mappings that don't have polymorphic loading
behavior by default.
:param innerjoin: if True, an INNER JOIN will be used. This should
      only be specified when querying for one specific subtype.
"""
primary_mapper = _class_to_mapper(base)
mappers, selectable = primary_mapper.\
_with_polymorphic_args(classes, selectable,
innerjoin=innerjoin)
if aliased or flat:
selectable = selectable.alias(flat=flat)
return AliasedClass(base,
selectable,
with_polymorphic_mappers=mappers,
with_polymorphic_discriminator=polymorphic_on,
use_mapper_path=_use_mapper_path)
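# A minimal usage sketch of with_polymorphic(), assuming a joined-inheritance
# hierarchy Employee -> (Engineer, Manager); all names are illustrative:
#
#     eng_mgr = with_polymorphic(Employee, [Engineer, Manager])
#     session.query(eng_mgr).filter(
#         or_(eng_mgr.Engineer.engineer_info == 'x',
#             eng_mgr.Manager.manager_data == 'y'))
#
# The subclass accessors exist because AliasedInsp.__init__ above setattr()s
# an AliasedClass for each mapper in with_polymorphic_mappers.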
def _orm_annotate(element, exclude=None):
"""Deep copy the given ClauseElement, annotating each element with the
"_orm_adapt" flag.
Elements within the exclude collection will be cloned but not annotated.
"""
return sql_util._deep_annotate(element, {'_orm_adapt': True}, exclude)
def _orm_deannotate(element):
"""Remove annotations that link a column to a particular mapping.
Note this doesn't affect "remote" and "foreign" annotations
passed by the :func:`.orm.foreign` and :func:`.orm.remote`
annotators.
"""
return sql_util._deep_deannotate(element,
values=("_orm_adapt", "parententity")
)
def _orm_full_deannotate(element):
return sql_util._deep_deannotate(element)
class _ORMJoin(expression.Join):
"""Extend Join to support ORM constructs as input."""
__visit_name__ = expression.Join.__visit_name__
def __init__(self, left, right, onclause=None, isouter=False):
left_info = inspection.inspect(left)
left_orm_info = getattr(left, '_joined_from_info', left_info)
right_info = inspection.inspect(right)
adapt_to = right_info.selectable
self._joined_from_info = right_info
if isinstance(onclause, util.string_types):
onclause = getattr(left_orm_info.entity, onclause)
if isinstance(onclause, attributes.QueryableAttribute):
on_selectable = onclause.comparator._source_selectable()
prop = onclause.property
elif isinstance(onclause, MapperProperty):
prop = onclause
on_selectable = prop.parent.selectable
else:
prop = None
if prop:
if sql_util.clause_is_present(on_selectable, left_info.selectable):
adapt_from = on_selectable
else:
adapt_from = left_info.selectable
pj, sj, source, dest, \
secondary, target_adapter = prop._create_joins(
source_selectable=adapt_from,
dest_selectable=adapt_to,
source_polymorphic=True,
dest_polymorphic=True,
of_type=right_info.mapper)
if sj is not None:
if isouter:
# note this is an inner join from secondary->right
right = sql.join(secondary, right, sj)
onclause = pj
else:
left = sql.join(left, secondary, pj, isouter)
onclause = sj
else:
onclause = pj
self._target_adapter = target_adapter
expression.Join.__init__(self, left, right, onclause, isouter)
def join(self, right, onclause=None, isouter=False, join_to_left=None):
return _ORMJoin(self, right, onclause, isouter)
def outerjoin(self, right, onclause=None, join_to_left=None):
return _ORMJoin(self, right, onclause, True)
def join(left, right, onclause=None, isouter=False, join_to_left=None):
"""Produce an inner join between left and right clauses.
:func:`.orm.join` is an extension to the core join interface
provided by :func:`.sql.expression.join()`, where the
left and right selectables may be not only core selectable
objects such as :class:`.Table`, but also mapped classes or
:class:`.AliasedClass` instances. The "on" clause can
be a SQL expression, or an attribute or string name
referencing a configured :func:`.relationship`.
:func:`.orm.join` is not commonly needed in modern usage,
as its functionality is encapsulated within that of the
:meth:`.Query.join` method, which features a
significant amount of automation beyond :func:`.orm.join`
by itself. Explicit usage of :func:`.orm.join`
with :class:`.Query` involves usage of the
:meth:`.Query.select_from` method, as in::
from sqlalchemy.orm import join
session.query(User).\\
select_from(join(User, Address, User.addresses)).\\
filter(Address.email_address=='foo@bar.com')
In modern SQLAlchemy the above join can be written more
succinctly as::
session.query(User).\\
join(User.addresses).\\
filter(Address.email_address=='foo@bar.com')
See :meth:`.Query.join` for information on modern usage
of ORM level joins.
.. versionchanged:: 0.8.1 - the ``join_to_left`` parameter
is no longer used, and is deprecated.
"""
return _ORMJoin(left, right, onclause, isouter)
def outerjoin(left, right, onclause=None, join_to_left=None):
"""Produce a left outer join between left and right clauses.
This is the "outer join" version of the :func:`.orm.join` function,
featuring the same behavior except that an OUTER JOIN is generated.
See that function's documentation for other usage details.
"""
return _ORMJoin(left, right, onclause, True)
def with_parent(instance, prop):
"""Create filtering criterion that relates this query's primary entity
to the given related instance, using established :func:`.relationship()`
configuration.
The SQL rendered is the same as that rendered when a lazy loader
would fire off from the given parent on that attribute, meaning
that the appropriate state is taken from the parent object in
Python without the need to render joins to the parent table
in the rendered statement.
.. versionchanged:: 0.6.4
This method accepts parent instances in all
persistence states, including transient, persistent, and detached.
Only the requisite primary key/foreign key attributes need to
be populated. Previous versions didn't work with transient
instances.
:param instance:
An instance which has some :func:`.relationship`.
:param property:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
"""
if isinstance(prop, util.string_types):
mapper = object_mapper(instance)
prop = getattr(mapper.class_, prop).property
elif isinstance(prop, attributes.QueryableAttribute):
prop = prop.property
return prop.compare(operators.eq,
instance,
value_is_parent=True)
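# A minimal sketch of with_parent(), assuming ``User.addresses`` is a
# configured relationship; the criterion is built from the parent's key
# attributes, so no join to the parent table is rendered:
#
#     some_user = session.query(User).first()
#     session.query(Address).filter(with_parent(some_user, User.addresses))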
def has_identity(object):
"""Return True if the given object has a database
identity.
This typically corresponds to the object being
in either the persistent or detached state.
.. seealso::
:func:`.was_deleted`
"""
state = attributes.instance_state(object)
return state.has_identity
def was_deleted(object):
"""Return True if the given object was deleted
within a session flush.
.. versionadded:: 0.8.0
"""
state = attributes.instance_state(object)
return state.deleted
def randomize_unitofwork():
"""Use random-ordering sets within the unit of work in order
to detect unit of work sorting issues.
This is a utility function that can be used to help reproduce
inconsistent unit of work sorting issues. For example,
if two kinds of objects A and B are being inserted, and
B has a foreign key reference to A - the A must be inserted first.
However, if there is no relationship between A and B, the unit of work
won't know to perform this sorting, and an operation may or may not
fail, depending on how the ordering works out. Since Python sets
and dictionaries have non-deterministic ordering, such an issue may
occur on some runs and not on others, and in practice it tends to
have a great dependence on the state of the interpreter. This leads
to so-called "heisenbugs" where changing entirely irrelevant aspects
of the test program still cause the failure behavior to change.
By calling ``randomize_unitofwork()`` when a script first runs, the
ordering of a key series of sets within the unit of work implementation
    is randomized, so that the script can be minimized down to the fundamental
mapping and operation that's failing, while still reproducing the issue
on at least some runs.
This utility is also available when running the test suite via the
``--reversetop`` flag.
.. versionadded:: 0.8.1 created a standalone version of the
``--reversetop`` feature.
"""
from sqlalchemy.orm import unitofwork, session, mapper, dependency
from sqlalchemy.util import topological
from sqlalchemy.testing.util import RandomSet
topological.set = unitofwork.set = session.set = mapper.set = \
dependency.set = RandomSet
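# Minimal usage sketch: call the function once, before any flush work happens,
# then run the failing scenario repeatedly (import path may differ by version):
#
#     from sqlalchemy.orm.util import randomize_unitofwork
#     randomize_unitofwork()
#     # ... exercise the suspect mappings/flushes in a loop ...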
| gpl-3.0 |
pdebuyl/lammps | tools/i-pi/ipi/engine/normalmodes.py | 41 | 16476 | """Contains the classes that deal with the normal mode representation.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Deals with the normal mode transformation, including the complications
introduced by PA-CMD when the bead masses are rescaled. Also deals with
the change in the dynamics introduced by this mass-scaling, and has its
own functions to calculate the kinetic energy, and the exact propagator
in the normal mode representation under the ring polymer Hamiltonian.
Classes:
NormalModes: Deals with the normal mode transformation in RPMD and PA-CMD.
"""
import numpy as np
from ipi.utils.depend import *
from ipi.utils import units
from ipi.utils import nmtransform
from ipi.utils.messages import verbosity, warning, info
__all__ = [ "NormalModes" ]
class NormalModes(dobject):
""" A helper class to manipulate the path NM.
Normal-modes transformation, determination of path frequencies,
dynamical mass matrix change, etc.
Attributes:
natoms: The number of atoms.
nbeads: The number of beads.
beads: The beads object for which the normal mode transformation should
be done.
ensemble: The ensemble object, specifying the temperature to hold the
system to.
transform: A nm_trans object that contains the functions that are
required for the normal mode transformation.
Depend objects:
mode: A string specifying how the bead masses are chosen.
transform_method: A string specifying how to do the normal mode
transformation.
nm_freqs: An array that specifies how the normal mode frequencies
of the ring polymers are to be calculated, and thus how the
bead masses should be chosen.
qnm: The bead positions in the normal mode representation. Depends on
beads.q.
pnm: The bead momenta in the normal mode representation. Depends on
beads.p.
omegan: The effective vibrational frequency for the interaction
between the replicas. Depends on the simulation temperature.
omegan2: omegan**2.
omegak: The normal mode frequencies for the free ring polymer.
Depends on omegan.
prop_pq: An array holding the exact normal mode propagator for the
free ring polymer, using mass scaled coordinates.
See J. Chem. Phys. 133, 124101 (2010). Depends on the bead masses
and the timestep.
nm_factor: An array of dynamical mass factors associated with each of
the normal modes. Depends on nm_freqs and mode.
dynm3: An array that gives the dynamical masses of individual atoms in the
normal modes representation. Depends on nm_factor and beads.m3.
dynomegak: The scaled vibrational frequencies. Depends on nm_factor and
omegak.
kins: A list of the kinetic energy for each normal mode, as
calculated in the normal mode representation, using the
dynamical mass factors. Depends on beads.sm3, beads.p and nm_factor.
kin: The total kinetic energy, as calculated in the normal mode
representation, using the dynamical mass factors.
kstress: The kinetic stress tensor, as calculated in the normal mode
representation, using the dynamical mass factors. Depends on
beads.sm3, beads.p and nm_factor.
"""
def __init__(self, mode="rpmd", transform_method="fft", freqs=None):
"""Initializes NormalModes.
Sets the options for the normal mode transform.
Args:
mode: A string specifying how to calculate the bead masses.
transform_method: A string specifying how to do the normal mode
transformation.
freqs: A list of data used to calculate the dynamical mass factors.
"""
if freqs is None:
freqs = []
dset(self,"mode", depend_value(name='mode', value=mode))
dset(self,"transform_method",
depend_value(name='transform_method', value=transform_method))
dset(self,"nm_freqs",
depend_array(name="nm_freqs",value=np.asarray(freqs, float) ) )
def bind(self, beads, ensemble):
""" Initializes the normal modes object and binds to beads and ensemble.
Do all the work down here as we need a full-formed necklace and ensemble
to know how this should be done.
Args:
beads: A beads object to be bound.
ensemble: An ensemble object to be bound.
"""
self.nbeads = beads.nbeads
self.natoms = beads.natoms
# stores a reference to the bound beads and ensemble objects
self.beads = beads
self.ensemble = ensemble
# sets up what's necessary to perform nm transformation.
if self.transform_method == "fft":
self.transform = nmtransform.nm_fft(nbeads=self.nbeads, natoms=self.natoms)
elif self.transform_method == "matrix":
self.transform = nmtransform.nm_trans(nbeads=self.nbeads)
# creates arrays to store normal modes representation of the path.
# must do a lot of piping to create "ex post" a synchronization between the beads and the nm
sync_q = synchronizer()
sync_p = synchronizer()
dset(self,"qnm",
depend_array(name="qnm",
value=np.zeros((self.nbeads,3*self.natoms), float),
func={"q": (lambda : self.transform.b2nm(depstrip(self.beads.q)) ) },
synchro=sync_q ) )
dset(self,"pnm",
depend_array(name="pnm",
value=np.zeros((self.nbeads,3*self.natoms), float),
func={"p": (lambda : self.transform.b2nm(depstrip(self.beads.p)) ) },
synchro=sync_p ) )
# must overwrite the functions
dget(self.beads, "q")._func = { "qnm": (lambda : self.transform.nm2b(depstrip(self.qnm)) ) }
dget(self.beads, "p")._func = { "pnm": (lambda : self.transform.nm2b(depstrip(self.pnm)) ) }
dget(self.beads, "q").add_synchro(sync_q)
dget(self.beads, "p").add_synchro(sync_p)
# also within the "atomic" interface to beads
for b in range(self.nbeads):
dget(self.beads._blist[b],"q")._func = { "qnm": (lambda : self.transform.nm2b(depstrip(self.qnm)) ) }
dget(self.beads._blist[b],"p")._func = { "pnm": (lambda : self.transform.nm2b(depstrip(self.pnm)) ) }
dget(self.beads._blist[b],"q").add_synchro(sync_q)
dget(self.beads._blist[b],"p").add_synchro(sync_p)
# finally, we mark the beads as those containing the set positions
dget(self.beads, "q").update_man()
dget(self.beads, "p").update_man()
# create path-frequencies related properties
dset(self,"omegan",
depend_value(name='omegan', func=self.get_omegan,
dependencies=[dget(self.ensemble,"temp")]) )
dset(self,"omegan2", depend_value(name='omegan2',func=self.get_omegan2,
dependencies=[dget(self,"omegan")]) )
dset(self,"omegak", depend_array(name='omegak',
value=np.zeros(self.beads.nbeads,float),
func=self.get_omegak, dependencies=[dget(self,"omegan")]) )
# sets up "dynamical" masses -- mass-scalings to give the correct RPMD/CMD dynamics
dset(self,"nm_factor", depend_array(name="nmm",
value=np.zeros(self.nbeads, float), func=self.get_nmm,
dependencies=[dget(self,"nm_freqs"), dget(self,"mode") ]) )
dset(self,"dynm3", depend_array(name="dm3",
value=np.zeros((self.nbeads,3*self.natoms), float),func=self.get_dynm3,
dependencies=[dget(self,"nm_factor"), dget(self.beads, "m3")] ) )
dset(self,"dynomegak", depend_array(name="dynomegak",
value=np.zeros(self.nbeads, float), func=self.get_dynwk,
dependencies=[dget(self,"nm_factor"), dget(self,"omegak") ]) )
dset(self,"prop_pq",
depend_array(name='prop_pq',value=np.zeros((self.beads.nbeads,2,2)),
func=self.get_prop_pq,
dependencies=[dget(self,"omegak"), dget(self,"nm_factor"), dget(self.ensemble,"dt")]) )
# if the mass matrix is not the RPMD one, the MD kinetic energy can't be
# obtained in the bead representation because the masses are all mixed up
dset(self,"kins",
depend_array(name="kins",value=np.zeros(self.nbeads, float),
func=self.get_kins,
dependencies=[dget(self,"pnm"), dget(self.beads,"sm3"), dget(self, "nm_factor") ] ))
dset(self,"kin",
depend_value(name="kin", func=self.get_kin,
dependencies=[dget(self,"kins")] ))
dset(self,"kstress",
depend_array(name="kstress",value=np.zeros((3,3), float),
func=self.get_kstress,
dependencies=[dget(self,"pnm"), dget(self.beads,"sm3"), dget(self, "nm_factor") ] ))
def get_omegan(self):
"""Returns the effective vibrational frequency for the interaction
between replicas.
"""
return self.ensemble.temp*self.nbeads*units.Constants.kb/units.Constants.hbar
def get_omegan2(self):
"""Returns omegan**2."""
return self.omegan**2
def get_omegak(self):
"""Gets the normal mode frequencies.
Returns:
A list of the normal mode frequencies for the free ring polymer.
The first element is the centroid frequency (0.0).
"""
return 2*self.omegan*np.array([np.sin(k*np.pi/self.nbeads) for k in range(self.nbeads)])
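    # Worked example of the formula above (illustrative only): for nbeads = 4,
    # omegak/omegan = 2*sin(k*pi/4) for k = 0..3, i.e. [0.0, 1.414, 2.0, 1.414];
    # the k = 0 entry is the centroid mode and is always zero.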
def get_dynwk(self):
"""Gets the dynamical normal mode frequencies.
Returns:
A list of the scaled normal mode frequencies for the free ring polymer.
The first element is the centroid frequency (0.0).
"""
return self.omegak/np.sqrt(self.nm_factor)
def get_prop_pq(self):
"""Gets the normal mode propagator matrix.
Note the special treatment for the centroid normal mode, which is
propagated using the standard velocity Verlet algorithm as required.
Note that both the normal mode positions and momenta are propagated
using this matrix.
Returns:
An array of the form (nbeads, 2, 2). Each 2*2 array prop_pq[i,:,:]
gives the exact propagator for the i-th normal mode of the
ring polymer.
"""
dt = self.ensemble.dt
pqk = np.zeros((self.nbeads,2,2), float)
pqk[0] = np.array([[1,0], [dt,1]])
for b in range(1, self.nbeads):
sk = np.sqrt(self.nm_factor[b]) # NOTE THAT THE PROPAGATOR USES MASS-SCALED MOMENTA!
dtomegak = self.omegak[b]*dt/sk
c = np.cos(dtomegak)
s = np.sin(dtomegak)
pqk[b,0,0] = c
pqk[b,1,1] = c
pqk[b,0,1] = -s*self.omegak[b]*sk
pqk[b,1,0] = s/(self.omegak[b]*sk)
return pqk
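    # A sketch of what each 2x2 block above encodes: with s = sqrt(nm_factor[b])
    # and w = omegak[b]/s, the exact harmonic propagator in mass-scaled
    # coordinates (p/sqrt(m), q*sqrt(m)) is
    #
    #     [p]      [ cos(w*dt)                  -sin(w*dt)*omegak[b]*s ] [p]
    #     [q]  <-  [ sin(w*dt)/(omegak[b]*s)     cos(w*dt)             ] [q]
    #
    # free_qstep() below applies one such matrix per internal mode via np.dot.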
def get_nmm(self):
"""Returns dynamical mass factors, i.e. the scaling of normal mode
masses that determine the path dynamics (but not statics)."""
# also checks that the frequencies and the mode given in init are
# consistent with the beads and ensemble
dmf = np.zeros(self.nbeads,float)
dmf[:] = 1.0
if self.mode == "rpmd":
if len(self.nm_freqs) > 0:
warning("nm.frequencies will be ignored for RPMD mode.", verbosity.low)
elif self.mode == "manual":
if len(self.nm_freqs) != self.nbeads-1:
raise ValueError("Manual path mode requires (nbeads-1) frequencies, one for each internal mode of the path.")
for b in range(1, self.nbeads):
sk = self.omegak[b]/self.nm_freqs[b-1]
dmf[b] = sk**2
elif self.mode == "pa-cmd":
if len(self.nm_freqs) > 1:
warning("Only the first element in nm.frequencies will be considered for PA-CMD mode.", verbosity.low)
if len(self.nm_freqs) == 0:
raise ValueError("PA-CMD mode requires the target frequency of all the internal modes.")
for b in range(1, self.nbeads):
sk = self.omegak[b]/self.nm_freqs[0]
info(" ".join(["NM FACTOR", str(b), str(sk), str(self.omegak[b]), str(self.nm_freqs[0])]), verbosity.medium)
dmf[b] = sk**2
elif self.mode == "wmax-cmd":
if len(self.nm_freqs) > 2:
warning("Only the first two element in nm.frequencies will be considered for WMAX-CMD mode.", verbosity.low)
if len(self.nm_freqs) < 2:
raise ValueError("WMAX-CMD mode requires [wmax, wtarget]. The normal modes will be scaled such that the first internal mode is at frequency wtarget and all the normal modes coincide at frequency wmax.")
wmax = self.nm_freqs[0]
wt = self.nm_freqs[1]
for b in range(1, self.nbeads):
sk = 1.0/np.sqrt((wt)**2*(1+(wmax/self.omegak[1])**2)/(wmax**2+(self.omegak[b])**2))
dmf[b] = sk**2
return dmf
def get_dynm3(self):
"""Returns an array with the dynamical masses of individual atoms in the normal modes representation."""
dm3 = np.zeros(self.beads.m3.shape,float)
for b in range(self.nbeads):
dm3[b] = self.beads.m3[b]*self.nm_factor[b]
return dm3
def free_qstep(self):
"""Exact normal mode propagator for the free ring polymer.
Note that the propagator works in mass scaled coordinates, so that the
propagator matrix can be determined independently from the particular
atom masses, and so the same propagator will work for all the atoms in
the system. All the ring polymers are propagated at the same time by a
matrix multiplication.
Also note that the centroid coordinate is propagated in qcstep, so is
not altered here.
"""
if self.nbeads == 1:
pass
else:
pq = np.zeros((2,self.natoms*3),float)
sm = depstrip(self.beads.sm3)[0]
prop_pq = depstrip(self.prop_pq)
for k in range(1,self.nbeads):
pq[0,:] = depstrip(self.pnm)[k]/sm
pq[1,:] = depstrip(self.qnm)[k]*sm
pq = np.dot(prop_pq[k],pq)
self.qnm[k] = pq[1,:]/sm
self.pnm[k] = pq[0,:]*sm
def get_kins(self):
"""Gets the MD kinetic energy for all the normal modes.
Returns:
A list of the kinetic energy for each NM.
"""
kmd = np.zeros(self.nbeads,float)
sm = depstrip(self.beads.sm3[0])
pnm = depstrip(self.pnm)
nmf = depstrip(self.nm_factor)
# computes the MD ke in the normal modes representation, to properly account for CMD mass scaling
for b in range(self.nbeads):
sp = pnm[b]/sm # mass-scaled momentum of b-th NM
kmd[b] = np.dot(sp,sp)*0.5/nmf[b] # include the partially adiabatic CMD mass scaling
return kmd
def get_kin(self):
"""Gets the total MD kinetic energy.
Note that this does not correspond to the quantum kinetic energy estimate
for the system.
Returns:
The sum of the kinetic energy of each NM in the path.
"""
return self.kins.sum()
def get_kstress(self):
"""Calculates the total MD kinetic stress tensor.
Note that this does not correspond to the quantum kinetic stress tensor
estimate for the system.
Returns:
The sum of the MD kinetic stress tensor contributions from each NM.
"""
kmd = np.zeros((3,3),float)
sm = depstrip(self.beads.sm3[0])
pnm = depstrip(self.pnm)
nmf = depstrip(self.nm_factor)
for b in range(self.nbeads):
sp = pnm[b]/sm # mass-scaled momentum of b-th NM
for i in range(3):
for j in range(3):
# computes the outer product of the p of various normal modes
# singling out Cartesian components to build the tensor
# also takes care of the possibility of having non-RPMD masses
kmd[i,j] += np.dot(sp[i:3*self.natoms:3],sp[j:3*self.natoms:3])/nmf[b]
return kmd
| gpl-2.0 |
ppries/tensorflow | tensorflow/contrib/slim/python/slim/queues.py | 35 | 2087 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a helper context for running queue runners.
@@NestedQueueRunnerError
@@QueueRunners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import threading
from tensorflow.python.framework import ops
from tensorflow.python.training import coordinator
__all__ = [
'NestedQueueRunnerError',
'QueueRunners',
]
_queue_runner_lock = threading.Lock()
class NestedQueueRunnerError(Exception):
pass
@contextmanager
def QueueRunners(session):
"""Creates a context manager that handles starting and stopping queue runners.
Args:
session: the currently running session.
Yields:
a context in which queues are run.
Raises:
NestedQueueRunnerError: if a QueueRunners context is nested within another.
"""
if not _queue_runner_lock.acquire(False):
raise NestedQueueRunnerError('QueueRunners cannot be nested')
coord = coordinator.Coordinator()
threads = []
for qr in ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(session,
coord=coord,
daemon=True,
start=True))
try:
yield
finally:
coord.request_stop()
coord.join(threads, stop_grace_period_secs=120)
_queue_runner_lock.release()
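# A minimal usage sketch (the input pipeline and train_op are assumed; the
# variable initializer name depends on the TensorFlow version):
#
#     with tf.Session() as sess:
#         sess.run(tf.initialize_all_variables())
#         with QueueRunners(sess):
#             for _ in range(num_steps):
#                 sess.run(train_op)  # runner threads keep the queues fed
#
# On exit the coordinator requests a stop and joins the threads; the module
# lock above is what makes nested use raise NestedQueueRunnerError.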
| apache-2.0 |
boonchu/pykickstart | pykickstart/commands/eula.py | 8 | 2579 | #
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <vpodzime@redhat.com>
#
from pykickstart.base import KickstartCommand
from pykickstart.errors import KickstartValueError, formatErrorMsg
from pykickstart.options import KSOptionParser
from pykickstart.i18n import _
class F20_Eula(KickstartCommand):
"""The 'eula' kickstart command"""
def __init__(self, *args, **kwargs):
KickstartCommand.__init__(self, *args, **kwargs)
self.op = self._getParser()
self.agreed = kwargs.get("agreed", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.agreed:
retval += "# License agreement\n"
retval += "eula %s\n" % self._getArgsAsStr()
return retval
def _getArgsAsStr(self):
if self.agreed:
return "--agreed"
else:
return ""
def _getParser(self):
op = KSOptionParser()
# people would struggle remembering the exact word
op.add_option("--agreed", "--agree", "--accepted", "--accept",
dest="agreed", action="store_true", default=False)
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if len(extra) != 0:
raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command %s does not take any arguments") % "eula"))
if not self.agreed:
raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Kickstart command eula expects the --agreed option")))
return self
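# A comment-only sketch of driving this command by hand (the parser normally
# dispatches to parse()):
#
#     cmd = F20_Eula()
#     cmd.lineno = 1
#     cmd.parse(["--agreed"])   # --agree/--accepted/--accept also work
#     str(cmd)                  # includes "# License agreement\neula --agreed\n"
#     cmd.parse([])             # raises KickstartValueError: --agreed expected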
| gpl-2.0 |
pkuyym/Paddle | python/paddle/fluid/transpiler/distributed_splitter.py | 2 | 1745 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def hash_name(varlist, pserver_endpoints):
"""
hash variable names to several endpoints.
Args:
        varlist(list): a list of Variables
        pserver_endpoints(list): a list of pserver endpoints
    Returns(list): the pserver endpoint assigned to each variable
"""
def _hash_block(block_str, total):
return hash(block_str) % total
eplist = []
for var in varlist:
server_id = _hash_block(var.name(), len(pserver_endpoints))
server_for_param = pserver_endpoints[server_id]
eplist.append(server_for_param)
return eplist
def round_robin(varlist, pserver_endpoints):
"""
Distribute variables to several endpoints.
Args:
varlist(list): a list of variables
pserver_endpoints(list): a list of pserver endpoints
Returns(list[int]): the endpoint for each variable
"""
assert (len(varlist) >= len(pserver_endpoints))
eplist = []
pserver_idx = 0
for var in varlist:
server_for_param = pserver_endpoints[pserver_idx]
eplist.append(server_for_param)
pserver_idx += 1
if pserver_idx >= len(pserver_endpoints):
pserver_idx = 0
return eplist
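# A comment-only sketch of the two policies, with two endpoints and a
# hypothetical list of variables exposing .name():
#
#     eps = ["127.0.0.1:6170", "127.0.0.1:6171"]
#     round_robin(var_list, eps)  # -> [eps[0], eps[1], eps[0], ...] in order
#     hash_name(var_list, eps)    # -> eps[hash(v.name()) % 2] per variable,
#                                 #    stable for a given variable name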
| apache-2.0 |
nmercier/linux-cross-gcc | win32/bin/Lib/bsddb/dbshelve.py | 5 | 11939 | #------------------------------------------------------------------------
# Copyright (c) 1997-2001 by Total Control Software
# All Rights Reserved
#------------------------------------------------------------------------
#
# Module Name: dbShelve.py
#
# Description: A reimplementation of the standard shelve.py that
# forces the use of cPickle, and DB.
#
# Creation Date: 11/3/97 3:39:04PM
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# 13-Dec-2000: Updated to be used with the new bsddb3 package.
# Added DBShelfCursor class.
#
#------------------------------------------------------------------------
"""Manage shelves of pickled objects using bsddb database files for the
storage.
"""
#------------------------------------------------------------------------
import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
    # Because this syntax is not valid before Python 2.5
exec("from . import db")
else :
import db
if sys.version_info[0] >= 3 :
import cPickle # Will be converted to "pickle" by "2to3"
else :
if sys.version_info < (2, 6) :
import cPickle
else :
# When we drop support for python 2.4
# we could use: (in 2.5 we need a __future__ statement)
#
# with warnings.catch_warnings():
# warnings.filterwarnings(...)
# ...
#
# We can not use "with" as is, because it would be invalid syntax
# in python 2.4 and (with no __future__) 2.5.
# Here we simulate "with" following PEP 343 :
import warnings
w = warnings.catch_warnings()
w.__enter__()
try :
warnings.filterwarnings('ignore',
message='the cPickle module has been removed in Python 3.0',
category=DeprecationWarning)
import cPickle
finally :
w.__exit__()
del w
HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL
def _dumps(object, protocol):
return cPickle.dumps(object, protocol=protocol)
if sys.version_info < (2, 6) :
from UserDict import DictMixin as MutableMapping
else :
import collections
MutableMapping = collections.MutableMapping
#------------------------------------------------------------------------
def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
dbenv=None, dbname=None):
"""
A simple factory function for compatibility with the standard
    shelve.py module. It can be used like this, where key is a string
and data is a pickleable object:
from bsddb import dbshelve
db = dbshelve.open(filename)
db[key] = data
db.close()
"""
if type(flags) == type(''):
sflag = flags
if sflag == 'r':
flags = db.DB_RDONLY
elif sflag == 'rw':
flags = 0
elif sflag == 'w':
flags = db.DB_CREATE
elif sflag == 'c':
flags = db.DB_CREATE
elif sflag == 'n':
flags = db.DB_TRUNCATE | db.DB_CREATE
else:
raise db.DBError, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"
d = DBShelf(dbenv)
d.open(filename, dbname, filetype, flags, mode)
return d
#---------------------------------------------------------------------------
class DBShelveError(db.DBError): pass
class DBShelf(MutableMapping):
"""A shelf to hold pickled objects, built upon a bsddb DB object. It
automatically pickles/unpickles data objects going to/from the DB.
"""
def __init__(self, dbenv=None):
self.db = db.DB(dbenv)
self._closed = True
if HIGHEST_PROTOCOL:
self.protocol = HIGHEST_PROTOCOL
else:
self.protocol = 1
def __del__(self):
self.close()
def __getattr__(self, name):
"""Many methods we can just pass through to the DB object.
(See below)
"""
return getattr(self.db, name)
#-----------------------------------
# Dictionary access methods
def __len__(self):
return len(self.db)
def __getitem__(self, key):
data = self.db[key]
return cPickle.loads(data)
def __setitem__(self, key, value):
data = _dumps(value, self.protocol)
self.db[key] = data
def __delitem__(self, key):
del self.db[key]
def keys(self, txn=None):
if txn is not None:
return self.db.keys(txn)
else:
return self.db.keys()
if sys.version_info >= (2, 6) :
def __iter__(self) : # XXX: Load all keys in memory :-(
for k in self.db.keys() :
yield k
# Do this when "DB" support iteration
# Or is it enough to pass thru "getattr"?
#
# def __iter__(self) :
# return self.db.__iter__()
def open(self, *args, **kwargs):
self.db.open(*args, **kwargs)
self._closed = False
def close(self, *args, **kwargs):
self.db.close(*args, **kwargs)
self._closed = True
def __repr__(self):
if self._closed:
return '<DBShelf @ 0x%x - closed>' % (id(self))
else:
return repr(dict(self.iteritems()))
def items(self, txn=None):
if txn is not None:
items = self.db.items(txn)
else:
items = self.db.items()
newitems = []
for k, v in items:
newitems.append( (k, cPickle.loads(v)) )
return newitems
def values(self, txn=None):
if txn is not None:
values = self.db.values(txn)
else:
values = self.db.values()
return map(cPickle.loads, values)
#-----------------------------------
# Other methods
def __append(self, value, txn=None):
data = _dumps(value, self.protocol)
return self.db.append(data, txn)
def append(self, value, txn=None):
if self.get_type() == db.DB_RECNO:
return self.__append(value, txn=txn)
raise DBShelveError, "append() only supported when dbshelve opened with filetype=dbshelve.db.DB_RECNO"
def associate(self, secondaryDB, callback, flags=0):
def _shelf_callback(priKey, priData, realCallback=callback):
            # Safe in Python 2.x because the expression short-circuits
if sys.version_info[0] < 3 or isinstance(priData, bytes) :
data = cPickle.loads(priData)
else :
data = cPickle.loads(bytes(priData, "iso8859-1")) # 8 bits
return realCallback(priKey, data)
return self.db.associate(secondaryDB, _shelf_callback, flags)
#def get(self, key, default=None, txn=None, flags=0):
def get(self, *args, **kw):
# We do it with *args and **kw so if the default value wasn't
# given nothing is passed to the extension module. That way
# an exception can be raised if set_get_returns_none is turned
# off.
data = self.db.get(*args, **kw)
try:
return cPickle.loads(data)
except (EOFError, TypeError, cPickle.UnpicklingError):
return data # we may be getting the default value, or None,
# so it doesn't need unpickled.
def get_both(self, key, value, txn=None, flags=0):
data = _dumps(value, self.protocol)
data = self.db.get(key, data, txn, flags)
return cPickle.loads(data)
def cursor(self, txn=None, flags=0):
c = DBShelfCursor(self.db.cursor(txn, flags))
c.protocol = self.protocol
return c
def put(self, key, value, txn=None, flags=0):
data = _dumps(value, self.protocol)
return self.db.put(key, data, txn, flags)
def join(self, cursorList, flags=0):
raise NotImplementedError
#----------------------------------------------
# Methods allowed to pass-through to self.db
#
# close, delete, fd, get_byteswapped, get_type, has_key,
# key_range, open, remove, rename, stat, sync,
# upgrade, verify, and all set_* methods.
#---------------------------------------------------------------------------
class DBShelfCursor:
"""
"""
def __init__(self, cursor):
self.dbc = cursor
def __del__(self):
self.close()
def __getattr__(self, name):
"""Some methods we can just pass through to the cursor object. (See below)"""
return getattr(self.dbc, name)
#----------------------------------------------
def dup(self, flags=0):
c = DBShelfCursor(self.dbc.dup(flags))
c.protocol = self.protocol
return c
def put(self, key, value, flags=0):
data = _dumps(value, self.protocol)
return self.dbc.put(key, data, flags)
def get(self, *args):
count = len(args) # a method overloading hack
method = getattr(self, 'get_%d' % count)
        return method(*args)
def get_1(self, flags):
rec = self.dbc.get(flags)
return self._extract(rec)
def get_2(self, key, flags):
rec = self.dbc.get(key, flags)
return self._extract(rec)
def get_3(self, key, value, flags):
data = _dumps(value, self.protocol)
        rec = self.dbc.get(key, data, flags)  # match on both key and pickled value
return self._extract(rec)
def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT)
def first(self, flags=0): return self.get_1(flags|db.DB_FIRST)
def last(self, flags=0): return self.get_1(flags|db.DB_LAST)
def next(self, flags=0): return self.get_1(flags|db.DB_NEXT)
def prev(self, flags=0): return self.get_1(flags|db.DB_PREV)
def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME)
def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP)
def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP)
def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP)
def get_both(self, key, value, flags=0):
data = _dumps(value, self.protocol)
rec = self.dbc.get_both(key, flags)
return self._extract(rec)
def set(self, key, flags=0):
rec = self.dbc.set(key, flags)
return self._extract(rec)
def set_range(self, key, flags=0):
rec = self.dbc.set_range(key, flags)
return self._extract(rec)
def set_recno(self, recno, flags=0):
rec = self.dbc.set_recno(recno, flags)
return self._extract(rec)
set_both = get_both
def _extract(self, rec):
if rec is None:
return None
else:
key, data = rec
            # Safe in Python 2.x because the expression short-circuits
if sys.version_info[0] < 3 or isinstance(data, bytes) :
return key, cPickle.loads(data)
else :
return key, cPickle.loads(bytes(data, "iso8859-1")) # 8 bits
#----------------------------------------------
# Methods allowed to pass-through to self.dbc
#
# close, count, delete, get_recno, join_item
#---------------------------------------------------------------------------
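# A minimal cursor-iteration sketch over a shelf created with the open()
# factory above; the filename is illustrative:
#
#     d = open('/tmp/data.db', 'c')
#     d['key'] = {'any': 'pickleable', 'value': 1}
#     c = d.cursor()
#     rec = c.first()
#     while rec is not None:
#         key, obj = rec          # values come back unpickled via _extract()
#         rec = c.next()
#     c.close()
#     d.close()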
| bsd-3-clause |
sotdjin/glibglab | venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py | 334 | 49672 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
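    # Worked examples (illustrative): distinfo_dirname('pip', '8.1.2') returns
    # 'pip-8.1.2.dist-info', while distinfo_dirname('pip-tools', '1.0') returns
    # 'pip_tools-1.0.dist-info' -- the '-' in the name becomes '_'.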
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
        This method is a generator, yielding every distribution on the path
        that provides a matching *name* (and *version*, if given).
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
        if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
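# A minimal usage sketch of DistributionPath; the distribution names are
# illustrative and must actually be installed to yield results:
#
#     dp = DistributionPath(include_egg=True)
#     dist = dp.get_distribution('pip')
#     if dist is not None:
#         print(dist.name_and_version)
#     for d in dp.provides_distribution('setuptools'):
#         print(d)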
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
Return a textual representation of this instance,
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
another. distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
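        For example, ``get_hash(b'data', 'sha256')`` would return a string of
        the form ``'sha256=<urlsafe-base64-digest>'`` (digest value omitted
        here for brevity).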
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
        else:
            # Record the requested algorithm name for the prefix before
            # resolving the name to its hashlib implementation.
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
        if finder is None:
            # A missing finder is an error, not a debugging stop.
            raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
        r = finder.find('REQUESTED')
self.requested = r is not None
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
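        For example, a size mismatch would be reported as a tuple of the form
        ``(path, 'size', '1024', '2048')`` (values illustrative).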
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
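        A SHARED file consists of ``key=value`` lines, for example (paths
        hypothetical)::
            prefix=/usr/local
            lib=/usr/local/lib/python2.7/site-packages
            namespace=/usr/local/lib/python2.7/site-packages/nspkg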
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
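        For example, ``get_distinfo_file('RECORD')`` returns something like
        ``.../foo-1.0.dist-info/RECORD`` (path hypothetical).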
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
            # assume an absolute path: keep only the last two components
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
    requested = True  # as we have no way of knowing, assume it was requested
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
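            For example, a ``bar >= 1.0`` line is converted to a string of
            the form ``'bar (>= 1.0)'``, while an unconstrained ``foo`` line
            is kept as ``'foo'`` (exact spacing depends on the requirement
            parser).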
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
        Checks that the files listed in ``installed-files.txt`` actually
        exist on disk. Returns a (possibly empty) list of mismatches. Each
        entry in the mismatch list is a tuple consisting of the path,
        'exists', the expected value (``True``) and the actual value
        (``False``).
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
                        # otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
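        The emitted output has the general shape (names hypothetical)::
            digraph dependencies {
            "foo" -> "bar" [label="bar (>= 1.0)"]
            }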
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
                if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
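        Illustrative sketch (``a`` and ``b`` are hypothetical distributions,
        with ``a`` depending on ``b``)::
            >>> g = DependencyGraph()
            >>> g.add_distribution(a)
            >>> g.add_distribution(b)
            >>> g.add_edge(a, b)
            >>> ordered, cyclic = g.topological_sort()
            >>> ordered == [b, a] and cyclic == []
            True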
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
            for k, v in list(alist.items()):
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
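    Illustrative sketch (``foo``/``bar`` are hypothetical distributions
    created via :func:`make_dist`; real ones normally come from a scan of
    installed distributions)::
        >>> d1 = make_dist('foo', '1.0')
        >>> d2 = make_dist('bar', '1.0')
        >>> d1.metadata.add_requirements(['bar (>= 0.5)'])
        >>> graph = make_graph([d1, d2])
        >>> [other.name for other, label in graph.adjacency_list[d1]]
        ['bar']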
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
    :param dist: a distribution, member of *dists*, whose dependents we want
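    For example, if ``A`` requires ``B`` and ``B`` requires ``C`` (all
    hypothetical), ``get_dependent_dists([A, B, C], C)`` returns a list
    containing ``B`` and ``A`` -- the distributions that directly or
    transitively depend on ``C``.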
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
    :param dist: a distribution, member of *dists*, whose requirements we want
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
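    Illustrative::
        >>> dist = make_dist('foo', '1.0.0', summary='An example')
        >>> dist.name_and_version
        'foo (1.0.0)'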
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
| mit |
FuzzyHobbit/letsencrypt | letsencrypt-nginx/letsencrypt_nginx/tests/configurator_test.py | 9 | 14928 | # pylint: disable=too-many-public-methods
"""Test for letsencrypt_nginx.configurator."""
import os
import shutil
import unittest
import mock
import OpenSSL
from acme import challenges
from acme import messages
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt_nginx.tests import util
class NginxConfiguratorTest(util.NginxTest):
"""Test a semi complex vhost configuration."""
def setUp(self):
super(NginxConfiguratorTest, self).setUp()
self.config = util.get_nginx_configurator(
self.config_path, self.config_dir, self.work_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.config_dir)
shutil.rmtree(self.work_dir)
@mock.patch("letsencrypt_nginx.configurator.le_util.exe_exists")
def test_prepare_no_install(self, mock_exe_exists):
mock_exe_exists.return_value = False
self.assertRaises(
errors.NoInstallationError, self.config.prepare)
def test_prepare(self):
self.assertEquals((1, 6, 2), self.config.version)
self.assertEquals(5, len(self.config.parser.parsed))
@mock.patch("letsencrypt_nginx.configurator.socket.gethostbyaddr")
def test_get_all_names(self, mock_gethostbyaddr):
mock_gethostbyaddr.return_value = ('155.225.50.69.nephoscale.net', [], [])
names = self.config.get_all_names()
self.assertEqual(names, set(
["*.www.foo.com", "somename", "another.alias",
"alias", "localhost", ".example.com", r"~^(www\.)?(example|bar)\.",
"155.225.50.69.nephoscale.net", "*.www.example.com",
"example.*", "www.example.org", "myhost"]))
def test_supported_enhancements(self):
self.assertEqual(['redirect'], self.config.supported_enhancements())
def test_enhance(self):
self.assertRaises(
errors.PluginError, self.config.enhance, 'myhost', 'unknown_enhancement')
def test_get_chall_pref(self):
self.assertEqual([challenges.TLSSNI01],
self.config.get_chall_pref('myhost'))
def test_save(self):
filep = self.config.parser.abs_path('sites-enabled/example.com')
self.config.parser.add_server_directives(
filep, set(['.example.com', 'example.*']),
[['listen', '5001 ssl']])
self.config.save()
# pylint: disable=protected-access
parsed = self.config.parser._parse_files(filep, override=True)
self.assertEqual([[['server'], [['listen', '5001 ssl'],
['listen', '69.50.225.155:9000'],
['listen', '127.0.0.1'],
['server_name', '.example.com'],
['server_name', 'example.*']]]],
parsed[0])
def test_choose_vhost(self):
localhost_conf = set(['localhost', r'~^(www\.)?(example|bar)\.'])
server_conf = set(['somename', 'another.alias', 'alias'])
example_conf = set(['.example.com', 'example.*'])
foo_conf = set(['*.www.foo.com', '*.www.example.com'])
results = {'localhost': localhost_conf,
'alias': server_conf,
'example.com': example_conf,
'example.com.uk.test': example_conf,
'www.example.com': example_conf,
'test.www.example.com': foo_conf,
'abc.www.foo.com': foo_conf,
'www.bar.co.uk': localhost_conf}
bad_results = ['www.foo.com', 'example', 't.www.bar.co',
'69.255.225.155']
for name in results:
self.assertEqual(results[name],
self.config.choose_vhost(name).names)
for name in bad_results:
self.assertEqual(set([name]), self.config.choose_vhost(name).names)
def test_more_info(self):
self.assertTrue('nginx.conf' in self.config.more_info())
def test_deploy_cert_stapling(self):
# Choose a version of Nginx greater than 1.3.7 so stapling code gets
# invoked.
self.config.version = (1, 9, 6)
example_conf = self.config.parser.abs_path('sites-enabled/example.com')
self.config.deploy_cert(
"www.example.com",
"example/cert.pem",
"example/key.pem",
"example/chain.pem",
"example/fullchain.pem")
self.config.save()
self.config.parser.load()
generated_conf = self.config.parser.parsed[example_conf]
self.assertTrue(util.contains_at_depth(generated_conf,
['ssl_stapling', 'on'], 2))
self.assertTrue(util.contains_at_depth(generated_conf,
['ssl_stapling_verify', 'on'], 2))
self.assertTrue(util.contains_at_depth(generated_conf,
['ssl_trusted_certificate', 'example/chain.pem'], 2))
def test_deploy_cert(self):
server_conf = self.config.parser.abs_path('server.conf')
nginx_conf = self.config.parser.abs_path('nginx.conf')
example_conf = self.config.parser.abs_path('sites-enabled/example.com')
# Choose a version of Nginx less than 1.3.7 so stapling code doesn't get
# invoked.
self.config.version = (1, 3, 1)
# Get the default SSL vhost
self.config.deploy_cert(
"www.example.com",
"example/cert.pem",
"example/key.pem",
"example/chain.pem",
"example/fullchain.pem")
self.config.deploy_cert(
"another.alias",
"/etc/nginx/cert.pem",
"/etc/nginx/key.pem",
"/etc/nginx/chain.pem",
"/etc/nginx/fullchain.pem")
self.config.save()
self.config.parser.load()
parsed_example_conf = util.filter_comments(self.config.parser.parsed[example_conf])
parsed_server_conf = util.filter_comments(self.config.parser.parsed[server_conf])
parsed_nginx_conf = util.filter_comments(self.config.parser.parsed[nginx_conf])
access_log = os.path.join(self.work_dir, "access.log")
error_log = os.path.join(self.work_dir, "error.log")
self.assertEqual([[['server'],
[['include', self.config.parser.loc["ssl_options"]],
['ssl_certificate_key', 'example/key.pem'],
['ssl_certificate', 'example/fullchain.pem'],
['error_log', error_log],
['access_log', access_log],
['listen', '5001 ssl'],
['listen', '69.50.225.155:9000'],
['listen', '127.0.0.1'],
['server_name', '.example.com'],
['server_name', 'example.*']]]],
parsed_example_conf)
self.assertEqual([['server_name', 'somename alias another.alias']],
parsed_server_conf)
self.assertTrue(util.contains_at_depth(parsed_nginx_conf,
[['server'],
[['include', self.config.parser.loc["ssl_options"]],
['ssl_certificate_key', '/etc/nginx/key.pem'],
['ssl_certificate', '/etc/nginx/fullchain.pem'],
['error_log', error_log],
['access_log', access_log],
['listen', '5001 ssl'],
['listen', '8000'],
['listen', 'somename:8080'],
['include', 'server.conf'],
[['location', '/'],
[['root', 'html'],
['index', 'index.html index.htm']]]]],
2))
def test_get_all_certs_keys(self):
nginx_conf = self.config.parser.abs_path('nginx.conf')
example_conf = self.config.parser.abs_path('sites-enabled/example.com')
# Get the default SSL vhost
self.config.deploy_cert(
"www.example.com",
"example/cert.pem",
"example/key.pem",
"example/chain.pem",
"example/fullchain.pem")
self.config.deploy_cert(
"another.alias",
"/etc/nginx/cert.pem",
"/etc/nginx/key.pem",
"/etc/nginx/chain.pem",
"/etc/nginx/fullchain.pem")
self.config.save()
self.config.parser.load()
self.assertEqual(set([
('example/fullchain.pem', 'example/key.pem', example_conf),
('/etc/nginx/fullchain.pem', '/etc/nginx/key.pem', nginx_conf),
]), self.config.get_all_certs_keys())
@mock.patch("letsencrypt_nginx.configurator.tls_sni_01.NginxTlsSni01.perform")
@mock.patch("letsencrypt_nginx.configurator.NginxConfigurator.restart")
def test_perform(self, mock_restart, mock_perform):
# Only tests functionality specific to configurator.perform
# Note: As more challenges are offered this will have to be expanded
achall1 = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=messages.ChallengeBody(
chall=challenges.TLSSNI01(token="kNdwjwOeX0I_A8DXt9Msmg"),
uri="https://ca.org/chall0_uri",
status=messages.Status("pending"),
), domain="localhost", account_key=self.rsa512jwk)
achall2 = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=messages.ChallengeBody(
chall=challenges.TLSSNI01(token="m8TdO1qik4JVFtgPPurJmg"),
uri="https://ca.org/chall1_uri",
status=messages.Status("pending"),
), domain="example.com", account_key=self.rsa512jwk)
expected = [
achall1.response(self.rsa512jwk),
achall2.response(self.rsa512jwk),
]
mock_perform.return_value = expected
responses = self.config.perform([achall1, achall2])
self.assertEqual(mock_perform.call_count, 1)
self.assertEqual(responses, expected)
self.assertEqual(mock_restart.call_count, 1)
@mock.patch("letsencrypt_nginx.configurator.subprocess.Popen")
def test_get_version(self, mock_popen):
mock_popen().communicate.return_value = (
"", "\n".join(["nginx version: nginx/1.4.2",
"built by clang 6.0 (clang-600.0.56)"
" (based on LLVM 3.5svn)",
"TLS SNI support enabled",
"configure arguments: --prefix=/usr/local/Cellar/"
"nginx/1.6.2 --with-http_ssl_module"]))
self.assertEqual(self.config.get_version(), (1, 4, 2))
mock_popen().communicate.return_value = (
"", "\n".join(["nginx version: nginx/0.9",
"built by clang 6.0 (clang-600.0.56)"
" (based on LLVM 3.5svn)",
"TLS SNI support enabled",
"configure arguments: --with-http_ssl_module"]))
self.assertEqual(self.config.get_version(), (0, 9))
mock_popen().communicate.return_value = (
"", "\n".join(["blah 0.0.1",
"built by clang 6.0 (clang-600.0.56)"
" (based on LLVM 3.5svn)",
"TLS SNI support enabled",
"configure arguments: --with-http_ssl_module"]))
self.assertRaises(errors.PluginError, self.config.get_version)
mock_popen().communicate.return_value = (
"", "\n".join(["nginx version: nginx/1.4.2",
"TLS SNI support enabled"]))
self.assertRaises(errors.PluginError, self.config.get_version)
mock_popen().communicate.return_value = (
"", "\n".join(["nginx version: nginx/1.4.2",
"built by clang 6.0 (clang-600.0.56)"
" (based on LLVM 3.5svn)",
"configure arguments: --with-http_ssl_module"]))
self.assertRaises(errors.PluginError, self.config.get_version)
mock_popen().communicate.return_value = (
"", "\n".join(["nginx version: nginx/0.8.1",
"built by clang 6.0 (clang-600.0.56)"
" (based on LLVM 3.5svn)",
"TLS SNI support enabled",
"configure arguments: --with-http_ssl_module"]))
self.assertRaises(errors.NotSupportedError, self.config.get_version)
mock_popen.side_effect = OSError("Can't find program")
self.assertRaises(errors.PluginError, self.config.get_version)
@mock.patch("letsencrypt_nginx.configurator.subprocess.Popen")
def test_nginx_restart(self, mock_popen):
mocked = mock_popen()
mocked.communicate.return_value = ('', '')
mocked.returncode = 0
self.assertTrue(self.config.restart())
@mock.patch("letsencrypt_nginx.configurator.subprocess.Popen")
def test_nginx_restart_fail(self, mock_popen):
mocked = mock_popen()
mocked.communicate.return_value = ('', '')
mocked.returncode = 1
self.assertFalse(self.config.restart())
@mock.patch("letsencrypt_nginx.configurator.subprocess.Popen")
def test_no_nginx_start(self, mock_popen):
mock_popen.side_effect = OSError("Can't find program")
self.assertRaises(SystemExit, self.config.restart)
@mock.patch("letsencrypt_nginx.configurator.subprocess.Popen")
def test_config_test(self, mock_popen):
mocked = mock_popen()
mocked.communicate.return_value = ('', '')
mocked.returncode = 0
self.assertTrue(self.config.config_test())
def test_get_snakeoil_paths(self):
# pylint: disable=protected-access
cert, key = self.config._get_snakeoil_paths()
self.assertTrue(os.path.exists(cert))
self.assertTrue(os.path.exists(key))
with open(cert) as cert_file:
OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, cert_file.read())
with open(key) as key_file:
OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM, key_file.read())
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
asm0dey/Flexget | tests/test_manipulate.py | 22 | 1990 | from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestManipulate(FlexGetBase):
__yaml__ = """
tasks:
test_1:
mock:
- {title: 'abc FOO'}
manipulate:
- title:
replace:
regexp: FOO
format: BAR
test_2:
mock:
- {title: '1234 abc'}
manipulate:
- title:
extract: \d+\s*(.*)
test_multiple_edits:
mock:
- {title: 'abc def'}
manipulate:
- title:
replace:
regexp: abc
format: "123"
- title:
extract: \d+\s+(.*)
test_phase:
mock:
- {title: '1234 abc'}
manipulate:
- title:
phase: metainfo
extract: \d+\s*(.*)
test_remove:
mock:
- {title: 'abc', description: 'def'}
manipulate:
- description: { remove: yes }
"""
def test_replace(self):
self.execute_task('test_1')
assert self.task.find_entry('entries', title='abc BAR'), 'replace failed'
def test_extract(self):
self.execute_task('test_2')
assert self.task.find_entry('entries', title='abc'), 'extract failed'
def test_multiple_edits(self):
self.execute_task('test_multiple_edits')
assert self.task.find_entry('entries', title='def'), 'multiple edits on 1 field failed'
def test_phase(self):
self.execute_task('test_phase')
assert self.task.find_entry('entries', title='abc'), 'extract failed at metainfo phase'
def test_remove(self):
self.execute_task('test_remove')
assert 'description' not in self.task.find_entry('entries', title='abc'), 'remove failed'
| mit |
veger/ansible | test/runner/lib/classification.py | 7 | 28287 | """Classify changes in Ansible code."""
from __future__ import absolute_import, print_function
import collections
import os
import re
import time
from lib.target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
)
from lib.util import (
display,
)
from lib.import_analysis import (
get_python_module_utils_imports,
)
from lib.csharp_import_analysis import (
get_csharp_module_utils_imports,
)
from lib.powershell_import_analysis import (
get_powershell_module_utils_imports,
)
from lib.config import (
TestConfig,
IntegrationConfig,
)
from lib.metadata import (
ChangeDescription,
)
FOCUSED_TARGET = '__focused__'
def categorize_changes(args, paths, verbose_command=None):
"""
:type args: TestConfig
:type paths: list[str]
:type verbose_command: str
:rtype: ChangeDescription
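    For example (paths hypothetical), a changed
    ``lib/ansible/modules/cloud/foo.py`` typically maps to the unit and
    integration targets registered for that module, while a change under
    ``docs/`` maps to no tests at all.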
"""
mapper = PathMapper(args)
commands = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths = set()
original_paths = set()
additional_paths = set()
no_integration_paths = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=1)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=1)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path) # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
for command in commands:
commands[command].discard('none')
if any(t == 'all' for t in commands[command]):
commands[command] = set(['all'])
commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
for command in commands:
if commands[command] == ['all']:
commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
class PathMapper(object):
"""Map file paths to test commands and targets."""
def __init__(self, args):
"""
:type args: TestConfig
"""
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [t for t in self.sanity_targets if os.path.splitext(t.path)[1] == '.ps1']
self.csharp_targets = [t for t in self.sanity_targets if os.path.splitext(t.path)[1] == '.cs']
self.units_modules = set(t.module for t in self.units_targets if t.module)
self.units_paths = set(a for t in self.units_targets for a in t.aliases)
self.sanity_paths = set(t.path for t in self.sanity_targets)
self.module_names_by_path = dict((t.path, t.module) for t in self.module_targets)
self.integration_targets_by_name = dict((t.name, t) for t in self.integration_targets)
self.integration_targets_by_alias = dict((a, t) for t in self.integration_targets for a in t.aliases)
self.posix_integration_by_module = dict((m, t.name) for t in self.integration_targets
if 'posix/' in t.aliases for m in t.modules)
self.windows_integration_by_module = dict((m, t.name) for t in self.integration_targets
if 'windows/' in t.aliases for m in t.modules)
self.network_integration_by_module = dict((m, t.name) for t in self.integration_targets
if 'network/' in t.aliases for m in t.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
def get_dependent_paths(self, path):
"""
:type path: str
:rtype: list[str]
"""
ext = os.path.splitext(os.path.split(path)[1])[1]
if path.startswith('lib/ansible/module_utils/'):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if path.startswith('test/integration/targets/'):
return self.get_integration_target_usage(path)
return []
def get_python_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if path == 'lib/ansible/module_utils/__init__.py':
return []
if not self.python_module_utils_imports:
display.info('Analyzing python module_utils imports...')
before = time.time()
self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
after = time.time()
display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
name = os.path.splitext(path)[0].replace('/', '.')[4:]
if name.endswith('.__init__'):
name = name[:-9]
return sorted(self.python_module_utils_imports[name])
def get_powershell_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = os.path.splitext(os.path.basename(path))[0]
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = os.path.splitext(os.path.basename(path))[0]
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path):
"""
:type path: str
:rtype: list[str]
"""
target_name = path.split('/')[3]
        dependents = ['test/integration/targets/%s/' % target
                      for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
def classify(self, path):
"""
:type path: str
:rtype: dict[str, str] | None
"""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path):
"""
:type path: str
:rtype: dict[str, str] | None
"""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('.github/'):
return minimal
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('contrib/'):
return {
'units': 'test/units/contrib/'
}
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if path.startswith('examples/'):
if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
return {
'windows-integration': 'connection_winrm',
}
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/modules/'):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: True,
}
return minimal
if path.startswith('lib/ansible/module_utils/'):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if path.startswith('lib/ansible/plugins/action/'):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if path.startswith('lib/ansible/plugins/connection/'):
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': 'test/units/plugins/connection/',
}
units_path = 'test/units/plugins/connection/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'units': units_path,
}
if path.startswith('lib/ansible/plugins/inventory/'):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_path = 'test/units/plugins/inventory/test_%s.py' % name
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target is not None,
}
if (path.startswith('lib/ansible/plugins/terminal/') or
path.startswith('lib/ansible/plugins/cliconf/') or
path.startswith('lib/ansible/plugins/netconf/')):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if path.startswith('lib/ansible/utils/module_docs_fragments/'):
return {
'sanity': 'all',
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('packaging/'):
if path.startswith('packaging/requirements/'):
if name.startswith('requirements-') and ext == '.txt':
component = name.split('-', 1)[1]
candidates = (
'cloud/%s/' % component,
)
for candidate in candidates:
if candidate in self.integration_targets_by_alias:
return {
'integration': candidate,
}
return all_tests(self.args) # broad impact, run all tests
return minimal
if path.startswith('test/cache/'):
return minimal
if path.startswith('test/results/'):
return minimal
if path.startswith('test/legacy/'):
return minimal
if path.startswith('test/integration/roles/'):
return minimal
if path.startswith('test/integration/targets/'):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name[path.split('/')[3]]
if 'hidden/' in target.aliases:
if target.type == 'role':
return minimal # already expanded using get_dependent_paths
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: True,
}
if path.startswith('test/integration/'):
if dirname == 'test/integration':
if self.prefixes.get(name) == 'network' and ext == '.yaml':
return minimal # network integration test playbooks are not used by ansible-test
if filename == 'network-all.yaml':
return minimal # network integration test playbook not used by ansible-test
if filename == 'platform_agnostic.yaml':
return minimal # network integration test playbook not used by ansible-test
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name.startswith('cloud-config-'):
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if path.startswith('test/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/units/'):
if path in self.units_paths:
return {
'units': path,
}
if path.startswith('test/units/compat/'):
return {
'units': 'test/units/',
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if path.startswith('test/runner/completion/'):
if path == 'test/runner/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/runner/lib/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/runner/lib/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if path.startswith('test/runner/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if name.startswith('integration.cloud.'):
cloud_target = 'cloud/%s/' % name.split('.')[2]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/runner/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/shippable/tools/'):
return minimal # not used by tests
if path.startswith('test/utils/shippable/'):
if dirname == 'test/utils/shippable':
test_map = {
'cloud.sh': 'integration:cloud/',
'freebsd.sh': 'integration:all',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'osx.sh': 'integration:all',
'rhel.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/'):
return minimal
if path == 'test/README.md':
return minimal
if path.startswith('ticket_stubs/'):
return minimal
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.gitmodules',
'.mailmap',
'tox.ini', # obsolete
'COPYING',
'VERSION',
'Makefile',
):
return minimal
if path in (
'shippable.yml',
'.coveragerc',
):
return all_tests(self.args) # test infrastructure, run all tests
if path == 'setup.py':
return all_tests(self.args) # broad impact, run all tests
if path == '.yamllint':
return {
'sanity': 'all',
}
if ext in ('.md', '.rst', '.txt', '.xml', '.in'):
return minimal
return None # unknown, will result in fall-back to run all tests
def all_tests(args, force=False):
"""
:type args: TestConfig
:type force: bool
:rtype: dict[str, str]
"""
if force:
integration_all_target = 'all'
else:
integration_all_target = get_integration_all_target(args)
return {
'sanity': 'all',
'units': 'all',
'integration': integration_all_target,
'windows-integration': integration_all_target,
'network-integration': integration_all_target,
}
def get_integration_all_target(args):
"""
:type args: TestConfig
:rtype: str
"""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all'
| gpl-3.0 |
ukanga/SickRage | lib/hachoir_parser/file_system/linux_swap.py | 95 | 3777 | """
Linux swap file.
Documentation: Linux kernel source code, files:
- mm/swapfile.c
- include/linux/swap.h
Author: Victor Stinner
Creation date: 25 december 2006 (christmas ;-))
"""
from hachoir_parser import Parser
from hachoir_core.field import (ParserError, GenericVector,
UInt32, String,
Bytes, NullBytes, RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.tools import humanFilesize
from hachoir_core.bits import str2hex
PAGE_SIZE = 4096
# Definition of MAX_SWAP_BADPAGES in Linux kernel:
# (__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)
MAX_SWAP_BADPAGES = ((PAGE_SIZE - 10) - 1536) // 4
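# Worked out for the 4096-byte pages assumed above (illustrative):
# (4096 - 10) - 1536 = 2550 bytes remain for the badpages array,
# and 2550 // 4 = 637 entries of sizeof(int) == 4 bytes each.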
class Page(RawBytes):
static_size = PAGE_SIZE*8
def __init__(self, parent, name):
RawBytes.__init__(self, parent, name, PAGE_SIZE)
class UUID(Bytes):
static_size = 16*8
def __init__(self, parent, name):
Bytes.__init__(self, parent, name, 16)
def createDisplay(self):
text = str2hex(self.value, format=r"%02x")
return "%s-%s-%s-%s-%s" % (
text[:8], text[8:12], text[12:16], text[16:20], text[20:])
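        # e.g. the 16 bytes 0x00..0x0f display as
        # "00010203-0405-0607-0809-0a0b0c0d0e0f" (illustrative value).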
class LinuxSwapFile(Parser):
PARSER_TAGS = {
"id": "linux_swap",
"file_ext": ("",),
"category": "file_system",
"min_size": PAGE_SIZE*8,
"description": "Linux swap file",
"magic": (
("SWAP-SPACE", (PAGE_SIZE-10)*8),
("SWAPSPACE2", (PAGE_SIZE-10)*8),
("S1SUSPEND\0", (PAGE_SIZE-10)*8),
),
}
endian = LITTLE_ENDIAN
def validate(self):
magic = self.stream.readBytes((PAGE_SIZE-10)*8, 10)
if magic not in ("SWAP-SPACE", "SWAPSPACE2", "S1SUSPEND\0"):
return "Unknown magic string"
if MAX_SWAP_BADPAGES < self["nb_badpage"].value:
return "Invalid number of bad page (%u)" % self["nb_badpage"].value
return True
def getPageCount(self):
"""
Number of pages which can really be used for swapping:
        total number of pages minus bad pages minus one page (used for the header)
"""
# -1 because first page is used for the header
return self["last_page"].value - self["nb_badpage"].value - 1
def createDescription(self):
if self["magic"].value == "S1SUSPEND\0":
text = "Suspend swap file version 1"
elif self["magic"].value == "SWAPSPACE2":
text = "Linux swap file version 2"
else:
text = "Linux swap file version 1"
nb_page = self.getPageCount()
return "%s, page size: %s, %s pages" % (
text, humanFilesize(PAGE_SIZE), nb_page)
def createFields(self):
# First kilobyte: boot sectors
yield RawBytes(self, "boot", 1024, "Space for disklabel etc.")
# Header
yield UInt32(self, "version")
yield UInt32(self, "last_page")
yield UInt32(self, "nb_badpage")
yield UUID(self, "sws_uuid")
yield UUID(self, "sws_volume")
yield NullBytes(self, "reserved", 117*4)
# Read bad pages (if any)
count = self["nb_badpage"].value
if count:
if MAX_SWAP_BADPAGES < count:
raise ParserError("Invalid number of bad page (%u)" % count)
yield GenericVector(self, "badpages", count, UInt32, "badpage")
# Read magic
padding = self.seekByte(PAGE_SIZE - 10, "padding", null=True)
if padding:
yield padding
yield String(self, "magic", 10, charset="ASCII")
# Read all pages
yield GenericVector(self, "pages", self["last_page"].value, Page, "page")
# Padding at the end
padding = self.seekBit(self.size, "end_padding", null=True)
if padding:
yield padding
| gpl-3.0 |
ptoraskar/django | django/core/serializers/__init__.py | 347 | 8194 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.core.serializers.base import SerializerDoesNotExist
from django.utils import six
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
    Stub serializer to hold an exception raised during registration.
    This allows serializer registration to cache serializers: if an error is
    raised while creating a serializer, the error is stored and re-raised for
    the caller when the serializer is actually used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
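# Illustrative round-trip (hypothetical model name; a sketch, not part of
# this module's API surface):
#     payload = serialize("json", SomeModel.objects.all())
#     for deserialized in deserialize("json", payload):
#         deserialized.save()  # DeserializedObject.save() persists .object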
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if field.remote_field:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.remote_field.through._meta.auto_created:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
            # then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
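# Illustrative sketch (hypothetical models): if Book.natural_key declares
# dependencies = ['library.Author'], Author is promoted ahead of Book:
#     sort_dependencies([(library_config, [Book, Author])])  # [Author, Book]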
| bsd-3-clause |
darmaa/odoo | addons/website/tests/test_requests.py | 9 | 4704 | # -*- coding: utf-8 -*-
import urlparse
import unittest2
import urllib2
import werkzeug.urls
import lxml.html
import openerp
from openerp import tools
import cases
__all__ = ['load_tests', 'CrawlSuite']
class RedirectHandler(urllib2.HTTPRedirectHandler):
"""
HTTPRedirectHandler is predicated upon HTTPErrorProcessor being used and
works by intercepting 3xy "errors".
Inherit from it to handle 3xy non-error responses instead, as we're not
using the error processor
"""
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if 300 <= code < 400:
return self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class CrawlSuite(unittest2.TestSuite):
""" Test suite crawling an openerp CMS instance and checking that all
internal links lead to a 200 response.
If a username and a password are provided, authenticates the user before
starting the crawl
"""
def __init__(self, user=None, password=None):
super(CrawlSuite, self).__init__()
registry = openerp.registry(tools.config['db_name'])
try:
# switch registry to test mode, so that requests can be made
registry.enter_test_mode()
self.opener = urllib2.OpenerDirector()
self.opener.add_handler(urllib2.UnknownHandler())
self.opener.add_handler(urllib2.HTTPHandler())
self.opener.add_handler(urllib2.HTTPSHandler())
self.opener.add_handler(urllib2.HTTPCookieProcessor())
self.opener.add_handler(RedirectHandler())
self._authenticate(user, password)
self.user = user
finally:
registry.leave_test_mode()
def _request(self, path):
return self.opener.open(urlparse.urlunsplit([
'http', 'localhost:%s' % tools.config['xmlrpc_port'],
path, '', ''
]))
def _authenticate(self, user, password):
# force tools.config['db_name'] in user session so opening `/` doesn't
# blow up in multidb situations
self.opener.open('http://localhost:{port}/web/?db={db}'.format(
port=tools.config['xmlrpc_port'],
db=werkzeug.urls.url_quote_plus(tools.config['db_name']),
))
if user is not None:
url = 'http://localhost:{port}/login?{query}'.format(
port=tools.config['xmlrpc_port'],
query=werkzeug.urls.url_encode({
'db': tools.config['db_name'],
'login': user,
'key': password,
})
)
auth = self.opener.open(url)
assert auth.getcode() < 400, "Auth failure %d" % auth.getcode()
def _wrapped_run(self, result, debug=False):
registry = openerp.registry(tools.config['db_name'])
try:
# switch registry to test mode, so that requests can be made
registry.enter_test_mode()
paths = [URL('/')]
seen = set(paths)
while paths:
url = paths.pop(0)
r = self._request(url.url)
url.to_case(self.user, r).run(result)
if r.info().gettype() != 'text/html':
continue
doc = lxml.html.fromstring(r.read())
for link in doc.xpath('//a[@href]'):
href = link.get('href')
                    # avoid repeats, even for links we won't crawl: no need
                    # to bother splitting them if we've already ignored them
                    # previously
if href in seen: continue
seen.add(href)
parts = urlparse.urlsplit(href)
if parts.netloc or \
not parts.path.startswith('/') or \
parts.path == '/web' or\
parts.path.startswith('/web/') or \
(parts.scheme and parts.scheme not in ('http', 'https')):
continue
paths.append(URL(href, url.url))
finally:
registry.leave_test_mode()
class URL(object):
def __init__(self, url, source=None):
self.url = url
self.source = source
def to_case(self, user, result):
return cases.URLCase(user, self.url, self.source, result)
def load_tests(loader, base, _):
base.addTest(CrawlSuite())
base.addTest(CrawlSuite('admin', 'admin'))
base.addTest(CrawlSuite('demo', 'demo'))
return base
| agpl-3.0 |
explosion/spaCy | spacy/lang/en/lemmatizer.py | 2 | 1484 | from ...pipeline import Lemmatizer
from ...tokens import Token
class EnglishLemmatizer(Lemmatizer):
"""English lemmatizer. Only overrides is_base_form."""
def is_base_form(self, token: Token) -> bool:
"""
Check whether we're dealing with an uninflected paradigm, so we can
avoid lemmatization entirely.
        token (Token): The token to inspect. Its universal part-of-speech
            tag and morphological features (following the Universal
            Dependencies scheme) determine the result.
        RETURNS (bool): Whether the token is an uninflected base form.
"""
univ_pos = token.pos_.lower()
morphology = token.morph.to_dict()
if univ_pos == "noun" and morphology.get("Number") == "Sing":
return True
elif univ_pos == "verb" and morphology.get("VerbForm") == "Inf":
return True
# This maps 'VBP' to base form -- probably just need 'IS_BASE'
# morphology
elif univ_pos == "verb" and (
morphology.get("VerbForm") == "Fin"
and morphology.get("Tense") == "Pres"
and morphology.get("Number") is None
):
return True
elif univ_pos == "adj" and morphology.get("Degree") == "Pos":
return True
elif morphology.get("VerbForm") == "Inf":
return True
elif morphology.get("VerbForm") == "None":
return True
elif morphology.get("Degree") == "Pos":
return True
else:
return False
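    # Illustrative outcomes (assuming a tagger populated pos_ and morph):
    #     pos="noun", morph {"Number": "Sing"}                    -> True
    #     pos="verb", morph {"VerbForm": "Fin", "Tense": "Pres"}  -> True
    #     pos="verb", morph {"VerbForm": "Fin", "Tense": "Past"}  -> False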
| mit |
djw8605/condor | src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/Languages/it.py | 10 | 7275 | apiAttachAvailable = u'API disponibile'
apiAttachNotAvailable = u'Non disponibile'
apiAttachPendingAuthorization = u'In attesa di autorizzazione'
apiAttachRefused = u'Rifiutato'
apiAttachSuccess = u'Riuscito'
apiAttachUnknown = u'Sconosciuto'
budDeletedFriend = u"Eliminato dall'elenco amici"
budFriend = u'Amico'
budNeverBeenFriend = u'Mai stato in elenco amici'
budPendingAuthorization = u'In attesa di autorizzazione'
budUnknown = u'Sconosciuto'
cfrBlockedByRecipient = u'Chiamata bloccata dal destinatario'
cfrMiscError = u'Errori vari'
cfrNoCommonCodec = u'Nessun codec comune'
cfrNoProxyFound = u'Nessun proxy trovato'
cfrNotAuthorizedByRecipient = u'Utente corrente non autorizzato dal destinatario'
cfrRecipientNotFriend = u'Il destinatario non \xe8 un amico'
cfrRemoteDeviceError = u'Problema con la periferica audio remota'
cfrSessionTerminated = u'Sessione conclusa'
cfrSoundIOError = u'Errore I/O audio'
cfrSoundRecordingError = u'Errore di registrazione audio'
cfrUnknown = u'Sconosciuto'
cfrUserDoesNotExist = u'Utente o numero di telefono inesistente'
cfrUserIsOffline = u'Non \xe8 in linea'
chsAllCalls = u'Finestra versione'
chsDialog = u'Dialogo'
chsIncomingCalls = u'In attesa di riscontro'
chsLegacyDialog = u'Finestra versione'
chsMissedCalls = u'Dialogo'
chsMultiNeedAccept = u'In attesa di riscontro'
chsMultiSubscribed = u'Multi-iscritti'
chsOutgoingCalls = u'Multi-iscritti'
chsUnknown = u'Sconosciuto'
chsUnsubscribed = u'Disiscritto'
clsBusy = u'Occupato'
clsCancelled = u'Cancellato'
clsEarlyMedia = u'Esecuzione Early Media in corso (Playing Early Media)'
clsFailed = u'spiacente, chiamata non riuscita!'
clsFinished = u'Terminata'
clsInProgress = u'Chiamata in corso'
clsLocalHold = u'Chiamata in sospeso da utente locale'
clsMissed = u'Chiamata persa'
clsOnHold = u'Sospesa'
clsRefused = u'Rifiutato'
clsRemoteHold = u'Chiamata in sospeso da utente remoto'
clsRinging = u'in chiamata'
clsRouting = u'Routing'
clsTransferred = u'Sconosciuto'
clsTransferring = u'Sconosciuto'
clsUnknown = u'Sconosciuto'
clsUnplaced = u'Mai effettuata'
clsVoicemailBufferingGreeting = u'Buffering del saluto in corso'
clsVoicemailCancelled = u'La voicemail \xe8 stata annullata'
clsVoicemailFailed = u'Messaggio vocale non inviato'
clsVoicemailPlayingGreeting = u'Esecuzione saluto'
clsVoicemailRecording = u'Registrazione del messaggio vocale'
clsVoicemailSent = u'La voicemail \xe8 stata inviata'
clsVoicemailUploading = u'Caricamento voicemail in corso'
cltIncomingP2P = u'Chiamata Peer-to-Peer in arrivo'
cltIncomingPSTN = u'Telefonata in arrivo'
cltOutgoingP2P = u'Chiamata Peer-to-Peer in uscita'
cltOutgoingPSTN = u'Telefonata in uscita'
cltUnknown = u'Sconosciuto'
cmeAddedMembers = u'Membri aggiunti'
cmeCreatedChatWith = u'Chat creata con'
cmeEmoted = u'Sconosciuto'
cmeLeft = u'Uscito'
cmeSaid = u'Detto'
cmeSawMembers = u'Membri visti'
cmeSetTopic = u'Argomento impostato'
cmeUnknown = u'Sconosciuto'
cmsRead = u'Letto'
cmsReceived = u'Ricevuto'
cmsSending = u'Sto inviando...'
cmsSent = u'Inviato'
cmsUnknown = u'Sconosciuto'
conConnecting = u'In connessione'
conOffline = u'Non in linea'
conOnline = u'In linea'
conPausing = u'In pausa'
conUnknown = u'Sconosciuto'
cusAway = u'Torno subito'
cusDoNotDisturb = u'Occupato'
cusInvisible = u'Invisibile'
cusLoggedOut = u'Non in linea'
cusNotAvailable = u'Non disponibile'
cusOffline = u'Non in linea'
cusOnline = u'In linea'
cusSkypeMe = u'Libero per la Chat'
cusUnknown = u'Sconosciuto'
cvsBothEnabled = u'Invio e ricezione video'
cvsNone = u'Assenza video'
cvsReceiveEnabled = u'Ricezione video'
cvsSendEnabled = u'Invio video'
cvsUnknown = u''
grpAllFriends = u'Tutti gli amici'
grpAllUsers = u'Tutti gli utenti'
grpCustomGroup = u'Personalizzato'
grpOnlineFriends = u'Amici online'
grpPendingAuthorizationFriends = u'In attesa di autorizzazione'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'Utenti contattati di recente'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Amici Skype'
grpSkypeOutFriends = u'Amici SkypeOut'
grpUngroupedFriends = u'Amici non di gruppo'
grpUnknown = u'Sconosciuto'
grpUsersAuthorizedByMe = u'Autorizzato da me'
grpUsersBlockedByMe = u'Bloccato da me'
grpUsersWaitingMyAuthorization = u'In attesa di mia autorizzazione'
leaAddDeclined = u'Rifiutato'
leaAddedNotAuthorized = u'Deve essere autorizzato'
leaAdderNotFriend = u'Deve essere un amico'
leaUnknown = u'Sconosciuto'
leaUnsubscribe = u'Disiscritto'
leaUserIncapable = u'Utente incapace'
leaUserNotFound = u'Utente non trovato'
olsAway = u'Torno subito'
olsDoNotDisturb = u'Occupato'
olsNotAvailable = u'Non disponibile'
olsOffline = u'Non in linea'
olsOnline = u'In linea'
olsSkypeMe = u'Libero per la Chat'
olsSkypeOut = u'SkypeOut...'
olsUnknown = u'Sconosciuto'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'Femmina'
usexMale = u'Maschio'
usexUnknown = u'Sconosciuto'
vmrConnectError = u'Errore di connessione'
vmrFileReadError = u'Errore lettura file'
vmrFileWriteError = u'Errore scrittura file'
vmrMiscError = u'Errori vari'
vmrNoError = u'Nessun errore'
vmrNoPrivilege = u'Nessun privilegio voicemail'
vmrNoVoicemail = u'Voicemail inesistente'
vmrPlaybackError = u'Errore di riproduzione'
vmrRecordingError = u'Errore di registrazione'
vmrUnknown = u'Sconosciuto'
vmsBlank = u'Vuota'
vmsBuffering = u'Buffering'
vmsDeleting = u'Eliminazione in corso'
vmsDownloading = u'Download in corso'
vmsFailed = u'Fallita'
vmsNotDownloaded = u'Non scaricata'
vmsPlayed = u'Riprodotta'
vmsPlaying = u'Riproduzione in corso'
vmsRecorded = u'Registrata'
vmsRecording = u'Registrazione del messaggio vocale'
vmsUnknown = u'Sconosciuto'
vmsUnplayed = u'Non riprodotta'
vmsUploaded = u'Caricata'
vmsUploading = u'Caricamento in corso'
vmtCustomGreeting = u'Saluto personalizzato'
vmtDefaultGreeting = u'Saluto predefinito'
vmtIncoming = u'Messaggio vocale in arrivo'
vmtOutgoing = u'In uscita'
vmtUnknown = u'Sconosciuto'
vssAvailable = u'Disponibile'
vssNotAvailable = u'Non disponibile'
vssPaused = u'In pausa'
vssRejected = u'Rifiutata'
vssRunning = u'In corso'
vssStarting = u'Avvio in corso'
vssStopping = u'Arresto in corso'
vssUnknown = u'Sconosciuto'
| apache-2.0 |
AndreManzano/django-rblreport | coredata/migrations/0001_initial.py | 1 | 1375 | # Generated by Django 3.0.4 on 2020-04-10 23:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=False, verbose_name='Is Active')),
('ipaddress', models.GenericIPAddressField(unique=True, verbose_name='Ip address')),
('updated_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Rbl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=False, verbose_name='Is Active')),
('name', models.CharField(max_length=100, unique=True, verbose_name='Rbl name')),
('address', models.CharField(max_length=100, unique=True, verbose_name='Rbl address')),
('link', models.CharField(max_length=200, null=True, verbose_name='Rbl link')),
('updated_date', models.DateTimeField(auto_now_add=True)),
],
),
]
| bsd-3-clause |
DavidIngraham/ardupilot | Tools/autotest/param_metadata/rstemit.py | 5 | 9829 | #!/usr/bin/env python
from __future__ import print_function
import re
from param import known_param_fields, known_units
from emit import Emit
import html
# Emit docs in a RST format
class RSTEmit(Emit):
def blurb(self):
return """This is a complete list of the parameters which can be set (e.g. via the MAVLink protocol) to control vehicle behaviour. They are stored in persistent storage on the vehicle.
This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.
""" # noqa
def toolname(self):
return "Tools/autotest/param_metadata/param_parse.py"
def __init__(self):
Emit.__init__(self)
output_fname = 'Parameters.rst'
self.f = open(output_fname, mode='w')
self.spacer = re.compile("^", re.MULTILINE)
self.rstescape = re.compile("([^a-zA-Z0-9\n ])")
self.preamble = """.. Dynamically generated list of documented parameters
.. This page was generated using {toolname}
.. DO NOT EDIT
.. _parameters:
Complete Parameter List
=======================
{blurb}
""".format(blurb=self.escape(self.blurb()),
toolname=self.escape(self.toolname()))
self.t = ''
def escape(self, s):
ret = re.sub(self.rstescape, "\\\\\g<1>", s)
return ret
def close(self):
self.f.write(self.preamble)
self.f.write(self.t)
self.f.close()
def start_libraries(self):
pass
def tablify_row(self, rowheading, row, widths, height):
joiner = "|"
row_lines = [x.split("\n") for x in row]
for row_line in row_lines:
row_line.extend([""] * (height - len(row_line)))
if rowheading is not None:
rowheading_lines = rowheading.split("\n")
rowheading_lines.extend([""] * (height - len(rowheading_lines)))
out_lines = []
for i in range(0, height):
out_line = ""
if rowheading is not None:
rowheading_line = rowheading_lines[i]
out_line += joiner + " " + rowheading_line + " " * (widths[0] - len(rowheading_line) - 1)
joiner = "#"
j = 0
for item in row_lines:
widthnum = j
if rowheading is not None:
widthnum += 1
line = item[i]
out_line += joiner + " " + line + " " * (widths[widthnum] - len(line) - 1)
joiner = "|"
j += 1
out_line += "|"
out_lines.append(out_line)
return "\n".join(out_lines)
def tablify_longest_row_length(self, rows, rowheadings, headings):
check_width_rows = rows[:]
if headings is not None:
check_width_rows.append(headings)
longest_row_length = 0
for row in check_width_rows:
if len(row) > longest_row_length:
longest_row_length = len(row)
if rowheadings is not None:
longest_row_length += 1
return longest_row_length
def longest_line_in_string(self, string):
longest = 0
for line in string.split("\n"):
if len(line) > longest:
longest = len(line)
return longest
def tablify_calc_row_widths_heights(self, rows, rowheadings, headings):
rows_to_check = []
if headings is not None:
rows_to_check.append(headings)
rows_to_check.extend(rows[:])
heights = [0] * len(rows_to_check)
longest_row_length = self.tablify_longest_row_length(rows, rowheadings, headings)
widths = [0] * longest_row_length
all_rowheadings = []
if rowheadings is not None:
if headings is not None:
all_rowheadings.append("")
all_rowheadings.extend(rowheadings)
for rownum in range(0, len(rows_to_check)):
row = rows_to_check[rownum]
values_to_check = []
if rowheadings is not None:
values_to_check.append(all_rowheadings[rownum])
values_to_check.extend(row[:])
colnum = 0
for value in values_to_check:
height = len(value.split("\n"))
if height > heights[rownum]:
heights[rownum] = height
longest_line = self.longest_line_in_string(value)
width = longest_line + 2 # +2 for leading/trailing ws
if width > widths[colnum]:
widths[colnum] = width
colnum += 1
return (widths, heights)
def tablify(self, rows, headings=None, rowheadings=None):
(widths, heights) = self.tablify_calc_row_widths_heights(rows, rowheadings, headings)
# create dividing lines
bar = ""
heading_bar = ""
for width in widths:
bar += "+"
heading_bar += "+"
bar += "-" * width
heading_bar += "=" * width
bar += "+"
heading_bar += "+"
# create table
ret = bar + "\n"
if headings is not None:
rowheading = None
if rowheadings is not None:
rowheading = ""
ret += self.tablify_row(rowheading, headings, widths, heights[0]) + "\n"
ret += heading_bar + "\n"
for i in range(0, len(rows)):
rowheading = None
height = i
if rowheadings is not None:
rowheading = rowheadings[i]
if headings is not None:
height += 1
ret += self.tablify_row(rowheading, rows[i], widths, heights[height]) + "\n"
ret += bar + "\n"
return ret
def render_prog_values_field(self, render_info, param, field):
values = (param.__dict__[field]).split(',')
rows = []
for value in values:
v = [x.strip() for x in value.split(':')]
rows.append(v)
return self.tablify(rows, headings=render_info["headings"])
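    # Illustrative transformation (hypothetical metadata): a raw "Values"
    # field of "0:Disabled,1:Enabled" becomes the rows
    # [['0', 'Disabled'], ['1', 'Enabled']] rendered under the
    # 'Value'/'Meaning' headings.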
def emit(self, g):
tag = '%s Parameters' % self.escape(g.name)
reference = "parameters_" + g.name
field_table_info = {
"Values": {
"headings": ['Value', 'Meaning'],
},
"Bitmask": {
"headings": ['Bit', 'Meaning'],
},
}
ret = """
.. _{reference}:
{tag}
{underline}
""".format(tag=tag, underline="-" * len(tag),
reference=reference)
for param in g.params:
if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'):
continue
d = param.__dict__
# Get param path if defined (i.e. is duplicate parameter)
param_path = getattr(param, 'path', '')
if self.annotate_with_vehicle:
name = param.name
else:
name = param.name.split(':')[-1]
tag_param_path = ' (%s)' % param_path if param_path else ''
tag = '%s%s: %s' % (self.escape(name), self.escape(tag_param_path), self.escape(param.DisplayName),)
tag = tag.strip()
reference = param.name
# remove e.g. "ArduPlane:" from start of parameter name:
if self.annotate_with_vehicle:
reference = g.name + "_" + reference.split(":")[-1]
else:
reference = reference.split(":")[-1]
if param_path:
reference += '__' + param_path
ret += """
.. _{reference}:
{tag}
{tag_underline}
""".format(tag=tag, tag_underline='~' * len(tag), reference=reference)
if d.get('User', None) == 'Advanced':
ret += '\n| *Note: This parameter is for advanced users*'
ret += "\n\n%s\n" % self.escape(param.Description)
headings = []
row = []
for field in sorted(param.__dict__.keys()):
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
headings.append(field)
if field in field_table_info and Emit.prog_values_field.match(param.__dict__[field]):
row.append(self.render_prog_values_field(field_table_info[field], param, field))
elif field == "Range":
(param_min, param_max) = (param.__dict__[field]).split(' ')
row.append("%s - %s" % (param_min, param_max,))
elif field == 'Units':
abreviated_units = param.__dict__[field]
if abreviated_units != '':
# use the known_units dictionary to
                        # convert the abbreviated unit into a full
# textual one:
units = known_units[abreviated_units]
row.append(html.escape(units))
else:
row.append(html.escape(param.__dict__[field]))
if len(row):
ret += "\n\n" + self.tablify([row], headings=headings) + "\n\n"
self.t += ret + "\n"
def table_test():
e = RSTEmit()
print("Test 1")
print(e.tablify([["A", "B"], ["C", "D"]]))
print("Test 2")
print(e.tablify([["A", "B"], ["CD\nE", "FG"]]))
print("Test 3")
print(e.tablify([["A", "B"], ["CD\nEF", "GH"]], rowheadings=["r1", "row2"]))
print("Test 4")
print(e.tablify([["A", "B"], ["CD\nEF", "GH"]], headings=["c1", "col2"]))
print("Test 5")
print(e.tablify([["A", "B"], ["CD\nEF", "GH"]], headings=["c1", "col2"], rowheadings=["r1", "row2"]))
if __name__ == '__main__':
table_test()
| gpl-3.0 |
eric-haibin-lin/mxnet | example/reinforcement-learning/parallel_actor_critic/model.py | 28 | 5038 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import chain
import numpy as np
import scipy.signal
import mxnet as mx
class Agent(object):
def __init__(self, input_size, act_space, config):
super(Agent, self).__init__()
self.input_size = input_size
self.num_envs = config.num_envs
self.ctx = config.ctx
self.act_space = act_space
self.config = config
# Shared network.
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(
data=net, name='fc1', num_hidden=config.hidden_size, no_bias=True)
net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
# Policy network.
policy_fc = mx.sym.FullyConnected(
data=net, name='policy_fc', num_hidden=act_space, no_bias=True)
policy = mx.sym.SoftmaxActivation(data=policy_fc, name='policy')
policy = mx.sym.clip(data=policy, a_min=1e-5, a_max=1 - 1e-5)
log_policy = mx.sym.log(data=policy, name='log_policy')
out_policy = mx.sym.BlockGrad(data=policy, name='out_policy')
# Negative entropy.
neg_entropy = policy * log_policy
neg_entropy = mx.sym.MakeLoss(
data=neg_entropy, grad_scale=config.entropy_wt, name='neg_entropy')
# Value network.
value = mx.sym.FullyConnected(data=net, name='value', num_hidden=1)
self.sym = mx.sym.Group([log_policy, value, neg_entropy, out_policy])
self.model = mx.mod.Module(self.sym, data_names=('data',),
label_names=None)
self.paralell_num = config.num_envs * config.t_max
self.model.bind(
data_shapes=[('data', (self.paralell_num, input_size))],
label_shapes=None,
grad_req="write")
self.model.init_params(config.init_func)
optimizer_params = {'learning_rate': config.learning_rate,
'rescale_grad': 1.0}
if config.grad_clip:
optimizer_params['clip_gradient'] = config.clip_magnitude
self.model.init_optimizer(
kvstore='local', optimizer=config.update_rule,
optimizer_params=optimizer_params)
def act(self, ps):
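        # Inverse-CDF sampling: draw u ~ U(0, 1) for each row of action
        # probabilities and pick the first index whose cumulative
        # probability exceeds u.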
us = np.random.uniform(size=ps.shape[0])[:, np.newaxis]
as_ = (np.cumsum(ps, axis=1) > us).argmax(axis=1)
return as_
def train_step(self, env_xs, env_as, env_rs, env_vs):
# NOTE(reed): Reshape to set the data shape.
self.model.reshape([('data', (len(env_xs), self.input_size))])
xs = mx.nd.array(env_xs, ctx=self.ctx)
as_ = np.array(list(chain.from_iterable(env_as)))
# Compute discounted rewards and advantages.
advs = []
gamma, lambda_ = self.config.gamma, self.config.lambda_
for i in range(len(env_vs)):
# Compute advantages using Generalized Advantage Estimation;
# see eqn. (16) of [Schulman 2016].
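            # delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
            # A_t     = sum_{l >= 0} (gamma * lambda)^l * delta_{t+l}
            # (the discounted sum is evaluated by _discount below).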
delta_t = (env_rs[i] + gamma*np.array(env_vs[i][1:]) -
np.array(env_vs[i][:-1]))
advs.extend(self._discount(delta_t, gamma * lambda_))
# Negative generalized advantage estimations.
neg_advs_v = -np.asarray(advs)
# NOTE(reed): Only keeping the grads for selected actions.
neg_advs_np = np.zeros((len(advs), self.act_space), dtype=np.float32)
neg_advs_np[np.arange(neg_advs_np.shape[0]), as_] = neg_advs_v
neg_advs = mx.nd.array(neg_advs_np, ctx=self.ctx)
        # NOTE(reed): The grads of values are actually the negative advantages.
v_grads = mx.nd.array(self.config.vf_wt * neg_advs_v[:, np.newaxis],
ctx=self.ctx)
data_batch = mx.io.DataBatch(data=[xs], label=None)
self._forward_backward(data_batch=data_batch,
out_grads=[neg_advs, v_grads])
self._update_params()
def _discount(self, x, gamma):
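        # One-pass evaluation of y[t] = x[t] + gamma * y[t+1]: an IIR
        # filter with transfer function 1 / (1 - gamma * z**-1) applied
        # to the reversed sequence, then reversed back.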
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
def _forward_backward(self, data_batch, out_grads=None):
self.model.forward(data_batch, is_train=True)
self.model.backward(out_grads=out_grads)
def _update_params(self):
self.model.update()
self.model._sync_params_from_devices()
| apache-2.0 |
spacemansteve/ADSimportpipeline | aip/classic/merger.py | 3 | 11102 | import collections
import copy
import datetime
import itertools
import os
import sys
import types
from aip.classic import enforce_schema, author_match
import adsputils as utils
_config = utils.load_config()
def mergeRecords(records):
completeRecords = []
e = enforce_schema.Enforcer() # TODO: no need to create new instances?
for r in copy.deepcopy(records):
r['text'] = Merger().mergeText(r['text'])
blocks = e.ensureList(r['metadata'])
#Multiply defined blocks need merging.
metadatablockCounter = collections.Counter([i['tempdata']['type'] for i in blocks])
needsMerging = dict([(k,[]) for k,v in metadatablockCounter.iteritems() if v>1])
completeMetadata = {}
#First pass: Add the singly defined blocks to the complete record
for b in blocks:
_type = b['tempdata']['type']
if _type not in needsMerging:
completeMetadata[_type] = b
else:
needsMerging[_type].append(b)
#Second pass: Merge the multiple defined blocks
for _type,blocks in needsMerging.iteritems():
m = Merger(blocks)
m.merge()
completeMetadata.update({
_type: m.block,
})
#Finally, we have a complete record
r['metadata'] = completeMetadata
completeRecords.append(e.finalPassEnforceSchema(r))
return completeRecords
class Merger:
def __init__(self,
blocks=None,
logger=None,
merger_rules= _config['MERGER_RULES'],
priorities = _config['PRIORITIES'],
references_always_append = _config['REFERENCES_ALWAYS_APPEND']
):
self.blocks = blocks
self.logger=logger
self.block = {}
self.altpublications = []
self.eL = enforce_schema.Enforcer().ensureList
self.merger_rules = merger_rules
self.priorities = priorities
self.references_always_append = references_always_append
if blocks:
            #Assert that there is only one block type being merged
assert len(set([i['tempdata']['type'] for i in blocks]))==1
self.blocktype = blocks[0]['tempdata']['type']
if not self.logger:
self.logger = utils.setup_logging('merger')
def _dispatcher(self, field):
if field not in self.merger_rules:
self.logger.error("%s not in MERGER_RULES" % field)
raise Exception("%s not in MERGER_RULES" % field)
return eval('self. '+ self.merger_rules[field])(field) #rca: eeeek!
def mergeText(self,blocks):
mergedBlock = {}
        #Order matters here; we prioritize data coming from body over acknowledgement.
fields = ['acknowledgement']
for f in fields:
mergedBlock[f] = {}
if len(blocks[f])<2:
result = blocks[f][0] if blocks[f] else {}
blocks[f] = result
else:
result = None
data = [ (i,i['tempdata']) for i in blocks[f]]
while len(data) > 0:
f1 = data.pop()
f2 = result if result else data.pop()
result = self._getBestOrigin(f1,f2,'default')
result = result[0]
mergedBlock[f]['content'] = result.get('content')
mergedBlock[f]['provider'] = result.get('provider')
mergedBlock[f]['modtime'] = result.get('modtime')
return mergedBlock
def merge(self):
fieldsHist = {}
for fieldName in [i for i in list(itertools.chain(*self.blocks)) if i != 'tempdata']:
fieldsHist[fieldName] = 0
for block in self.blocks:
if fieldName in block:
fieldsHist[fieldName] += 1
singleDefinedFields = [k for k,v in fieldsHist.iteritems() if v==1]
multipleDefinedFields = [k for k,v in fieldsHist.iteritems() if v>1]
r = {}
# First pass: construct the record from singly defined fields
for field in singleDefinedFields:
for block in self.blocks:
if block[field]:
r[field] = block[field]
# Second pass: merge the multiply defined fields
for field in multipleDefinedFields:
try:
r[field] = self._dispatcher(field)
except Exception, err:
self.logger.error('Error with merger dispatcher on %s: %s' % (field,err))
raise
self.block = r
if self.blocktype == 'general':
self.block['altpublications'] = self.altpublications
def authorMerger(self,field='authors'):
data = [ [i[field],i['tempdata']] for i in self.blocks if field in i]
result = None
while len(data) > 0:
f1 = data.pop()
f2 = result if result else data.pop()
result = self._getBestOrigin(f1,f2,'authors')
other = f2 if result == f1 else f1
        #Only do the matching if at least one of the bestOrigin authors lacks an affiliation
#AND the other author field has at least one
if not all( [i['affiliations'] for i in result[0]] ) and\
any( [i['affiliations'] for i in other[0]] ):
best_matches = author_match.match_ads_author_fields(result[0],other[0])
for match in best_matches:
if not author_match.is_suitable_match(*match):
continue
if not match[0]['affiliations'] and match[1]['affiliations']:
match[0]['affiliations'] = match[1]['affiliations']
result = [[i[0] for i in best_matches],result[1]]
return result[0]
def booleanMerger(self,field):
if any([i[field] for i in self.blocks if field in i]):
return True
return False
def referencesMerger(self,field='references'):
data = [ (i[field],i['tempdata']) for i in self.blocks if field in i]
result = None
#First pass: OriginTrust
while len(data) > 0:
f1 = data.pop()
f2 = result if result else data.pop()
result = self._getBestOrigin(f1,f2,'references')
result = list(result[0])
#Second pass: append if the origin is in REFERENCES_ALWAYS_APPEND
data = [ (i[field],i['tempdata']) for i in self.blocks if field in i]
for f in data:
if f[1]['origin'] in self.references_always_append:
for reference in f[0]:
if reference not in result:
result.append(reference)
return result
def publicationMerger(self,field):
primaries = [({
'origin': i['publication']['origin'],
'volume': i['publication']['volume'],
'issue': i['publication']['issue'],
'page': i['publication']['page'],
'page_last': i['publication']['page_last'],
'page_range': i['publication']['page_range'],
'page_count': i['publication']['page_count'],
'series': i['publication'].get('series', None),
'altbibcode': i['publication']['altbibcode'],
'electronic_id': i['publication']['electronic_id'],
'name': i['publication']['name'],
'dates': i['publication']['dates'],
},i['tempdata']) for i in self.blocks if not i['tempdata']['alternate_journal'] ]
altpublications = [{
'origin': i['publication']['origin'],
'volume': i['publication']['volume'],
'issue': i['publication']['issue'],
'page': i['publication']['page'],
'page_last': i['publication']['page_last'],
'page_range': i['publication']['page_range'],
'page_count': i['publication']['page_count'],
'series': i['publication'].get('series', None),
'altbibcode': i['publication']['altbibcode'],
'electronic_id': i['publication']['electronic_id'],
'name': i['publication']['name'],
'dates': i['publication']['dates'],
} for i in self.blocks if i['tempdata']['alternate_journal'] ]
self.altpublications = altpublications
assert len(primaries)+len(altpublications) == len(self.blocks)
if len(primaries) == 1:
return primaries[0][0]
result = None
while len(primaries) > 0:
f1 = primaries.pop()
f2 = result if result else primaries.pop()
result = self._getBestOrigin(f1,f2,'journals')
return result[0]
def takeAll(self,field):
def deDuplicated(L):
#This will still consider 'origin' in the comparison
result = []
for i in L:
if i not in result:
result.append(i)
return result
r = []
for i in [j for j in self.blocks if field in j]:
if i[field] and i[field] not in r:
r.extend(i[field])
return deDuplicated(r)
def _getBestOrigin(self,f1,f2,field):
#If one of the two fields has empty content, return the one with content
if not all([f1[0],f2[0]]) and any([f1[0],f2[0]]):
return f1 if f1[0] else f2
if field not in self.priorities:
p = self.priorities['default']
else:
p = self.priorities[field]
# pick the origin with the highest priority for each record
origins = f1[1]['origin'].split('; ')
o1 = origins.pop()
for i in origins:
o1 = i if p.get(i.upper(),0) >= p.get(o1.upper(),0) else o1
origins = f2[1]['origin'].split('; ')
o2 = origins.pop()
for i in origins:
o2 = i if p.get(i.upper(),0) >= p.get(o2.upper(),0) else o2
# if origin not defined, default to 'PUBLISHER'
P1 = p.get(o1.upper(),p.get('PUBLISHER'))
P2 = p.get(o2.upper(),p.get('PUBLISHER'))
if P1==P2:
return self.equalTrustFallback(f1,f2)
return f1 if P1 > P2 else f2
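    # Illustrative resolution (hypothetical priority table): with
    # priorities {'ADS': 10, 'PUBLISHER': 5}, a field whose origin is
    # 'ADS' beats one from 'PUBLISHER'; equal scores defer to
    # equalTrustFallback() below.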
def originTrustMerger(self,field):
data = [ (i[field],i['tempdata']) for i in self.blocks if field in i]
result = None
while len(data) > 0:
f1 = data.pop()
f2 = result if result else data.pop()
result = self._getBestOrigin(f1,f2,field)
return result[0]
def equalTrustFallback(self,f1,f2):
#1. Priority
if f1[1]['primary'] and not f2[1]['primary']:
return f1
if f2[1]['primary'] and not f1[1]['primary']:
return f2
dt_1 = self.want_datetime(f1[1]['modtime'])
dt_2 = self.want_datetime(f2[1]['modtime'])
#2. Same origin, diff modtime -> latest modtime
if f1[1]['origin']==f2[1]['origin'] and dt_1 != dt_2:
return f1 if dt_1 > dt_2 else f2
#3. Length of content
if len(f1[0]) != len(f2[0]):
return f1 if len(f1[0]) > len(f2[0]) else f2
#4. latest modtime (regardless of origins)
if dt_1 != dt_2:
return f1 if dt_1 > dt_2 else f2
#5. Doesn't matter anymore. Return one of them.
return f1
def want_datetime(self,obj):
if isinstance(obj,datetime.date):
return obj
date_format = '%Y-%m-%dT%H:%M:%SZ'
obj_str = str(obj)
        if obj_str[-1] != 'Z': # hack: the trailing Z is often missing
date_format = '%Y-%m-%dT%H:%M:%S'
elif '.' in obj_str:
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
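        # Illustrative accepted inputs:
        #     1466000000                   -> datetime.fromtimestamp()
        #     '2016-06-15T12:00:00'        -> '%Y-%m-%dT%H:%M:%S'
        #     '2016-06-15T12:00:00Z'       -> '%Y-%m-%dT%H:%M:%SZ'
        #     '2016-06-15T12:00:00.5Z'     -> '%Y-%m-%dT%H:%M:%S.%fZ'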
try:
try:
return datetime.datetime.fromtimestamp(int(obj))
except ValueError:
return datetime.datetime.strptime(obj, date_format)
except Exception as e:
self.logger.warning('Error coercing {0} to a datetime. Returning datetime.now(): {1}'.format(obj,e))
return datetime.datetime.now() # Should return something that has __cmp__ defined
| gpl-3.0 |
MDAnalysis/pmda | pmda/test/test_util.py | 1 | 8518 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# PMDA
# Copyright (c) 2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
from __future__ import absolute_import
from six.moves import range, zip
import pytest
import time
import functools
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from pmda.util import timeit, make_balanced_slices, fold_second_order_moments
def test_timeit():
with timeit() as timer:
time.sleep(1)
assert_almost_equal(timer.elapsed, 1, decimal=2)
@pytest.mark.parametrize("start", (None, 0, 1, 10))
@pytest.mark.parametrize("n_frames,n_blocks,result", [
(5, 1, [slice(0, None, 1)]),
(5, 2, [slice(0, 3, 1), slice(3, None, 1)]),
(5, 3, [slice(0, 2, 1), slice(2, 4, 1), slice(4, None, 1)]),
(5, 4, [slice(0, 2, 1), slice(2, 3, 1), slice(3, 4, 1),
slice(4, None, 1)]),
(5, 5, [slice(0, 1, 1), slice(1, 2, 1), slice(2, 3, 1), slice(3, 4, 1),
slice(4, None, 1)]),
(10, 2, [slice(0, 5, 1), slice(5, None, 1)]),
(10, 3, [slice(0, 4, 1), slice(4, 7, 1), slice(7, None, 1)]),
(10, 7, [slice(0, 2, 1), slice(2, 4, 1), slice(4, 6, 1), slice(6, 7, 1),
slice(7, 8, 1), slice(8, 9, 1), slice(9, None, 1)]),
])
def test_make_balanced_slices_step1(n_frames, n_blocks, start, result, step=1):
assert step in (None, 1), "This test can only test step None or 1"
_start = start if start is not None else 0
_result = [slice(sl.start + _start,
sl.stop + _start if sl.stop is not None else None,
sl.step) for sl in result]
slices = make_balanced_slices(n_frames, n_blocks,
start=start, step=step)
assert_equal(slices, _result)
def _test_make_balanced_slices(n_blocks, start, stop, step, scale):
_start = start if start is not None else 0
traj_frames = range(scale * stop)
frames = traj_frames[start:stop:step]
n_frames = len(frames)
if n_frames >= n_blocks:
slices = make_balanced_slices(n_frames, n_blocks,
start=start, stop=stop, step=step)
assert len(slices) == n_blocks
# assemble frames again by blocks and show that we have all
# the original frames; get the sizes of the blocks
block_frames = []
block_sizes = []
for bslice in slices:
bframes = traj_frames[bslice]
block_frames.extend(list(bframes))
block_sizes.append(len(bframes))
block_sizes = np.array(block_sizes)
# check that we have all the frames accounted for
assert_equal(np.asarray(block_frames), np.asarray(frames))
# check that the distribution is balanced
assert np.all(block_sizes > 0)
minsize = n_frames // n_blocks
assert len(np.setdiff1d(block_sizes, [minsize, minsize+1])) == 0, \
"For n_blocks <= n_frames, block sizes are not balanced"
else:
with pytest.raises(ValueError, match="n_blocks must be smaller"):
slices = make_balanced_slices(n_frames, n_blocks,
start=start, stop=stop, step=step)
@pytest.mark.parametrize('n_blocks', [1, 2, 3, 4, 5, 7, 10, 11])
@pytest.mark.parametrize('start', [0, 1, 10])
@pytest.mark.parametrize('stop', [11, 100, 256])
@pytest.mark.parametrize('step', [None, 1, 2, 3, 5, 7])
@pytest.mark.parametrize('scale', [1, 2])
def test_make_balanced_slices(n_blocks, start, stop, step, scale):
return _test_make_balanced_slices(n_blocks, start, stop, step, scale)
def test_make_balanced_slices_step_gt_stop(n_blocks=2, start=None,
stop=5, step=6, scale=1):
return _test_make_balanced_slices(n_blocks, start, stop, step, scale)
@pytest.mark.parametrize('n_blocks', [1, 2])
@pytest.mark.parametrize('start', [0, 10])
@pytest.mark.parametrize('step', [None, 1, 2])
def test_make_balanced_slices_empty(n_blocks, start, step):
slices = make_balanced_slices(0, n_blocks, start=start, step=step)
assert slices == []
@pytest.mark.parametrize("n_frames,n_blocks,start,stop,step",
[(-1, 5, None, None, None), (5, 0, None, None, None),
(5, -1, None, None, None), (0, 0, None, None, None),
(-1, -1, None, None, None),
(5, 4, -1, None, None), (0, 5, -1, None, None),
(5, 0, -1, None, None),
(5, 4, None, -1, None), (5, 4, 3, 2, None),
(5, 4, None, None, -1), (5, 4, None, None, 0),
(4, 5, None, None, None)])
def test_make_balanced_slices_ValueError(n_frames, n_blocks,
start, stop, step):
with pytest.raises(ValueError):
make_balanced_slices(n_frames, n_blocks,
start=start, stop=stop, step=step)
def sumofsquares(a):
"""
Calculates the sum of squares
Parameters
----------
a : array
        `t x n x m` array where `t` is the number of elements in the
        partition (e.g., the number of time frames), `n` is the number of
        atoms in the system, and `m` is the number of dimensions (3 in
        this case).
Returns
-------
sos : array
`n x m` array of the sum of squares for 'n' atoms
"""
dev = a - np.mean(a, axis=0, dtype=np.float64)
sos = np.sum(dev**2, axis=0, dtype=np.float64)
return sos
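# Sanity note: per element, sumofsquares(a) equals (t - 1) * a.var(axis=0,
# ddof=1), i.e. the unnormalized second central moment that
# fold_second_order_moments combines across partitions.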
@pytest.fixture(scope="module")
def pos():
"""Generates array of random positions in range [-100, 100]"""
return 200*(np.random.random(size=(100000,
1000,
3)) - 0.5).astype(np.float64)
@pytest.mark.parametrize('n_frames', [3, 4, 10, 19, 101, 331, 1000])
@pytest.mark.parametrize('isplit',
[1, -1] +
["rand{0:03d}".format(i) for i in range(10)])
def test_second_order_moments(pos, n_frames, isplit):
pos = pos[:n_frames]
if str(isplit).startswith("rand"):
# generate random splitting point
isplit = np.random.randint(1, n_frames-1)
# split into two partitions
p1, p2 = pos[:isplit], pos[isplit:]
# create [t, mu, M] lists
S1 = [len(p1), p1.mean(axis=0), sumofsquares(p1)]
S2 = [len(p2), p2.mean(axis=0), sumofsquares(p2)]
# run lists through second_order_moments
result = fold_second_order_moments([S1, S2])
# compare result to calculations over entire pos array
assert result[0] == len(pos)
assert_almost_equal(result[1], pos.mean(axis=0))
assert_almost_equal(result[2], sumofsquares(pos))
@pytest.mark.parametrize('n_frames', [1000, 10000, 50000])
@pytest.mark.parametrize('n_blocks', [2, 3, 4, 5, 10, 100, 500])
def test_fold_second_order_moments(pos, n_frames, n_blocks):
pos = pos[:n_frames]
# all possible indices, except first and last ones
indices = np.arange(1, n_frames-1)
# (need n_blocks-1 indices "between" blocks)
# shuffle indices, take the first n_block indices, and sort
np.random.shuffle(indices)
split_indices = list(np.sort(indices[:n_blocks-1]))
# create start and stop indices for slices
start_indices = [0] + split_indices
stop_indices = split_indices + [n_frames]
# slice "trajectory" pos into random length blocks to test more than two
# cases per iteration
blocks = [pos[i:j] for i, j in zip(start_indices, stop_indices)]
S = [(len(block), block.mean(axis=0, dtype=np.float64),
sumofsquares(block)) for block in blocks]
# combine block results using fold method
results = fold_second_order_moments(S)
# compare result to calculations over entire pos array
assert results[0] == len(pos)
# check that the mean of the original pos array is equal to the collected
# mean array from reduce()
assert_almost_equal(results[1], pos.mean(axis=0))
# check that the sum of square arrays are equal
# Note: 'decimal' was changed from the default '7' to '5' because the
# absolute error for large trajectory lengths (n_frames > 1e4) is not
# almost equal to 7 decimal places
assert_almost_equal(results[2], sumofsquares(pos), decimal=5)
| gpl-2.0 |
therandomcode/WikiWriter | lib/bs4/builder/_html5lib.py | 39 | 12788 | __all__ = [
'HTML5TreeBuilder',
]
from pdb import set_trace
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import (
NamespacedAttribute,
whitespace_re,
)
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
NAME = "html5lib"
features = [NAME, PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding,
document_declared_encoding=None, exclude_encodings=None):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
# document_declared_encoding and exclude_encodings aren't used
# ATM because the html5lib TreeBuilder doesn't use
# UnicodeDammit.
if exclude_encodings:
warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
# If this attribute is a multi-valued attribute for this element,
# turn its value into a list.
list_attr = HTML5TreeBuilder.cdata_list_attributes
if (name in list_attr['*']
or (self.element.name in list_attr
and name in list_attr[self.element.name])):
# A node that is being cloned may have already undergone
# this procedure.
if not isinstance(value, list):
value = whitespace_re.split(value)
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
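# For example (illustrative): because 'class' is one of the multi-valued
# attributes in HTML5TreeBuilder.cdata_list_attributes, __setitem__ splits
# string values on whitespace, so attrs['class'] = 'nav active' stores
# ['nav', 'active'].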
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, basestring):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if not isinstance(child, basestring) and child.parent is not None:
node.element.extract()
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, basestring):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
elif self.element.next_element is not None:
# Something from further ahead in the parse tree is
# being inserted into this earlier element. This is
# very annoying because it means an expensive search
# for the last element in the tree.
most_recent_element = self.soup._last_descendant()
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
if insertBefore:
text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(text, insertBefore)
else:
self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
# print "MOVE", self.element.contents
# print "FROM", self.element
# print "TO", new_parent.element
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
append_after = new_parent_element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
if new_parents_last_descendant:
first_child.previous_element = new_parents_last_descendant
else:
first_child.previous_element = new_parent_element
first_child.previous_sibling = new_parents_last_child
if new_parents_last_descendant:
new_parents_last_descendant.next_element = first_child
else:
new_parent_element.next_element = first_child
if new_parents_last_child:
new_parents_last_child.next_sibling = first_child
# Fix the last child's next_element and next_sibling
last_child = to_append[-1]
last_child.next_element = new_parents_last_descendant_next_element
if new_parents_last_descendant_next_element:
new_parents_last_descendant_next_element.previous_element = last_child
last_child.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
# print "DONE WITH MOVE"
# print "FROM", self.element
# print "TO", new_parent_element
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
        if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
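# A minimal usage sketch (illustrative; not part of this module). Requesting
# the "html5lib" feature selects HTML5TreeBuilder, and html5lib repairs
# markup the way browsers do, adding <html>/<head>/<body> wrappers:
#
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup("<p>One<p>Two", "html5lib")
#   print(soup)
#   # <html><head></head><body><p>One</p><p>Two</p></body></html>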
| apache-2.0 |
brokenjacobs/ansible | lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py | 68 | 4606 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup_facts
version_added: "2.1"
short_description: Get resource group facts.
description:
- Get facts for a specific resource group or all resource groups.
options:
name:
description:
- Limit results to a specific resource group.
required: false
default: null
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
required: false
default: null
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one resource group
azure_rm_resourcegroup_facts:
name: Testing
- name: Get facts for all resource groups
  azure_rm_resourcegroup_facts:
- name: Get facts by tags
azure_rm_resourcegroup_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_resourcegroups:
description: List of resource group dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"properties": {
"provisioningState": "Succeeded"
},
"tags": {
"delete": "never",
"testing": "testing"
}
}]
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except:
# This is handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'ResourceGroup'
class AzureRMResourceGroupFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_resourcegroups=[])
)
self.name = None
self.tags = None
super(AzureRMResourceGroupFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name:
self.results['ansible_facts']['azure_resourcegroups'] = self.get_item()
else:
self.results['ansible_facts']['azure_resourcegroups'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.rm_client.resource_groups.get(self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return result
def list_items(self):
self.log('List all items')
try:
response = self.rm_client.resource_groups.list()
except AzureHttpError as exc:
            self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMResourceGroupFacts()
if __name__ == '__main__':
main()
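# Ad-hoc invocation sketch (an assumption, not part of this module; it
# requires working Azure credentials, e.g. in ~/.azure/credentials):
#
#   ansible localhost -m azure_rm_resourcegroup_facts -a name=Testing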
| gpl-3.0 |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
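# Illustrative behaviour of the filters above (the byte strings are
# assumptions, not taken from the chardet test suite):
#
#   prober = CharSetProber()
#   prober.filter_high_bit_only(b'abc\xe4\xb8\xadxyz')
#   # -> b' \xe4\xb8\xad ' (each run of ASCII bytes collapses to one space)
#   prober.filter_without_english_letters(b'no1\xc3\xa9')
#   # -> b' 1\xc3\xa9' (each run of A-Za-z collapses to one space)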
| apache-2.0 |
marqh/iris | lib/iris/experimental/concatenate.py | 17 | 1615 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Automatic concatenation of multiple cubes over one or more existing dimensions.
.. warning::
This functionality has now been moved to
:meth:`iris.cube.CubeList.concatenate`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
def concatenate(cubes):
"""
Concatenate the provided cubes over common existing dimensions.
.. warning::
This function is now **disabled**.
The functionality has been moved to
:meth:`iris.cube.CubeList.concatenate`.
"""
raise Exception(
'The function "iris.experimental.concatenate.concatenate" has been '
'moved, and is now a CubeList instance method.'
'\nPlease replace '
'"iris.experimental.concatenate.concatenate(<cubes>)" with '
'"iris.cube.CubeList(<cubes>).concatenate()".')
| lgpl-3.0 |
ehudmagal/robotqcapp | boto/regioninfo.py | 73 | 2441 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class RegionInfo(object):
"""
Represents an AWS Region
"""
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None):
self.connection = connection
self.name = name
self.endpoint = endpoint
self.connection_cls = connection_cls
def __repr__(self):
return 'RegionInfo:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'regionName':
self.name = value
elif name == 'regionEndpoint':
self.endpoint = value
else:
setattr(self, name, value)
def connect(self, **kw_params):
"""
Connect to this Region's endpoint. Returns an connection
object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
        :return: The connection to this region's endpoint
"""
if self.connection_cls:
return self.connection_cls(region=self, **kw_params)
| bsd-3-clause |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/registry/model/codeofconduct.py | 1 | 13402 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""A module for CodeOfConduct (CoC) related classes.
https://launchpad.canonical.com/CodeOfConduct
"""
__metaclass__ = type
__all__ = ['CodeOfConduct', 'CodeOfConductSet', 'CodeOfConductConf',
'SignedCodeOfConduct', 'SignedCodeOfConductSet']
from datetime import datetime
import os
import pytz
from sqlobject import (
BoolCol,
ForeignKey,
StringCol,
)
from zope.component import getUtility
from zope.interface import implements
from lp.app.errors import NotFoundError
from lp.registry.interfaces.codeofconduct import (
ICodeOfConduct,
ICodeOfConductConf,
ICodeOfConductSet,
ISignedCodeOfConduct,
ISignedCodeOfConductSet,
)
from lp.registry.interfaces.gpg import IGPGKeySet
from lp.services.config import config
from lp.services.database.constants import UTC_NOW
from lp.services.database.datetimecol import UtcDateTimeCol
from lp.services.database.sqlbase import (
flush_database_updates,
quote,
SQLBase,
)
from lp.services.gpg.interfaces import (
GPGVerificationError,
IGPGHandler,
)
from lp.services.mail.sendmail import (
format_address,
simple_sendmail,
)
from lp.services.webapp import canonical_url
class CodeOfConductConf:
"""Abstract Component to store the current CoC configuration."""
implements(ICodeOfConductConf)
## XXX: cprov 2005-02-17
## Integrate this class with LaunchpadCentral configuration
## in the future.
path = 'lib/lp/registry/codesofconduct/'
prefix = 'Ubuntu Code of Conduct - '
currentrelease = '2.0'
# Set the datereleased to the date that 1.0 CoC was released,
# preserving everyone's Ubuntu Code of Conduct signatory status.
# https://launchpad.net/products/launchpad/+bug/48995
datereleased = datetime(2005, 4, 12, tzinfo=pytz.timezone("UTC"))
class CodeOfConduct:
"""CoC class model.
A set of properties allow us to properly handle the CoC stored
in the filesystem, so it's not a database class.
"""
implements(ICodeOfConduct)
def __init__(self, version):
self.version = version
# verify if the respective file containing the code of conduct exists
if not os.path.exists(self._filename):
# raise something sane
raise NotFoundError(version)
@property
def title(self):
"""Return preformatted title (config_prefix + version)."""
## XXX: cprov 2005-02-18
## Missed doctest, problems initing ZopeComponentLookupError.
# Recover the prefix for CoC from a Component
prefix = getUtility(ICodeOfConductConf).prefix
# Build a fancy title
return '%s' % prefix + self.version
@property
def content(self):
"""Return the content of the CoC file."""
fp = open(self._filename)
data = fp.read()
fp.close()
return data
@property
def current(self):
"""Is this the current release of the Code of Conduct?"""
return getUtility(ICodeOfConductConf).currentrelease == self.version
@property
def _filename(self):
"""Rebuild filename according to the local version."""
# Recover the path for CoC from a Component
path = getUtility(ICodeOfConductConf).path
return os.path.join(path, self.version + '.txt')
@property
def datereleased(self):
return getUtility(ICodeOfConductConf).datereleased
class CodeOfConductSet:
"""A set of CodeOfConducts."""
implements(ICodeOfConductSet)
title = 'Launchpad Codes of Conduct'
def __getitem__(self, version):
"""See ICodeOfConductSet."""
# Create an entry point for the Admin Console
# Obviously we are excluding a CoC version called 'console'
if version == 'console':
return SignedCodeOfConductSet()
# in normal conditions return the CoC Release
try:
return CodeOfConduct(version)
except NotFoundError:
return None
def __iter__(self):
"""See ICodeOfConductSet."""
releases = []
# Recover the path for CoC from a component
cocs_path = getUtility(ICodeOfConductConf).path
# iter through files and store the CoC Object
for filename in os.listdir(cocs_path):
# Select the correct filenames
if filename.endswith('.txt'):
# Extract the version from filename
version = filename.replace('.txt', '')
releases.append(CodeOfConduct(version))
# Return the available list of CoCs objects
return iter(releases)
@property
def current_code_of_conduct(self):
# XXX kiko 2006-08-01:
# What a hack, but this whole file needs cleaning up.
currentrelease = getUtility(ICodeOfConductConf).currentrelease
for code in self:
if currentrelease == code.version:
return code
raise AssertionError("No current code of conduct registered")
class SignedCodeOfConduct(SQLBase):
"""Code of Conduct."""
implements(ISignedCodeOfConduct)
_table = 'SignedCodeOfConduct'
owner = ForeignKey(foreignKey="Person", dbName="owner", notNull=True)
signedcode = StringCol(dbName='signedcode', notNull=False, default=None)
signingkey = ForeignKey(foreignKey="GPGKey", dbName="signingkey",
notNull=False, default=None)
datecreated = UtcDateTimeCol(dbName='datecreated', notNull=True,
default=UTC_NOW)
recipient = ForeignKey(foreignKey="Person", dbName="recipient",
notNull=False, default=None)
admincomment = StringCol(dbName='admincomment', notNull=False,
default=None)
active = BoolCol(dbName='active', notNull=True, default=False)
@property
def displayname(self):
"""Build a Fancy Title for CoC."""
displayname = self.datecreated.strftime('%Y-%m-%d')
if self.signingkey:
displayname += (': digitally signed by %s (%s)'
% (self.owner.displayname,
self.signingkey.displayname))
else:
displayname += (': paper submission accepted by %s'
% self.recipient.displayname)
return displayname
def sendAdvertisementEmail(self, subject, content):
"""See ISignedCodeOfConduct."""
assert self.owner.preferredemail
template = open('lib/lp/registry/emailtemplates/'
'signedcoc-acknowledge.txt').read()
fromaddress = format_address(
"Launchpad Code Of Conduct System",
config.canonical.noreply_from_address)
replacements = {'user': self.owner.displayname,
'content': content}
message = template % replacements
simple_sendmail(
fromaddress, str(self.owner.preferredemail.email),
subject, message)
class SignedCodeOfConductSet:
"""A set of CodeOfConducts"""
implements(ISignedCodeOfConductSet)
title = 'Code of Conduct Administrator Page'
def __getitem__(self, id):
"""Get a Signed CoC Entry."""
return SignedCodeOfConduct.get(id)
def __iter__(self):
"""Iterate through the Signed CoC."""
return iter(SignedCodeOfConduct.select())
def verifyAndStore(self, user, signedcode):
"""See ISignedCodeOfConductSet."""
# XXX cprov 2005-02-24:
# Are we missing the version field in SignedCoC table?
# how to figure out which CoC version is signed?
# XXX: cprov 2005-02-27:
# To be implemented:
# * Valid Person (probably always true via permission lp.AnyPerson),
# * Valid GPGKey (valid and active),
# * Person and GPGkey matches (done on DB side too),
# * CoC is the current version available, or the previous
# still-supported version in old.txt,
# * CoC was signed (correctly) by the GPGkey.
# use a utility to perform the GPG operations
gpghandler = getUtility(IGPGHandler)
try:
sane_signedcode = signedcode.encode('utf-8')
except UnicodeEncodeError:
raise TypeError('Signed Code Could not be encoded as UTF-8')
try:
sig = gpghandler.getVerifiedSignature(sane_signedcode)
except GPGVerificationError as e:
return str(e)
if not sig.fingerprint:
return ('The signature could not be verified. '
'Check that the OpenPGP key you used to sign with '
'is published correctly in the global key ring.')
gpgkeyset = getUtility(IGPGKeySet)
gpg = gpgkeyset.getByFingerprint(sig.fingerprint)
if not gpg:
return ('The key you used, which has the fingerprint <code>%s'
'</code>, is not registered in Launchpad. Please '
'<a href="%s/+editpgpkeys">follow the '
'instructions</a> and try again.'
% (sig.fingerprint, canonical_url(user)))
if gpg.owner.id != user.id:
return ('You (%s) do not seem to be the owner of this OpenPGP '
'key (<code>%s</code>).'
% (user.displayname, gpg.owner.displayname))
if not gpg.active:
return ('The OpenPGP key used (<code>%s</code>) has been '
'deactivated. '
'Please <a href="%s/+editpgpkeys">reactivate</a> it and '
'try again.'
% (gpg.displayname, canonical_url(user)))
# recover the current CoC release
coc = CodeOfConduct(getUtility(ICodeOfConductConf).currentrelease)
current = coc.content
# calculate text digest
if sig.plain_data.split() != current.split():
return ('The signed text does not match the Code of Conduct. '
'Make sure that you signed the correct text (white '
'space differences are acceptable).')
# Store the signature
signed = SignedCodeOfConduct(owner=user, signingkey=gpg,
signedcode=signedcode, active=True)
# Send Advertisement Email
subject = 'Your Code of Conduct signature has been acknowledged'
content = ('Digitally Signed by %s\n' % sig.fingerprint)
signed.sendAdvertisementEmail(subject, content)
def searchByDisplayname(self, displayname, searchfor=None):
"""See ISignedCodeOfConductSet."""
clauseTables = ['Person']
# XXX: cprov 2005-02-27:
# FTI presents problems when query by incomplete names
# and I'm not sure if the best solution here is to use
        # trivial ILIKE query. Opinion required on review.
# glue Person and SignedCoC table
query = 'SignedCodeOfConduct.owner = Person.id'
# XXX cprov 2005-03-02:
# I'm not sure if the it is correct way to query ALL
# entries. If it is it should be part of FTI queries,
# isn't it ?
        # the name should work like a filter; if you don't enter anything
        # you get everything.
if displayname:
query += ' AND Person.fti @@ ftq(%s)' % quote(displayname)
# Attempt to search for directive
if searchfor == 'activeonly':
query += ' AND SignedCodeOfConduct.active = true'
elif searchfor == 'inactiveonly':
query += ' AND SignedCodeOfConduct.active = false'
return SignedCodeOfConduct.select(
query, clauseTables=clauseTables,
orderBy='SignedCodeOfConduct.active')
def searchByUser(self, user_id, active=True):
"""See ISignedCodeOfConductSet."""
# XXX kiko 2006-08-14:
# What is this user_id nonsense? Use objects!
return SignedCodeOfConduct.selectBy(ownerID=user_id,
active=active)
def modifySignature(self, sign_id, recipient, admincomment, state):
"""See ISignedCodeOfConductSet."""
sign = SignedCodeOfConduct.get(sign_id)
sign.active = state
sign.admincomment = admincomment
sign.recipient = recipient.id
subject = 'Launchpad: Code Of Conduct Signature Modified'
content = ('State: %s\n'
'Comment: %s\n'
'Modified by %s'
% (state, admincomment, recipient.displayname))
sign.sendAdvertisementEmail(subject, content)
flush_database_updates()
def acknowledgeSignature(self, user, recipient):
"""See ISignedCodeOfConductSet."""
active = True
sign = SignedCodeOfConduct(owner=user, recipient=recipient,
active=active)
subject = 'Launchpad: Code Of Conduct Signature Acknowledge'
content = 'Paper Submitted acknowledge by %s' % recipient.displayname
sign.sendAdvertisementEmail(subject, content)
def getLastAcceptedDate(self):
"""See ISignedCodeOfConductSet."""
return getUtility(ICodeOfConductConf).datereleased
| agpl-3.0 |
pathletboy/rt-thread | bsp/x86/rtconfig.py | 10 | 1598 | import os
# toolchains options
ARCH='ia32'
CPU=''
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery,
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'E:/Program Files/CodeSourcery/Sourcery_CodeBench_Lite_for_IA32_ELF/bin'
elif CROSS_TOOL == 'keil':
print '================ERROR============================'
print 'Not support keil yet!'
print '================================================='
exit(0)
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
print 'Not support iar yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'i686-elf-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mtune=generic'
CFLAGS = DEVICE + ' -Wall'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-ia32.map,-cref,-u,_start -T x86_ram.lds -nostdlib'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
| gpl-2.0 |
Pedals2Paddles/ardupilot | Tools/LogAnalyzer/tests/TestNaN.py | 34 | 1178 | from LogAnalyzer import Test,TestResult
import math
class TestNaN(Test):
'''test for NaNs present in log'''
def __init__(self):
Test.__init__(self)
self.name = "NaNs"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
nans_ok = {
"CTUN": [ "DSAlt", "TAlt" ],
"POS": [ "RelOriginAlt"],
}
for channel in logdata.channels.keys():
for field in logdata.channels[channel].keys():
if channel in nans_ok and field in nans_ok[channel]:
continue
try:
for tupe in logdata.channels[channel][field].listData:
(ts, val) = tupe
if isinstance(val, float) and math.isnan(val):
FAIL()
self.result.statusMessage += "Found NaN in %s.%s\n" % (channel, field,)
raise ValueError()
                except ValueError:
continue
| gpl-3.0 |
nemaniarjun/coala | coalib/output/ConsoleInteraction.py | 1 | 42987 | import copy
import logging
import platform
import os
from termcolor import colored
try:
# This import has side effects and is needed to make input() behave nicely
import readline # pylint: disable=unused-import
except ImportError: # pragma: no cover
pass
from coalib.misc.DictUtilities import inverse_dicts
from coalib.misc.Exceptions import log_exception
from coalib.bearlib.spacing.SpacingHelper import SpacingHelper
from coalib.results.Result import Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
from coalib.results.result_actions.OpenEditorAction import OpenEditorAction
from coalib.results.result_actions.IgnoreResultAction import IgnoreResultAction
from coalib.results.result_actions.DoNothingAction import DoNothingAction
from coalib.results.result_actions.GeneratePatchesAction import (
GeneratePatchesAction)
from coalib.results.result_actions.ShowAppliedPatchesAction import (
ShowAppliedPatchesAction)
from coalib.results.result_actions.PrintDebugMessageAction import (
PrintDebugMessageAction)
from coalib.results.result_actions.PrintMoreInfoAction import (
PrintMoreInfoAction)
from coalib.results.result_actions.ShowPatchAction import ShowPatchAction
from coalib.results.RESULT_SEVERITY import (
RESULT_SEVERITY, RESULT_SEVERITY_COLORS)
from coalib.settings.Setting import Setting
from coala_utils.string_processing.Core import join_names
from pygments import highlight
from pygments.formatters import (TerminalTrueColorFormatter,
TerminalFormatter)
from pygments.filters import VisibleWhitespaceFilter
from pygments.lexers import TextLexer, get_lexer_for_filename
from pygments.style import Style
from pygments.token import Token
from pygments.util import ClassNotFound
class BackgroundSourceRangeStyle(Style):
styles = {
Token: 'bold bg:#BB4D3E #111'
}
class BackgroundMessageStyle(Style):
styles = {
Token: 'bold bg:#eee #111'
}
class NoColorStyle(Style):
styles = {
Token: 'noinherit'
}
def highlight_text(no_color, text, style, lexer=TextLexer()):
formatter = TerminalTrueColorFormatter(style=style)
if no_color:
formatter = TerminalTrueColorFormatter(style=NoColorStyle)
return highlight(text, lexer, formatter)[:-1]
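# For example (an illustrative call; nothing in this module invokes it with
# these exact arguments):
#
#   highlight_text(False, 'def f(): pass', BackgroundMessageStyle,
#                  get_lexer_for_filename('example.py'))
#   # -> the snippet wrapped in truecolor ANSI escapes, trailing newline
#   #    stripped; with no_color=True, NoColorStyle is used instead.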
STR_GET_VAL_FOR_SETTING = ('Please enter a value for the setting \"{}\" ({}) '
'needed by {} for section \"{}\": ')
STR_LINE_DOESNT_EXIST = ('The line belonging to the following result '
'cannot be printed because it refers to a line '
"that doesn't seem to exist in the given file.")
STR_PROJECT_WIDE = 'Project wide:'
STR_ENTER_NUMBER = 'Enter number (Ctrl-{} to exit): '.format(
'Z' if platform.system() == 'Windows' else 'D')
FILE_NAME_COLOR = 'blue'
FILE_LINES_COLOR = 'blue'
CAPABILITY_COLOR = 'green'
HIGHLIGHTED_CODE_COLOR = 'red'
SUCCESS_COLOR = 'green'
REQUIRED_SETTINGS_COLOR = 'green'
CLI_ACTIONS = (OpenEditorAction(),
ApplyPatchAction(),
PrintDebugMessageAction(),
PrintMoreInfoAction(),
ShowPatchAction(),
IgnoreResultAction(),
ShowAppliedPatchesAction(),
GeneratePatchesAction())
DIFF_EXCERPT_MAX_SIZE = 4
def color_letter(console_printer, line):
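    # Highlights the shortcut letter inside parentheses, e.g. the 'N' in
    # 'Do (N)othing', printing it in blue between the surrounding text.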
x = -1
y = -1
letter = ''
for i, l in enumerate(line, 0):
if line[i] == '(':
x = i
if line[i] == ')':
y = i
if l.isupper() and x != -1:
letter = l
first_part = line[:x+1]
second_part = line[y:]
console_printer.print(first_part, end='')
console_printer.print(letter, color='blue', end='')
console_printer.print(second_part)
def format_lines(lines, symbol='', line_nr=''):
    def sym(x): return ']' if x == '[' else x
return '\n'.join('{}{:>4}{} {}'.format(symbol, line_nr, sym(symbol), line)
for line in lines.rstrip('\n').split('\n'))
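# For example (illustrative):
#
#   format_lines('hello\nworld', symbol='[', line_nr=3)
#   # -> '[   3] hello\n[   3] world'
#   format_lines('project wide', symbol='!')
#   # -> '!    ! project wide'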
def print_section_beginning(console_printer, section):
"""
Will be called after initialization current_section in
begin_section()
:param console_printer: Object to print messages on the console.
:param section: The section that will get executed now.
"""
console_printer.print('Executing section {name}...'.format(
name=section.name))
def nothing_done(log_printer=None):
"""
Will be called after processing a coafile when nothing had to be done,
i.e. no section was enabled/targeted.
:param log_printer: A LogPrinter object.
"""
logging.warning('No existent section was targeted or enabled. Nothing to '
'do.')
def acquire_actions_and_apply(console_printer,
section,
file_diff_dict,
result,
file_dict,
cli_actions=None,
apply_single=False):
"""
Acquires applicable actions and applies them.
:param console_printer: Object to print messages on the console.
:param section: Name of section to which the result belongs.
:param file_diff_dict: Dictionary containing filenames as keys and Diff
objects as values.
:param result: A derivative of Result.
:param file_dict: A dictionary containing all files with filename as
key.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param cli_actions: The list of cli actions available.
"""
cli_actions = CLI_ACTIONS if cli_actions is None else cli_actions
failed_actions = set()
applied_actions = {}
while True:
actions = []
for action in cli_actions:
if action.is_applicable(result, file_dict, file_diff_dict) is True:
actions.append(action)
        if not actions:
return
action_dict = {}
metadata_list = []
for action in actions:
metadata = action.get_metadata()
action_dict[metadata.name] = action
metadata_list.append(metadata)
# User can always choose no action which is guaranteed to succeed
if apply_single:
ask_for_action_and_apply(console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions,
apply_single=apply_single)
break
elif not ask_for_action_and_apply(console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions,
apply_single=apply_single):
break
def print_lines(console_printer,
file_dict,
sourcerange):
"""
Prints the lines between the current and the result line. If needed
they will be shortened.
:param console_printer: Object to print messages on the console.
:param file_dict: A dictionary containing all files as values with
filenames as key.
:param sourcerange: The SourceRange object referring to the related
lines to print.
"""
no_color = not console_printer.print_colored
for i in range(sourcerange.start.line, sourcerange.end.line + 1):
# Print affected file's line number in the sidebar.
console_printer.print(format_lines(lines='', line_nr=i, symbol='['),
color=FILE_LINES_COLOR,
end='')
line = file_dict[sourcerange.file][i - 1].rstrip('\n')
try:
lexer = get_lexer_for_filename(sourcerange.file)
except ClassNotFound:
lexer = TextLexer()
lexer.add_filter(VisibleWhitespaceFilter(
spaces=True, tabs=True,
tabsize=SpacingHelper.DEFAULT_TAB_WIDTH))
# highlight() combines lexer and formatter to output a ``str``
# object.
printed_chars = 0
if i == sourcerange.start.line and sourcerange.start.column:
console_printer.print(highlight_text(
no_color, line[:sourcerange.start.column - 1],
BackgroundMessageStyle, lexer), end='')
printed_chars = sourcerange.start.column - 1
if i == sourcerange.end.line and sourcerange.end.column:
console_printer.print(highlight_text(
no_color, line[printed_chars:sourcerange.end.column - 1],
BackgroundSourceRangeStyle, lexer), end='')
console_printer.print(highlight_text(
no_color, line[sourcerange.end.column - 1:],
BackgroundSourceRangeStyle, lexer), end='')
console_printer.print('')
else:
console_printer.print(highlight_text(
no_color, line[printed_chars:], BackgroundMessageStyle, lexer),
end='')
console_printer.print('')
def print_result(console_printer,
section,
file_diff_dict,
result,
file_dict,
interactive=True,
apply_single=False):
"""
Prints the result to console.
:param console_printer: Object to print messages on the console.
:param section: Name of section to which the result belongs.
:param file_diff_dict: Dictionary containing filenames as keys and Diff
objects as values.
:param result: A derivative of Result.
:param file_dict: A dictionary containing all files with filename as
key.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param interactive: Variable to check whether or not to
offer the user actions interactively.
"""
no_color = not console_printer.print_colored
if not isinstance(result, Result):
logging.warning('One of the results can not be printed since it is '
'not a valid derivative of the coala result '
'class.')
return
if hasattr(section, 'name'):
console_printer.print('**** {bear} [Section: {section} | Severity: '
'{severity}] ****'
.format(bear=result.origin,
section=section.name,
severity=RESULT_SEVERITY.__str__(
result.severity)),
color=RESULT_SEVERITY_COLORS[result.severity])
else:
console_printer.print('**** {bear} [Section {section} | Severity '
'{severity}] ****'
.format(bear=result.origin, section='<empty>',
severity=RESULT_SEVERITY.__str__(
result.severity)),
color=RESULT_SEVERITY_COLORS[result.severity])
lexer = TextLexer()
result.message = highlight_text(no_color, result.message,
BackgroundMessageStyle, lexer)
console_printer.print(format_lines(result.message, symbol='!'))
if interactive:
cli_actions = CLI_ACTIONS
show_patch_action = ShowPatchAction()
if show_patch_action.is_applicable(
result, file_dict, file_diff_dict) is True:
diff_size = sum(len(diff) for diff in result.diffs.values())
if diff_size <= DIFF_EXCERPT_MAX_SIZE:
show_patch_action.apply_from_section(result,
file_dict,
file_diff_dict,
section)
cli_actions = tuple(action for action in cli_actions
if not isinstance(action, ShowPatchAction))
else:
print_diffs_info(result.diffs, console_printer)
acquire_actions_and_apply(console_printer,
section,
file_diff_dict,
result,
file_dict,
cli_actions,
apply_single=apply_single)
def print_diffs_info(diffs, printer):
"""
Prints diffs information (number of additions and deletions) to the console.
:param diffs: List of Diff objects containing corresponding diff info.
:param printer: Object responsible for printing diffs on console.
"""
for filename, diff in sorted(diffs.items()):
additions, deletions = diff.stats()
printer.print(
format_lines('+{additions} -{deletions} in {file}'.format(
file=filename,
additions=additions,
deletions=deletions), '!'),
color='green')
def print_results_formatted(log_printer,
section,
result_list,
file_dict,
*args):
"""
Prints results through the format string from the format setting done by
user.
:param log_printer: Printer responsible for logging the messages.
:param section: The section to which the results belong.
:param result_list: List of Result objects containing the corresponding
results.
"""
default_format = ('id:{id}:origin:{origin}:file:{file}:line:{line}:'
'column:{column}:end_line:{end_line}:end_column:'
'{end_column}:severity:{severity}:severity_str:'
'{severity_str}:message:{message}')
format_str = str(section.get('format', default_format))
if format_str == 'True':
format_str = default_format
for result in result_list:
severity_str = RESULT_SEVERITY.__str__(result.severity)
format_args = vars(result)
try:
if len(result.affected_code) == 0:
format_args['affected_code'] = None
print(format_str.format(file=None,
line=None,
end_line=None,
column=None,
end_column=None,
severity_str=severity_str,
message=result.message,
**format_args))
continue
for range in result.affected_code:
format_args['affected_code'] = range
format_args['source_lines'] = range.affected_source(file_dict)
print(format_str.format(file=range.start.file,
line=range.start.line,
end_line=range.end.line,
column=range.start.column,
end_column=range.end.column,
severity_str=severity_str,
message=result.message,
**format_args))
except KeyError as exception:
log_exception(
'Unable to print the result with the given format string.',
exception)
def print_bears_formatted(bears, format=None):
format_str = format or ('name:{name}:can_detect:{can_detect}:'
'can_fix:{can_fix}:description:{description}')
print('\n\n'.join(format_str.format(name=bear.name,
can_detect=bear.CAN_DETECT,
can_fix=bear.CAN_FIX,
description=bear.get_metadata().desc)
for bear in bears))
def print_affected_files(console_printer,
log_printer,
result,
file_dict):
"""
Prints all the affected files and affected lines within them.
:param console_printer: Object to print messages on the console.
:param log_printer: Printer responsible for logging the messages.
:param result: The result to print the context for.
:param file_dict: A dictionary containing all files with filename as
key.
"""
if len(result.affected_code) == 0:
console_printer.print('\n' + STR_PROJECT_WIDE,
color=FILE_NAME_COLOR)
else:
for sourcerange in result.affected_code:
if (
sourcerange.file is not None and
sourcerange.file not in file_dict):
logging.warning('The context for the result ({}) cannot '
'be printed because it refers to a file '
"that doesn't seem to exist ({})"
'.'.format(result, sourcerange.file))
else:
print_affected_lines(console_printer,
file_dict,
sourcerange)
def print_results_no_input(log_printer,
section,
result_list,
file_dict,
file_diff_dict,
console_printer,
apply_single=False):
"""
Prints all non interactive results in a section
:param log_printer: Printer responsible for logging the messages.
:param section: The section to which the results belong to.
:param result_list: List containing the results
:param file_dict: A dictionary containing all files with filename as
key.
:param file_diff_dict: A dictionary that contains filenames as keys and
diff objects as values.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param console_printer: Object to print messages on the console.
"""
for result in result_list:
print_affected_files(console_printer,
None,
result,
file_dict)
print_result(console_printer,
section,
file_diff_dict,
result,
file_dict,
interactive=False,
apply_single=apply_single)
def print_results(log_printer,
section,
result_list,
file_dict,
file_diff_dict,
console_printer,
apply_single=False):
"""
Prints all the results in a section.
:param log_printer: Printer responsible for logging the messages.
:param section: The section to which the results belong to.
:param result_list: List containing the results
:param file_dict: A dictionary containing all files with filename as
key.
:param file_diff_dict: A dictionary that contains filenames as keys and
diff objects as values.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param console_printer: Object to print messages on the console.
"""
for result in sorted(result_list):
print_affected_files(console_printer,
None,
result,
file_dict)
print_result(console_printer,
section,
file_diff_dict,
result,
file_dict,
apply_single=apply_single)
def print_affected_lines(console_printer, file_dict, sourcerange):
"""
Prints the lines affected by the bears.
:param console_printer: Object to print messages on the console.
:param file_dict: A dictionary containing all files with filename
as key.
:param sourcerange: The SourceRange object referring to the related
lines to print.
"""
console_printer.print('\n' + os.path.relpath(sourcerange.file),
color=FILE_NAME_COLOR)
if sourcerange.start.line is not None:
if len(file_dict[sourcerange.file]) < sourcerange.end.line:
console_printer.print(format_lines(lines=STR_LINE_DOESNT_EXIST,
line_nr=sourcerange.end.line,
symbol='!'))
else:
print_lines(console_printer,
file_dict,
sourcerange)
def require_setting(setting_name, arr, section):
"""
This method is responsible for prompting a user about a missing setting and
taking its value as input from the user.
:param setting_name: Name of the setting missing
:param arr: A list containing a description in [0] and the name
of the bears who need this setting in [1] and
following.
:param section: The section the action corresponds to.
"""
needed = join_names(arr[1:])
# Don't use input, it can't deal with escapes!
print(colored(STR_GET_VAL_FOR_SETTING.format(setting_name, arr[0], needed,
section.name),
REQUIRED_SETTINGS_COLOR))
return input()
def acquire_settings(log_printer, settings_names_dict, section):
"""
This method prompts the user for the given settings.
:param log_printer:
Printer responsible for logging the messages. This is needed to comply
with the interface.
:param settings_names_dict:
A dictionary with the settings name as key and a list containing a
description in [0] and the name of the bears who need this setting in
[1] and following.
Example:
::
{"UseTabs": ["describes whether tabs should be used instead of spaces",
"SpaceConsistencyBear",
"SomeOtherBear"]}
:param section:
The section the action corresponds to.
:return:
A dictionary with the settings name as key and the given value as
value.
"""
if not isinstance(settings_names_dict, dict):
raise TypeError('The settings_names_dict parameter has to be a '
'dictionary.')
result = {}
for setting_name, arr in sorted(settings_names_dict.items(),
key=lambda x: (join_names(x[1][1:]), x[0])):
value = require_setting(setting_name, arr, section)
result.update({setting_name: value} if value is not None else {})
return result
def get_action_info(section, action, failed_actions):
"""
Gets all the required Settings for an action. It updates the section with
the Settings.
:param section: The section the action corresponds to.
:param action: The action to get the info for.
:param failed_actions: A set of all actions that have failed. A failed
action remains in the list until it is successfully
executed.
:return: Action name and the updated section.
"""
params = action.non_optional_params
for param_name in params:
if param_name not in section or action.name in failed_actions:
question = format_lines(
"Please enter a value for the parameter '{}' ({}): "
.format(param_name, params[param_name][0]), symbol='!')
section.append(Setting(param_name, input(question)))
return action.name, section
def choose_action(console_printer, actions, apply_single=False):
"""
Presents the actions available to the user and takes as input the action
the user wants to choose.
:param console_printer: Object to print messages on the console.
:param actions: Actions available to the user.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:return: Return a tuple of lists, a list with the names of
actions that needs to be applied and a list with
with the description of the actions.
"""
actions.insert(0, DoNothingAction().get_metadata())
actions_desc = []
actions_name = []
if apply_single:
for i, action in enumerate(actions, 0):
if apply_single == action.desc:
return ([action.desc], [action.name])
return (['Do (N)othing'], ['Do (N)othing'])
else:
while True:
for i, action in enumerate(actions, 0):
output = '{:>2}. {}' if i != 0 else '*{}. {}'
color_letter(console_printer, format_lines(output.format(
i, action.desc), symbol='['))
line = format_lines(STR_ENTER_NUMBER, symbol='[')
choice = input(line)
choice = str(choice)
for c in choice:
c = str(c)
actions_desc_len = len(actions_desc)
if c.isnumeric():
for i, action in enumerate(actions, 0):
c = int(c)
if i == c:
actions_desc.append(action.desc)
actions_name.append(action.name)
break
elif c.isalpha():
c = c.upper()
c = '(' + c + ')'
for i, action in enumerate(actions, 1):
if c in action.desc:
actions_desc.append(action.desc)
actions_name.append(action.name)
break
if actions_desc_len == len(actions_desc):
console_printer.print(format_lines(
'Please enter a valid letter.', symbol='['))
if not choice:
actions_desc.append(DoNothingAction().get_metadata().desc)
actions_name.append(DoNothingAction().get_metadata().name)
return (actions_desc, actions_name)
def try_to_apply_action(action_name,
chosen_action,
console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions):
"""
Try to apply the given action.
:param action_name: The name of the action.
:param choose_action: The action object that will be applied.
:param console_printer: Object to print messages on the console.
:param section: Currently active section.
:param metadata_list: Contains metadata for all the actions.
:param action_dict: Contains the action names as keys and their
references as values.
:param failed_actions: A set of all actions that have failed. A failed
action remains in the list until it is successfully
executed.
:param result: Result corresponding to the actions.
:param file_diff_dict: If it is an action which applies a patch, this
contains the diff of the patch to be applied to
the file with filename as keys.
:param applied_actions: A dictionary that contains the result, file_dict,
file_diff_dict and the section for an action.
:param file_dict: Dictionary with filename as keys and its contents
as values.
"""
try:
chosen_action.apply_from_section(result,
file_dict,
file_diff_dict,
section)
console_printer.print(
format_lines(chosen_action.SUCCESS_MESSAGE, symbol='['),
color=SUCCESS_COLOR)
applied_actions[action_name] = [copy.copy(result), copy.copy(
file_dict),
copy.copy(file_diff_dict),
copy.copy(section)]
result.set_applied_actions(applied_actions)
failed_actions.discard(action_name)
except Exception as exception: # pylint: disable=broad-except
logging.error('Failed to execute the action {} with error: {}.'
.format(action_name, exception))
failed_actions.add(action_name)
def ask_for_action_and_apply(console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions,
apply_single=False):
"""
Asks the user for an action and applies it.
:param console_printer: Object to print messages on the console.
:param section: Currently active section.
:param metadata_list: Contains metadata for all the actions.
:param action_dict: Contains the action names as keys and their
references as values.
:param failed_actions: A set of all actions that have failed. A failed
action remains in the list until it is successfully
executed.
:param result: Result corresponding to the actions.
:param file_diff_dict: If it is an action which applies a patch, this
contains the diff of the patch to be applied to
the file with filename as keys.
:param file_dict: Dictionary with filename as keys and its contents
as values.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param applied_actions: A dictionary that contains the result, file_dict,
file_diff_dict and the section for an action.
:return: Returns a boolean value. True will be returned, if
it makes sense that the user may choose to execute
another action, False otherwise.
"""
actions_desc, actions_name = choose_action(console_printer, metadata_list,
apply_single)
if apply_single:
if apply_single == 'Do (N)othing':
return False
for index, action_details in enumerate(metadata_list, 1):
if apply_single == action_details.desc:
action_name, section = get_action_info(
section, metadata_list[index - 1], failed_actions)
chosen_action = action_dict[action_details.name]
try_to_apply_action(action_name,
chosen_action,
console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions)
else:
for action_choice, action_choice_name in zip(actions_desc,
actions_name):
if action_choice == 'Do (N)othing':
return False
chosen_action = action_dict[action_choice_name]
action_choice_made = action_choice
for index, action_details in enumerate(metadata_list, 1):
if action_choice_made in action_details.desc:
action_name, section = get_action_info(
section, metadata_list[index-1], failed_actions)
try_to_apply_action(action_name,
chosen_action,
console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions)
return True
def show_enumeration(console_printer,
title,
items,
indentation,
no_items_text):
"""
This function takes as input an iterable object (preferably a list or
a dict) and prints it in a stylized format. If the iterable object is
empty, it prints a specific statement given by the user. An e.g :
<indentation>Title:
<indentation> * Item 1
<indentation> * Item 2
:param console_printer: Object to print messages on the console.
:param title: Title of the text to be printed
:param items: The iterable object.
:param indentation: Number of spaces to indent every line by.
:param no_items_text: Text printed when iterable object is empty.
"""
if not items:
console_printer.print(indentation + no_items_text)
else:
console_printer.print(indentation + title)
if isinstance(items, dict):
for key, value in items.items():
console_printer.print(indentation + ' * ' + key + ': ' +
value[0])
else:
for item in items:
console_printer.print(indentation + ' * ' + item)
console_printer.print()
def show_bear(bear,
show_description,
show_params,
console_printer):
"""
Displays all information about a bear.
:param bear: The bear to be displayed.
:param show_description: True if the main description should be shown.
:param show_params: True if the details should be shown.
:param console_printer: Object to print messages on the console.
"""
console_printer.print(bear.name, color='blue')
if not show_description and not show_params:
return
metadata = bear.get_metadata()
if show_description:
console_printer.print(
' ' + metadata.desc.replace('\n', '\n '))
console_printer.print() # Add a newline
if show_params:
show_enumeration(
console_printer, 'Supported languages:',
bear.LANGUAGES,
' ',
'The bear does not provide information about which languages '
'it can analyze.')
show_enumeration(console_printer,
'Needed Settings:',
metadata.non_optional_params,
' ',
'No needed settings.')
show_enumeration(console_printer,
'Optional Settings:',
metadata.optional_params,
' ',
'No optional settings.')
show_enumeration(console_printer,
'Can detect:',
bear.can_detect,
' ',
'This bear does not provide information about what '
'categories it can detect.')
show_enumeration(console_printer,
'Can fix:',
bear.CAN_FIX,
' ',
'This bear cannot fix issues or does not provide '
'information about what categories it can fix.')
console_printer.print(
' Path:\n' + ' ' + repr(bear.source_location) + '\n')
def print_bears(bears,
show_description,
show_params,
console_printer,
args=None):
"""
Presents all bears being used in a stylized manner.
:param bears: It's a dictionary with bears as keys and list of
sections containing those bears as values.
:param show_description: True if the main description of the bears should
be shown.
:param show_params: True if the parameters and their description
should be shown.
:param console_printer: Object to print messages on the console.
:param args: Args passed to coala command.
"""
if not bears:
console_printer.print('No bears to show. Did you forget to install '
'the `coala-bears` package? Try `pip3 install '
'coala-bears`.')
return
results = [bear for bear, _ in sorted(bears.items(),
key=lambda bear_tuple:
bear_tuple[0].name.lower())]
if args and args.json:
from coalib.output.JSONEncoder import create_json_encoder
JSONEncoder = create_json_encoder(use_relpath=args.relpath)
json_output = {'bears': results}
import json
json_formatted_output = json.dumps(json_output,
cls=JSONEncoder,
sort_keys=True,
indent=2,
separators=(',', ': '))
if args.output:
filename = args.output[0]
with open(filename, 'w') as fp:
fp.write(json_formatted_output)
else:
print(json_formatted_output)
elif args and args.format:
print_bears_formatted(results)
else:
for bear in results:
show_bear(bear,
show_description,
show_params,
console_printer)
def show_bears(local_bears,
global_bears,
show_description,
show_params,
console_printer,
args=None):
"""
    Extracts all the bears from each enabled section or the sections in the
    targets and passes the resulting dictionary to print_bears().
:param local_bears: Dictionary of local bears with section names
as keys and bear list as values.
:param global_bears: Dictionary of global bears with section
names as keys and bear list as values.
:param show_description: True if the main description of the bears should
be shown.
:param show_params: True if the parameters and their description
should be shown.
:param console_printer: Object to print messages on the console.
:param args: Args passed to coala command.
"""
bears = inverse_dicts(local_bears, global_bears)
print_bears(bears, show_description, show_params, console_printer, args)
def show_language_bears_capabilities(language_bears_capabilities,
console_printer):
"""
Displays what the bears can detect and fix.
:param language_bears_capabilities:
Dictionary with languages as keys and their bears' capabilities as
values. The capabilities are stored in a tuple of two elements where the
first one represents what the bears can detect, and the second one what
they can fix.
:param console_printer:
Object to print messages on the console.
"""
if not language_bears_capabilities:
console_printer.print('There is no bear available for this language')
else:
for language, capabilities in language_bears_capabilities.items():
if capabilities[0]:
console_printer.print(
'coala can do the following for ', end='')
console_printer.print(language.upper(), color='blue')
console_printer.print(' Can detect only: ', end='')
console_printer.print(
', '.join(sorted(capabilities[0])), color=CAPABILITY_COLOR)
if capabilities[1]:
console_printer.print(' Can fix : ', end='')
console_printer.print(
', '.join(sorted(capabilities[1])),
color=CAPABILITY_COLOR)
else:
console_printer.print('coala does not support ', color='red',
end='')
console_printer.print(language, color='blue')
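# A sketch of the expected input shape (capability names are hypothetical):
#
#     show_language_bears_capabilities(
#         {'Python': ({'Formatting', 'Redundancy'}, {'Formatting'})},
#         console_printer)
#
# prints what the bears can detect and fix for Python.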
| agpl-3.0 |
emersonsoftware/ansiblefork | lib/ansible/modules/cloud/openstack/os_port_facts.py | 6 | 7703 | #!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: os_port_facts
short_description: Retrieve facts about ports within OpenStack.
version_added: "2.1"
author: "David Shrewsbury (@Shrews)"
description:
- Retrieve facts about ports from OpenStack.
notes:
- Facts are placed in the C(openstack_ports) variable.
requirements:
- "python >= 2.6"
- "shade"
options:
port:
description:
- Unique name or ID of a port.
required: false
default: null
filters:
description:
- A dictionary of meta data to use for further filtering. Elements
of this dictionary will be matched against the returned port
dictionaries. Matching is currently limited to strings within
the port dictionary, or strings within nested dictionaries.
required: false
default: null
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all ports
- os_port_facts:
cloud: mycloud
# Gather facts about a single port
- os_port_facts:
cloud: mycloud
port: 6140317d-e676-31e1-8a4a-b1913814a471
# Gather facts about all ports that have device_id set to a specific value
# and with a status of ACTIVE.
- os_port_facts:
cloud: mycloud
filters:
device_id: 1038a010-3a37-4a9d-82ea-652f1da36597
status: ACTIVE
'''
RETURN = '''
openstack_ports:
description: List of port dictionaries. A subset of the dictionary keys
listed below may be returned, depending on your cloud provider.
returned: always, but can be null
type: complex
contains:
admin_state_up:
description: The administrative state of the router, which is
up (true) or down (false).
returned: success
type: boolean
sample: true
allowed_address_pairs:
description: A set of zero or more allowed address pairs. An
address pair consists of an IP address and MAC address.
returned: success
type: list
sample: []
"binding:host_id":
description: The UUID of the host where the port is allocated.
returned: success
type: string
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
"binding:profile":
            description: A dictionary that enables the application running on
the host to pass and receive VIF port-specific
information to the plug-in.
returned: success
type: dict
sample: {}
"binding:vif_details":
description: A dictionary that enables the application to pass
information about functions that the Networking API
provides.
returned: success
type: dict
sample: {"port_filter": true}
"binding:vif_type":
description: The VIF type for the port.
returned: success
            type: string
sample: "ovs"
"binding:vnic_type":
description: The virtual network interface card (vNIC) type that is
bound to the neutron port.
returned: success
type: string
sample: "normal"
device_id:
description: The UUID of the device that uses this port.
returned: success
type: string
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
device_owner:
            description: The entity type that uses this port.
returned: success
type: string
sample: "network:router_interface"
dns_assignment:
description: DNS assignment information.
returned: success
type: list
dns_name:
description: DNS name
returned: success
type: string
sample: ""
extra_dhcp_opts:
description: A set of zero or more extra DHCP option pairs.
An option pair consists of an option value and name.
returned: success
type: list
sample: []
fixed_ips:
description: The IP addresses for the port. Includes the IP address
and UUID of the subnet.
returned: success
type: list
id:
description: The UUID of the port.
returned: success
type: string
sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
ip_address:
description: The IP address.
returned: success
type: string
sample: "127.0.0.1"
mac_address:
description: The MAC address.
returned: success
type: string
sample: "00:00:5E:00:53:42"
name:
description: The port name.
returned: success
type: string
sample: "port_name"
network_id:
description: The UUID of the attached network.
returned: success
type: string
sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
port_security_enabled:
description: The port security status. The status is enabled (true) or disabled (false).
returned: success
type: boolean
sample: false
security_groups:
description: The UUIDs of any attached security groups.
returned: success
type: list
status:
description: The port status.
returned: success
type: string
sample: "ACTIVE"
tenant_id:
description: The UUID of the tenant who owns the network.
returned: success
type: string
sample: "51fce036d7984ba6af4f6c849f65ef00"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def main():
argument_spec = openstack_full_argument_spec(
port=dict(required=False),
filters=dict(type='dict', required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
port = module.params.pop('port')
filters = module.params.pop('filters')
try:
cloud = shade.openstack_cloud(**module.params)
ports = cloud.search_ports(port, filters)
module.exit_json(changed=False, ansible_facts=dict(
openstack_ports=ports))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
cms-externals/pyqt | examples/designer/plugins/widgets/analogclock.py | 20 | 6305 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
class PyAnalogClock(QtGui.QWidget):
"""PyAnalogClock(QtGui.QWidget)
Provides an analog clock custom widget with signals, slots and properties.
The implementation is based on the Analog Clock example provided with both
Qt and PyQt.
"""
# Emitted when the clock's time changes.
timeChanged = QtCore.pyqtSignal(QtCore.QTime)
# Emitted when the clock's time zone changes.
timeZoneChanged = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
super(PyAnalogClock, self).__init__(parent)
self.timeZoneOffset = 0
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update)
timer.timeout.connect(self.updateTime)
timer.start(1000)
self.setWindowTitle(QtCore.QObject.tr(self, "Analog Clock"))
self.resize(200, 200)
self.hourHand = QtGui.QPolygon([
QtCore.QPoint(7, 8),
QtCore.QPoint(-7, 8),
QtCore.QPoint(0, -40)
])
self.minuteHand = QtGui.QPolygon([
QtCore.QPoint(7, 8),
QtCore.QPoint(-7, 8),
QtCore.QPoint(0, -70)
])
self.hourColor = QtGui.QColor(0, 127, 0)
self.minuteColor = QtGui.QColor(0, 127, 127, 191)
def paintEvent(self, event):
side = min(self.width(), self.height())
time = QtCore.QTime.currentTime()
time = time.addSecs(self.timeZoneOffset * 3600)
painter = QtGui.QPainter()
painter.begin(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.translate(self.width() / 2, self.height() / 2)
painter.scale(side / 200.0, side / 200.0)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtGui.QBrush(self.hourColor))
painter.save()
painter.rotate(30.0 * ((time.hour() + time.minute() / 60.0)))
painter.drawConvexPolygon(self.hourHand)
painter.restore()
painter.setPen(self.hourColor)
for i in range(0, 12):
painter.drawLine(88, 0, 96, 0)
painter.rotate(30.0)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtGui.QBrush(self.minuteColor))
painter.save()
painter.rotate(6.0 * (time.minute() + time.second() / 60.0))
painter.drawConvexPolygon(self.minuteHand)
painter.restore()
painter.setPen(QtGui.QPen(self.minuteColor))
for j in range(0, 60):
if (j % 5) != 0:
painter.drawLine(92, 0, 96, 0)
painter.rotate(6.0)
painter.end()
def minimumSizeHint(self):
return QtCore.QSize(50, 50)
def sizeHint(self):
return QtCore.QSize(100, 100)
def updateTime(self):
self.timeChanged.emit(QtCore.QTime.currentTime())
# The timeZone property is implemented using the getTimeZone() getter
# method, the setTimeZone() setter method, and the resetTimeZone() method.
# The getter just returns the internal time zone value.
def getTimeZone(self):
return self.timeZoneOffset
# The setTimeZone() method is also defined to be a slot. The @pyqtSlot
# decorator is used to tell PyQt which argument type the method expects,
# and is especially useful when you want to define slots with the same
# name that accept different argument types.
@QtCore.pyqtSlot(int)
def setTimeZone(self, value):
self.timeZoneOffset = value
self.timeZoneChanged.emit(value)
self.update()
# Qt's property system supports properties that can be reset to their
# original values. This method enables the timeZone property to be reset.
def resetTimeZone(self):
self.timeZoneOffset = 0
self.timeZoneChanged.emit(0)
self.update()
# Qt-style properties are defined differently to Python's properties.
# To declare a property, we call pyqtProperty() to specify the type and,
# in this case, getter, setter and resetter methods.
timeZone = QtCore.pyqtProperty(int, getTimeZone, setTimeZone, resetTimeZone)
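    # A brief usage sketch (values illustrative). The property is reachable
    # both through Qt's generic property API and as a plain attribute,
    # assuming ints are returned directly (sip API v2):
    #
    #     clock = PyAnalogClock()
    #     clock.timeZone = 5                 # invokes setTimeZone(5)
    #     clock.setProperty('timeZone', 0)   # same setter, via Qt
    #     offset = clock.timeZone            # invokes getTimeZone()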
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
clock = PyAnalogClock()
clock.show()
sys.exit(app.exec_())
| gpl-3.0 |
programmecat/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
raw_syscalls__sys_exit(**locals())
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
why11002526/keras | examples/skipgram_word_embeddings.py | 24 | 7370 |
'''
We loop over words in a dataset, and for each word, we look at a context window around the word.
We generate pairs of (pivot_word, other_word_from_same_context) with label 1,
and pairs of (pivot_word, random_word) with label 0 (skip-gram method).
We use the layer WordContextProduct to learn embeddings for the word couples,
and compute a proximity score between the embeddings (= p(context|word)),
trained with our positive and negative labels.
We then use the weights computed by WordContextProduct to encode words
and demonstrate that the geometry of the embedding space
captures certain useful semantic properties.
Read more about skip-gram in this particularly gnomic paper by Mikolov et al.:
http://arxiv.org/pdf/1301.3781v3.pdf
Note: you should run this on GPU, otherwise training will be quite slow.
On an EC2 GPU instance, expect 3 hours per 10e6 comments (~10e8 words) per epoch with dim_proj=256.
Should be much faster on a modern GPU.
GPU command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python skipgram_word_embeddings.py
Dataset: 5,845,908 Hacker News comments.
Obtain the dataset at:
https://mega.co.nz/#F!YohlwD7R!wec0yNO86SeaNGIYQBOR0A
(HNCommentsAll.1perline.json.bz2)
'''
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import theano
from six.moves import cPickle
import os, re, json
from keras.preprocessing import sequence, text
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils, generic_utils
from keras.models import Sequential
from keras.layers.embeddings import WordContextProduct, Embedding
from six.moves import range
from six.moves import zip
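# For intuition, sequence.skipgrams on a toy sequence (numbers are
# illustrative) yields positive and negative couples like:
#
#     couples, labels = sequence.skipgrams([1, 2, 3], vocabulary_size=5,
#                                          window_size=1, negative_samples=1.)
#     # couples ~ [[2, 1], [2, 3], ...]; labels are 1 for genuine context
#     # pairs and 0 for randomly sampled negative pairs.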
max_features = 50000 # vocabulary size: top 50,000 most common words in data
skip_top = 100 # ignore top 100 most common words
nb_epoch = 1
dim_proj = 256 # embedding space dimension
save = True
load_model = False
load_tokenizer = False
train_model = True
save_dir = os.path.expanduser("~/.keras/models")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_load_fname = "HN_skipgram_model.pkl"
model_save_fname = "HN_skipgram_model.pkl"
tokenizer_fname = "HN_tokenizer.pkl"
data_path = os.path.expanduser("~/")+"HNCommentsAll.1perline.json"
# text preprocessing utils
html_tags = re.compile(r'<.*?>')
to_replace = [(''', "'")]
hex_tags = re.compile(r'&.*?;')
def clean_comment(comment):
c = str(comment.encode("utf-8"))
c = html_tags.sub(' ', c)
for tag, char in to_replace:
c = c.replace(tag, char)
c = hex_tags.sub(' ', c)
return c
def text_generator(path=data_path):
f = open(path)
for i, l in enumerate(f):
comment_data = json.loads(l)
comment_text = comment_data["comment_text"]
comment_text = clean_comment(comment_text)
if i % 10000 == 0:
print(i)
yield comment_text
f.close()
# model management
if load_tokenizer:
print('Load tokenizer...')
tokenizer = cPickle.load(open(os.path.join(save_dir, tokenizer_fname), 'rb'))
else:
print("Fit tokenizer...")
tokenizer = text.Tokenizer(nb_words=max_features)
tokenizer.fit_on_texts(text_generator())
if save:
print("Save tokenizer...")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cPickle.dump(tokenizer, open(os.path.join(save_dir, tokenizer_fname), "wb"))
# training process
if train_model:
if load_model:
print('Load model...')
model = cPickle.load(open(os.path.join(save_dir, model_load_fname), 'rb'))
else:
print('Build model...')
model = Sequential()
model.add(WordContextProduct(max_features, proj_dim=dim_proj, init="uniform"))
model.compile(loss='mse', optimizer='rmsprop')
sampling_table = sequence.make_sampling_table(max_features)
for e in range(nb_epoch):
print('-'*40)
print('Epoch', e)
print('-'*40)
progbar = generic_utils.Progbar(tokenizer.document_count)
samples_seen = 0
losses = []
for i, seq in enumerate(tokenizer.texts_to_sequences_generator(text_generator())):
# get skipgram couples for one text in the dataset
couples, labels = sequence.skipgrams(seq, max_features, window_size=4, negative_samples=1., sampling_table=sampling_table)
if couples:
# one gradient update per sentence (one sentence = a few 1000s of word couples)
X = np.array(couples, dtype="int32")
loss = model.train_on_batch(X, labels)
losses.append(loss)
if len(losses) % 100 == 0:
progbar.update(i, values=[("loss", np.mean(losses))])
losses = []
samples_seen += len(labels)
print('Samples seen:', samples_seen)
print("Training completed!")
if save:
print("Saving model...")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cPickle.dump(model, open(os.path.join(save_dir, model_save_fname), "wb"))
print("It's test time!")
# recover the embedding weights trained with skipgram:
weights = model.layers[0].get_weights()[0]
# we no longer need this
del model
weights[:skip_top] = np.zeros((skip_top, dim_proj))
norm_weights = np_utils.normalize(weights)
word_index = tokenizer.word_index
reverse_word_index = dict([(v, k) for k, v in list(word_index.items())])
def embed_word(w):
i = word_index.get(w)
if (not i) or (i < skip_top) or (i >= max_features):
return None
return norm_weights[i]
def closest_to_point(point, nb_closest=10):
proximities = np.dot(norm_weights, point)
tups = list(zip(list(range(len(proximities))), proximities))
tups.sort(key=lambda x: x[1], reverse=True)
return [(reverse_word_index.get(t[0]), t[1]) for t in tups[:nb_closest]]
def closest_to_word(w, nb_closest=10):
i = word_index.get(w)
if (not i) or (i < skip_top) or (i >= max_features):
return []
return closest_to_point(norm_weights[i].T, nb_closest)
''' the results in comments below were for:
5.8M HN comments
dim_proj = 256
nb_epoch = 2
optimizer = rmsprop
loss = mse
max_features = 50000
skip_top = 100
negative_samples = 1.
window_size = 4
and frequency subsampling of factor 10e-5.
'''
words = [
"article", # post, story, hn, read, comments
"3", # 6, 4, 5, 2
"two", # three, few, several, each
"great", # love, nice, working, looking
"data", # information, memory, database
"money", # company, pay, customers, spend
"years", # ago, year, months, hours, week, days
"android", # ios, release, os, mobile, beta
"javascript", # js, css, compiler, library, jquery, ruby
"look", # looks, looking
"business", # industry, professional, customers
"company", # companies, startup, founders, startups
"after", # before, once, until
"own", # personal, our, having
"us", # united, country, american, tech, diversity, usa, china, sv
"using", # javascript, js, tools (lol)
"here", # hn, post, comments
]
for w in words:
res = closest_to_word(w)
print('====', w)
for r in res:
print(r)
| mit |
hassoon3/odoo | addons/l10n_cr/__init__.py | 438 | 2045 | # -*- encoding: utf-8 -*-
##############################################################################
#
# __init__.py
# l10n_cr_account
# First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/python/debug/cli/base_ui.py | 89 | 7715 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Class of TensorFlow Debugger (tfdbg) Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
class BaseUI(object):
"""Base class of tfdbg user interface."""
CLI_PROMPT = "tfdbg> "
CLI_EXIT_COMMANDS = ["exit", "quit"]
ERROR_MESSAGE_PREFIX = "ERROR: "
INFO_MESSAGE_PREFIX = "INFO: "
def __init__(self, on_ui_exit=None, config=None):
"""Constructor of the base class.
Args:
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: An instance of `cli_config.CLIConfig()` carrying user-facing
configurations.
"""
self._on_ui_exit = on_ui_exit
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._config = config or cli_config.CLIConfig()
self._config_argparser = argparse.ArgumentParser(
description="config command", usage=argparse.SUPPRESS)
subparsers = self._config_argparser.add_subparsers()
set_parser = subparsers.add_parser("set")
set_parser.add_argument("property_name", type=str)
set_parser.add_argument("property_value", type=str)
set_parser = subparsers.add_parser("show")
self.register_command_handler(
"config",
self._config_command_handler,
self._config_argparser.format_help(),
prefix_aliases=["cfg"])
def set_help_intro(self, help_intro):
"""Set an introductory message to the help output of the command registry.
Args:
help_intro: (RichTextLines) Rich text lines appended to the beginning of
the output of the command "help", as introductory information.
"""
self._command_handler_registry.set_help_intro(help_intro=help_intro)
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""A wrapper around CommandHandlerRegistry.register_command_handler().
In addition to calling the wrapped register_command_handler() method, this
method also registers the top-level tab-completion context based on the
command prefixes and their aliases.
See the doc string of the wrapped method for more details on the args.
Args:
prefix: (str) command prefix.
handler: (callable) command handler.
help_info: (str) help information.
prefix_aliases: (list of str) aliases of the command prefix.
"""
self._command_handler_registry.register_command_handler(
prefix, handler, help_info, prefix_aliases=prefix_aliases)
self._tab_completion_registry.extend_comp_items("", [prefix])
if prefix_aliases:
self._tab_completion_registry.extend_comp_items("", prefix_aliases)
def register_tab_comp_context(self, *args, **kwargs):
"""Wrapper around TabCompletionRegistry.register_tab_comp_context()."""
self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the UI until user- or command- triggered exit.
Args:
init_command: (str) Optional command to run on CLI start up.
title: (str) Optional title to display in the CLI.
title_color: (str) Optional color of the title, e.g., "yellow".
enable_mouse_on_start: (bool) Whether the mouse mode is to be enabled on
start-up.
Returns:
An exit token of arbitrary type. Can be None.
"""
raise NotImplementedError("run_ui() is not implemented in BaseUI")
def _parse_command(self, command):
"""Parse a command string into prefix and arguments.
Args:
command: (str) Command string to be parsed.
Returns:
prefix: (str) The command prefix.
args: (list of str) The command arguments (i.e., not including the
prefix).
output_file_path: (str or None) The path to save the screen output
to (if any).
"""
command = command.strip()
if not command:
return "", [], None
command_items = command_parser.parse_command(command)
command_items, output_file_path = command_parser.extract_output_file_path(
command_items)
return command_items[0], command_items[1:], output_file_path
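  # For example (illustrative; assumes command_parser recognizes tfdbg's
  # "> file" screen-output redirection):
  #   _parse_command("print_tensor softmax > /tmp/out.txt")
  # would return ("print_tensor", ["softmax"], "/tmp/out.txt").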
def _analyze_tab_complete_input(self, text):
"""Analyze raw input to tab-completer.
Args:
text: (str) the full, raw input text to be tab-completed.
Returns:
context: (str) the context str. For example,
If text == "print_tensor softmax", returns "print_tensor".
If text == "print", returns "".
If text == "", returns "".
prefix: (str) the prefix to be tab-completed, from the last word.
For example, if text == "print_tensor softmax", returns "softmax".
If text == "print", returns "print".
If text == "", returns "".
except_last_word: (str) the input text, except the last word.
For example, if text == "print_tensor softmax", returns "print_tensor".
If text == "print_tensor -a softmax", returns "print_tensor -a".
If text == "print", returns "".
If text == "", returns "".
"""
text = text.lstrip()
if not text:
# Empty (top-level) context.
context = ""
prefix = ""
except_last_word = ""
else:
items = text.split(" ")
if len(items) == 1:
# Single word: top-level context.
context = ""
prefix = items[0]
except_last_word = ""
else:
# Multiple words.
context = items[0]
prefix = items[-1]
except_last_word = " ".join(items[:-1]) + " "
return context, prefix, except_last_word
@property
def config(self):
"""Obtain the CLIConfig of this `BaseUI` instance."""
return self._config
def _config_command_handler(self, args, screen_info=None):
"""Command handler for the "config" command."""
del screen_info # Currently unused.
parsed = self._config_argparser.parse_args(args)
if hasattr(parsed, "property_name") and hasattr(parsed, "property_value"):
# set.
self._config.set(parsed.property_name, parsed.property_value)
return self._config.summarize(highlight=parsed.property_name)
else:
# show.
return self._config.summarize()
| apache-2.0 |
xerxes2/gpodder | src/gpodder/gtkui/frmntl/mafw.py | 1 | 10557 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2011 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Maemo 5 Media Player / MAFW Playback Monitor
# Send playback status information to gPodder using D-Bus
# Thomas Perl <thp@gpodder.org>; 2010-08-16 / 2010-08-17
#
# The code below is based on experimentation with MAFW and real files,
# so it might not work in the general case. It worked fine for me with
# local and streaming files (audio/video), though. Blame missing docs!
import gtk
import gobject
import dbus
import dbus.mainloop
import dbus.service
import dbus.glib
import urllib
import time
import gpodder
class gPodderPlayer(dbus.service.Object):
# Empty class with method definitions to send D-Bus signals
def __init__(self, path, name):
dbus.service.Object.__init__(self, object_path=path, bus_name=name)
# Signals for gPodder's media player integration
@dbus.service.signal(dbus_interface='org.gpodder.player', signature='us')
def PlaybackStarted(self, position, file_uri):
pass
@dbus.service.signal(dbus_interface='org.gpodder.player', signature='uuus')
def PlaybackStopped(self, start_position, end_position, total_time, \
file_uri):
pass
class MafwResumeHandler(object):
# Simple state machine for handling resume with MAFW
#
# NoResume ... No desire to resume / error state ("do nothing")
# Init ....... Want to resume, filename and position set
# Loaded ..... The correct filename has been loaded
# Seekable ... The media is seekable (metadata "is-seekable" is True)
# Playing .... The media is being played back (state_changed with 1)
# Done ....... The "resume" action has been carried out
NoResume, Init, Loaded, Seekable, Playing, Done = range(6)
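    # Happy path, for illustration: Init -> Loaded (media_changed reports
    # the expected file) -> Seekable (metadata 'is-seekable' is True) ->
    # Playing (state_changed to 1, at which point the seek to the stored
    # position is issued) -> Done.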
def __init__(self, playback_monitor):
self.playback_monitor = playback_monitor
self.state = MafwResumeHandler.NoResume
self.filename = None
self.position = None
def set_resume_point(self, filename, position):
self.filename = filename
self.position = position
if self.position:
self.state = MafwResumeHandler.Init
else:
self.state = MafwResumeHandler.NoResume
def on_media_changed(self, filename):
if self.state == MafwResumeHandler.Init:
if filename.startswith('file://'):
filename = urllib.unquote(filename[len('file://'):])
if self.filename == filename:
self.state = MafwResumeHandler.Loaded
def on_metadata_changed(self, key, value):
if self.state == MafwResumeHandler.Loaded:
if key == 'is-seekable' and value == True:
self.state = MafwResumeHandler.Seekable
def on_state_changed(self, new_state):
if self.state == MafwResumeHandler.Seekable:
if new_state == 1:
self.state = MafwResumeHandler.Playing
self.playback_monitor.set_position(self.position)
self.state = MafwResumeHandler.Done
class MafwPlaybackMonitor(object):
MAFW_RENDERER_OBJECT = 'com.nokia.mafw.renderer.Mafw-Gst-Renderer-Plugin.gstrenderer'
MAFW_RENDERER_PATH = '/com/nokia/mafw/renderer/gstrenderer'
MAFW_RENDERER_INTERFACE = 'com.nokia.mafw.renderer'
MAFW_RENDERER_SIGNAL_MEDIA = 'media_changed'
MAFW_RENDERER_SIGNAL_STATE = 'state_changed'
MAFW_RENDERER_SIGNAL_METADATA = 'metadata_changed'
MAFW_SENDER_PATH = '/org/gpodder/maemo/mafw'
class MafwPlayState(object):
Stopped = 0
Playing = 1
Paused = 2
Transitioning = 3
def __init__(self, bus):
self.bus = bus
self._filename = None
self._is_playing = False
self._start_time = time.time()
self._start_position = 0
self._duration = 0
self._resume_handler = MafwResumeHandler(self)
self._player = gPodderPlayer(self.MAFW_SENDER_PATH, \
dbus.service.BusName(gpodder.dbus_bus_name, self.bus))
state, object_id = self.get_status()
self.on_media_changed(0, object_id)
self.on_state_changed(state)
self.bus.add_signal_receiver(self.on_media_changed, \
self.MAFW_RENDERER_SIGNAL_MEDIA, \
self.MAFW_RENDERER_INTERFACE, \
None, \
self.MAFW_RENDERER_PATH)
self.bus.add_signal_receiver(self.on_state_changed, \
self.MAFW_RENDERER_SIGNAL_STATE, \
self.MAFW_RENDERER_INTERFACE, \
None, \
self.MAFW_RENDERER_PATH)
self.bus.add_signal_receiver(self.on_metadata_changed, \
self.MAFW_RENDERER_SIGNAL_METADATA, \
self.MAFW_RENDERER_INTERFACE, \
None, \
self.MAFW_RENDERER_PATH)
# Capture requests to the renderer where the position is to be set to
# something else because we don't get normal signals in these cases
bus.add_match_string("type='method_call',destination='com.nokia.mafw.renderer.Mafw-Gst-Renderer-Plugin.gstrenderer',path='/com/nokia/mafw/renderer/gstrenderer',interface='com.nokia.mafw.renderer',member='set_position'")
bus.add_message_filter(self._message_filter)
def set_resume_point(self, filename, position):
self._resume_handler.set_resume_point(filename, position)
def _message_filter(self, bus, message):
try:
# message type 1 = dbus.lowlevel.MESSAGE_TYPE_METHOD_CALL
if message.get_path() == self.MAFW_RENDERER_PATH and \
message.get_interface() == self.MAFW_RENDERER_INTERFACE and \
message.get_destination() == self.MAFW_RENDERER_OBJECT and \
message.get_type() == 1 and \
message.get_member() == 'set_position' and \
self._is_playing:
# Fake stop-of-old / start-of-new
self.on_state_changed(-1)
self.on_state_changed(self.MafwPlayState.Playing)
finally:
return 1 # = dbus.lowlevel.HANDLER_RESULT_NOT_YET_HANDLED
def object_id_to_filename(self, object_id):
# Naive, but works for now...
if object_id.startswith('localtagfs::'):
if isinstance(object_id, unicode):
object_id = object_id.encode('utf-8')
return 'file://'+urllib.quote(urllib.unquote(object_id[object_id.index('%2F'):]))
elif object_id.startswith('urisource::'):
return object_id[len('urisource::'):]
else:
# This is pretty bad, actually (can happen with other
# sources, but should not happen for gPodder episodes)
return object_id
@property
def renderer(self):
o = self.bus.get_object(self.MAFW_RENDERER_OBJECT, \
self.MAFW_RENDERER_PATH)
return dbus.Interface(o, self.MAFW_RENDERER_INTERFACE)
def get_position(self):
return self.renderer.get_position()
def set_position(self, position):
self.renderer.set_position(0, position)
self._start_position = position
self._start_time = time.time()
return False
def get_status(self):
"""Returns playing status and updates filename"""
playlist, index, state, object_id = self.renderer.get_status()
return (state, object_id)
def on_media_changed(self, position, object_id):
# We don't know the duration for newly-loaded files at first
self._duration = 0
if self._is_playing:
# Fake stop-of-old / start-of-new
self.on_state_changed(-1) # (see below where we catch the "-1")
self._filename = self.object_id_to_filename(object_id)
self.on_state_changed(self.MafwPlayState.Playing)
else:
self._filename = self.object_id_to_filename(object_id)
self._resume_handler.on_media_changed(self._filename)
def on_state_changed(self, state):
if state == self.MafwPlayState.Playing:
self._is_playing = True
try:
self._start_position = self.get_position()
except:
# XXX: WTF?
pass
self._start_time = time.time()
self._player.PlaybackStarted(self._start_position, self._filename.decode('utf-8'))
else:
if self._is_playing:
try:
# Lame: if state is -1 (a faked "stop" event), don't try to
# get the "current" position, but use the wall time method
# from below to calculate the stop time
assert state != -1
position = self.get_position()
except:
# Happens if the assertion fails or if the position cannot
# be determined for whatever reason. Use wall time and
# assume that the media file has advanced the same amount.
position = self._start_position + (time.time()-self._start_time)
if self._start_position != position:
self._player.PlaybackStopped(self._start_position, \
position, self._duration, self._filename.decode('utf-8'))
self._is_playing = False
self._resume_handler.on_state_changed(state)
def on_metadata_changed(self, key, count, value):
if key == 'duration':
# Remember the duration of the media - right now, we don't care
# if this is for the right file, as we re-set the internally-saved
# duration when the media is changed (see on_media_changed above)
self._duration = int(value)
elif key == 'is-seekable':
self._resume_handler.on_metadata_changed(key, value)
| gpl-3.0 |
redwin/miscTools | task3.py | 1 | 1412 |
maxlen = 100000
minval = -1000000000
maxval = 1000000000
def solution(A):
    """Return the maximum absolute difference between the running maximum
    of a prefix of A and the running maximum of the remaining suffix,
    taken over all split points; returns 0 for input that violates the
    problem constraints."""
    premax = None
    postmax = None
    maxabs = 0
    tmpabs = 0
    maxA = []
    length = len(A)
    # reject lists outside the allowed size range
    if length < 2 or length > maxlen:
        return 0
    # build the prefix-maximum list: maxA[i] is the max of A[0..i]
    for i, val in enumerate(A):
        if val < minval or val > maxval:
            return 0
        if premax is None or val > premax:
            premax = val
        maxA.append(premax)
#print maxA
    i = length - 1
    # walk from the right, maintaining the suffix maximum in postmax
    for val in A[::-1]:
        if postmax is None or postmax < val:
            postmax = val
        # maxA[i] holds the max value up to index i,
        # and postmax holds the max value from index i onward
        tmpabs = abs(maxA[i] - postmax)
        if tmpabs > maxabs:
            maxabs = tmpabs
        #print i,maxabs,postmax,maxA[i]
        i -= 1
return maxabs
def run_test():
test_suit = (
[],
[1],
[minval],
[maxval],
[minval-1],
[maxval+1],
[1,1],
[1,2],
[1,2,3],
[-3,-2,-1,1,2,3],
[3,2,1,0,-1,-2,-3],
[3,2,1,5,-1,-2,-3],
[0,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0],
[minval,maxval],
)
for tc in test_suit:
print "test data:",tc, "solution data:",solution(tc)
if __name__=="__main__":
run_test()
| mit |
jorik041/scrapy | tests/test_downloadermiddleware_useragent.py | 15 | 2212 | from unittest import TestCase
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
from scrapy.utils.test import get_crawler
class UserAgentMiddlewareTest(TestCase):
def get_spider_and_mw(self, default_useragent):
crawler = get_crawler(Spider, {'USER_AGENT': default_useragent})
spider = crawler._create_spider('foo')
return spider, UserAgentMiddleware.from_crawler(crawler)
def test_default_agent(self):
spider, mw = self.get_spider_and_mw('default_useragent')
req = Request('http://scrapytest.org/')
assert mw.process_request(req, spider) is None
self.assertEquals(req.headers['User-Agent'], 'default_useragent')
def test_remove_agent(self):
        # setting USER_AGENT to None should remove the user agent
spider, mw = self.get_spider_and_mw('default_useragent')
spider.user_agent = None
mw.spider_opened(spider)
req = Request('http://scrapytest.org/')
assert mw.process_request(req, spider) is None
assert req.headers.get('User-Agent') is None
def test_spider_agent(self):
spider, mw = self.get_spider_and_mw('default_useragent')
spider.user_agent = 'spider_useragent'
mw.spider_opened(spider)
req = Request('http://scrapytest.org/')
assert mw.process_request(req, spider) is None
self.assertEquals(req.headers['User-Agent'], 'spider_useragent')
def test_header_agent(self):
spider, mw = self.get_spider_and_mw('default_useragent')
spider.user_agent = 'spider_useragent'
mw.spider_opened(spider)
req = Request('http://scrapytest.org/', headers={'User-Agent': 'header_useragent'})
assert mw.process_request(req, spider) is None
self.assertEquals(req.headers['User-Agent'], 'header_useragent')
def test_no_agent(self):
spider, mw = self.get_spider_and_mw(None)
spider.user_agent = None
mw.spider_opened(spider)
req = Request('http://scrapytest.org/')
assert mw.process_request(req, spider) is None
assert 'User-Agent' not in req.headers
| bsd-3-clause |
eldarion/formly | formly/tests/test_models.py | 1 | 3950 | import datetime
from django.core.exceptions import ValidationError
from django.urls import reverse
from mock import patch
from .mixins import SimpleTests
class ModelTests(SimpleTests):
def setUp(self):
self.user = self.make_user("test_user")
self.scale = self._ordinal_scale()
def test_ordinal_scale(self):
"""Ensure proper string representation"""
choices = [("last", 3), ("middle", 2), ("first", 1)]
for choice in choices:
self._ordinal_choice(label=choice[0], score=choice[1])
expected = "{} [first (1), middle (2), last (3)]".format(self.scale.name)
self.assertEqual(str(self.scale), expected)
def test_ordinal_scale_unique_label(self):
"""Ensure scale and choice label uniqueness is enforced"""
label = "label"
score = 1
self._ordinal_choice(label=label, score=score)
# duplicate label
choice = self._ordinal_choice(label=label, score=score + 1, create=False)
msg = "Ordinal choice with this Scale and Label already exists."
with self.assertRaisesMessage(ValidationError, msg):
choice.validate_unique()
def test_ordinal_scale_unique_score(self):
"""Ensure scale and choice score uniqueness is enforced"""
label = "label"
score = 1
self._ordinal_choice(label=label, score=score)
# duplicate score
choice = self._ordinal_choice(label="different", score=score, create=False)
msg = "Ordinal choice with this Scale and Score already exists."
with self.assertRaisesMessage(ValidationError, msg):
choice.validate_unique()
def test_survey_url(self):
"""Verify proper URL"""
survey = self._survey()
self.assertEqual(
survey.get_absolute_url(),
reverse("formly:survey_detail", args=[survey.pk])
)
@patch("formly.models.timezone.now")
def test_survey_save_updated(self, mock_now):
"""Verify `updated` field is updated on save"""
fake_now = datetime.datetime(2018, 2, 14)
mock_now.return_value = datetime.datetime(2018, 2, 14)
survey = self._survey()
survey.name = "updated name"
survey.save()
self.assertEqual(survey.updated, fake_now)
def test_page_unspecified_page_num(self):
"""Ensure `page_num` is set as expected"""
self.survey = self._survey()
self._page(page_num=2)
self._page(page_num=1)
page = self._page()
self.assertEqual(page.page_num, 3)
def test_page_label(self):
"""Ensure correct label"""
self.survey = self._survey()
page = self._page(page_num=5, create=False)
# no subtitle, check for default label
self.assertEqual(page.label(), "Page {}".format(page.page_num))
# set subtitle
subtitle = "subtitle"
page.subtitle = subtitle
self.assertEqual(page.label(), subtitle)
def test_page_url(self):
"""Verify proper URL"""
self.survey = self._survey()
page = self._page()
self.assertEqual(
page.get_absolute_url(),
reverse("formly:page_detail", args=[page.pk])
)
def test_field_move_up(self):
self.survey = self._survey()
page1 = self._page()
field1 = self._field(page=page1, ordinal=1)
field2 = self._field(page=page1, ordinal=2)
field2.move_up()
field1.refresh_from_db()
field2.refresh_from_db()
self.assertTrue(field2.ordinal < field1.ordinal)
def test_field_move_down(self):
self.survey = self._survey()
page1 = self._page()
field1 = self._field(page=page1, ordinal=1)
field2 = self._field(page=page1, ordinal=2)
field1.move_down()
field1.refresh_from_db()
field2.refresh_from_db()
self.assertTrue(field2.ordinal < field1.ordinal)
| bsd-3-clause |
jelugbo/hebs_master | cms/djangoapps/contentstore/views/access.py | 15 | 1780 | """ Helper methods for determining user access permissions in Studio """
from student.roles import CourseStaffRole, GlobalStaff, CourseInstructorRole, OrgStaffRole, OrgInstructorRole
from student import auth
def has_course_access(user, course_key, role=CourseStaffRole):
"""
    Return True if the user is allowed to access the course with this course_id.
    Note that the CMS permissions model is defined with respect to courses.
    There is a super-admin permission if user.is_staff is set.
    Also, since we're unifying the user database between LMS and CMS,
    we presume that the course instructor (formerly known as admin)
    will not be in both the INSTRUCTOR and STAFF groups, so we have to cascade
    our queries here, as INSTRUCTOR has all the rights that STAFF does.
"""
if GlobalStaff().has_user(user):
return True
if OrgInstructorRole(org=course_key.org).has_user(user):
return True
if OrgStaffRole(org=course_key.org).has_user(user):
return True
# temporary to ensure we give universal access given a course until we impl branch specific perms
return auth.has_access(user, role(course_key.for_branch(None)))
def get_user_role(user, course_id):
"""
    Determine what type of access (staff or instructor) this user has in Studio.
    No code should use this for access control, only to quickly serialize the
    type of access, where this code knows that Instructor trumps Staff and
    assumes the user has one or the other.
    This will not return the student role because it is intended for use in Studio.
:param course_id: the course_id of the course we're interested in
"""
# afaik, this is only used in lti
if auth.has_access(user, CourseInstructorRole(course_id)):
return 'instructor'
else:
return 'staff'
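# A minimal usage sketch (the course key string is illustrative; CourseKey
# parsing is assumed to come from opaque_keys, as elsewhere in edx-platform):
#
#     from opaque_keys.edx.keys import CourseKey
#     course_key = CourseKey.from_string('course-v1:edX+DemoX+Demo_2014')
#     if has_course_access(request.user, course_key, role=CourseInstructorRole):
#         pass  # show instructor-only functionality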
| agpl-3.0 |
sloanyang/gyp | test/actions/gyptest-all.py | 243 | 3677 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using an explicit build target of 'all'.
"""
import glob
import os
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_all')
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build. That
# doesn't mesh well with ninja's semantics. TODO(evan): figure out
# how to work always-run actions in to ninja.
# Android also can't do this as it doesn't have order-only dependencies.
if test.format in ['ninja', 'android']:
test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
# Test that an "always run" action increases a counter on multiple
# invocations, and that a dependent action updates in step.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
# The "always run" action only counts to 2, but the dependent target
# will count forever if it's allowed to run. This verifies that the
# dependent target only runs when the "always run" action generates
# new output, not just because the "always run" ran.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
expect = "Hello from generate_main.py\n"
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)
# Clean out files which may have been created if test.ALL was run.
def clean_dep_files():
for file in (glob.glob('relocate/src/dep_*.txt') +
glob.glob('relocate/src/deps_all_done_*.txt')):
if os.path.exists(file):
os.remove(file)
# Confirm our clean.
clean_dep_files()
test.must_not_exist('relocate/src/dep_1.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
# Make sure all deps finish before an action is run on a 'None' target.
# If using the Make builder, add -j to make things more difficult.
arguments = []
if test.format == 'make':
arguments = ['-j']
test.build('actions.gyp', 'action_with_dependencies_123', chdir='relocate/src',
arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_123.txt')
# Try again with a target that has deps in reverse. Output files from
# previous tests deleted. Confirm this execution did NOT run the ALL
# target which would mess up our dep tests.
clean_dep_files()
test.build('actions.gyp', 'action_with_dependencies_321', chdir='relocate/src',
arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_321.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
test.pass_test()
| bsd-3-clause |
rapilabs/django | django/core/management/commands/makemigrations.py | 162 | 12597 | import os
import sys
from itertools import takewhile
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.utils.six import iteritems
from django.utils.six.moves import zip
class Command(BaseCommand):
help = "Creates new migration(s) for apps."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*',
help='Specify the app label(s) to create migrations for.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False,
help="Just show what migrations would be made; don't actually write them.")
parser.add_argument('--merge', action='store_true', dest='merge', default=False,
help="Enable fixing of migration conflicts.")
parser.add_argument('--empty', action='store_true', dest='empty', default=False,
help="Create an empty migration.")
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('-n', '--name', action='store', dest='name', default=None,
help="Use this name for migration file(s).")
parser.add_argument('-e', '--exit', action='store_true', dest='exit_code', default=False,
help='Exit with error code 1 if no changes needing migrations are found.')
def handle(self, *app_labels, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
self.dry_run = options.get('dry_run', False)
self.merge = options.get('merge', False)
self.empty = options.get('empty', False)
self.migration_name = options.get('name')
self.exit_code = options.get('exit_code', False)
# Make sure the app they asked for exists
app_labels = set(app_labels)
bad_app_labels = set()
for app_label in app_labels:
try:
apps.get_app_config(app_label)
except LookupError:
bad_app_labels.add(app_label)
if bad_app_labels:
for app_label in bad_app_labels:
self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignore_no_migrations=True)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any and they don't want to merge
conflicts = loader.detect_conflicts()
# If app_labels is specified, filter out conflicting migrations for unspecified apps
if app_labels:
conflicts = {
app_label: conflict for app_label, conflict in iteritems(conflicts)
if app_label in app_labels
}
if conflicts and not self.merge:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.stdout.write("No conflicts detected to merge.")
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
return self.handle_merge(loader, conflicts)
if self.interactive:
questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
else:
questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
questioner,
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not app_labels:
raise CommandError("You must supply at least one app label when using --empty.")
# Make a fake changes() result we can pass to arrange_for_graph
changes = {
app: [Migration("custom", app)]
for app in app_labels
}
changes = autodetector.arrange_for_graph(
changes=changes,
graph=loader.graph,
migration_name=self.migration_name,
)
self.write_migration_files(changes)
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
migration_name=self.migration_name,
)
if not changes:
# No changes? Tell them.
if self.verbosity >= 1:
if len(app_labels) == 1:
self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
elif len(app_labels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
else:
self.stdout.write("No changes detected")
if self.exit_code:
sys.exit(1)
else:
return
self.write_migration_files(changes)
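    # `changes` maps app labels to lists of Migration instances, e.g.
    # (illustrative): {'myapp': [<Migration myapp.0002_auto>]}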
def write_migration_files(self, changes):
"""
Takes a changes dict and writes them out as migration files.
"""
directory_created = {}
for app_label, app_migrations in changes.items():
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
for migration in app_migrations:
# Describe the migration
writer = MigrationWriter(migration)
if self.verbosity >= 1:
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
if not self.dry_run:
# Write the migrations file to the disk.
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label):
if not os.path.isdir(migrations_directory):
os.mkdir(migrations_directory)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
migration_string = writer.as_string()
with open(writer.path, "wb") as fh:
fh.write(migration_string)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will output the migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
def handle_merge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner()
else:
questioner = MigrationQuestioner(defaults={'ask_merge': True})
for app_label, migration_names in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
merge_migrations = []
for migration_name in migration_names:
migration = loader.get_migration(app_label, migration_name)
migration.ancestry = [
mig for mig in loader.graph.forwards_plan((app_label, migration_name))
if mig[0] == migration.app_label
]
merge_migrations.append(migration)
all_items_equal = lambda seq: all(item == seq[0] for item in seq[1:])
merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
common_ancestor_count = sum(1 for common_ancestor_generation
in takewhile(all_items_equal, merge_migrations_generations))
if not common_ancestor_count:
raise ValueError("Could not find common ancestor of %s" % migration_names)
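            # Illustrative example (hypothetical names): if conflicting
            # leaves 0002_a and 0002_b both descend from 0001_initial, the
            # ancestry lists are [0001_initial, 0002_a] and
            # [0001_initial, 0002_b]; zip() pairs up the generations,
            # takewhile() stops at the first pair that differs, and
            # common_ancestor_count comes out as 1.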
# Now work out the operations along each divergent branch
for migration in merge_migrations:
migration.branch = migration.ancestry[common_ancestor_count:]
migrations_ops = (loader.get_migration(node_app, node_name).operations
for node_app, node_name in migration.branch)
migration.merged_operations = sum(migrations_ops, [])
# In future, this could use some of the Optimizer code
# (can_optimize_through) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
for migration in merge_migrations:
self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.merged_operations:
self.stdout.write(" - %s\n" % operation.describe())
if questioner.ask_merge(app_label):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parse_number(migration.name)
for migration in merge_migrations
]
try:
biggest_number = max(x for x in numbers if x is not None)
except ValueError:
biggest_number = 1
subclass = type("Migration", (Migration, ), {
"dependencies": [(app_label, migration.name) for migration in merge_migrations],
})
new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
writer = MigrationWriter(new_migration)
if not self.dry_run:
# Write the merge migrations file to the disk
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write("\nCreated new merge migration %s" % writer.path)
elif self.verbosity == 3:
# Alternatively, makemigrations --merge --dry-run --verbosity 3
# will output the merge migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full merge migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
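        # Note that the merge migration written above is intentionally empty:
        # it only declares both conflicting leaves as dependencies so the
        # graph has a single leaf again; the divergent operations themselves
        # stay in their original files.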
| bsd-3-clause |
mdietrichc2c/account-financial-reporting | account_financial_report_webkit/report/webkit_parser_header_fix.py | 26 | 11497 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 Camptocamp SA (http://www.camptocamp.com)
#
# Author: Guewen Baconnier (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import subprocess
import tempfile
import logging
from functools import partial
from mako import exceptions
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
from openerp import pooler
from openerp import tools
from openerp.addons.report_webkit import webkit_report
from openerp.addons.report_webkit.report_helper import WebKitHelper
from openerp.modules.module import get_module_resource
_logger = logging.getLogger('financial.reports.webkit')
# Class used only as a workaround to bug:
# http://code.google.com/p/wkhtmltopdf/issues/detail?id=656
# html headers and footers do not work on big files (hundreds of pages) so we
# replace them by text headers and footers passed as arguments to wkhtmltopdf
# this class has to be removed once the bug is fixed
# in your report class, to print headers and footers as text, you have to add
# them in the localcontext with a key 'additional_args'
# for instance:
# header_report_name = _('PARTNER LEDGER')
# footer_date_time = self.formatLang(str(datetime.today()),
# date_time=True)
# self.localcontext.update({
# 'additional_args': [
# ('--header-font-name', 'Helvetica'),
# ('--footer-font-name', 'Helvetica'),
# ('--header-font-size', '10'),
# ('--footer-font-size', '7'),
# ('--header-left', header_report_name),
# ('--footer-left', footer_date_time),
# ('--footer-right', ' '.join((_('Page'), '[page]', _('of'),
# '[topage]'))),
# ('--footer-line',),
# ],
# })
# redefine mako_template as this is overridden by jinja since saas-1
# from openerp.addons.report_webkit.webkit_report import mako_template
from mako.template import Template
from mako.lookup import TemplateLookup
def mako_template(text):
"""Build a Mako template.
This template uses UTF-8 encoding
"""
    # a TemplateLookup is needed in order to allow inclusion and inheritance
    tmp_lookup = TemplateLookup()
    return Template(text, input_encoding='utf-8', output_encoding='utf-8',
                    lookup=tmp_lookup)
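# Illustrative usage (not part of the original module); render() should
# return UTF-8 encoded bytes because of the output_encoding set above:
#   tpl = mako_template(u"Hello ${name}!")
#   tpl.render(name=u"world")  # -> 'Hello world!'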
class HeaderFooterTextWebKitParser(webkit_report.WebKitParser):
def generate_pdf(self, comm_path, report_xml, header, footer, html_list,
webkit_header=False, parser_instance=False):
"""Call webkit in order to generate pdf"""
if not webkit_header:
webkit_header = report_xml.webkit_header
fd, out_filename = tempfile.mkstemp(suffix=".pdf",
prefix="webkit.tmp.")
file_to_del = [out_filename]
if comm_path:
command = [comm_path]
else:
command = ['wkhtmltopdf']
command.append('--quiet')
# default to UTF-8 encoding. Use <meta charset="latin-1"> to override.
command.extend(['--encoding', 'utf-8'])
if webkit_header.margin_top:
command.extend(
['--margin-top',
str(webkit_header.margin_top).replace(',', '.')])
if webkit_header.margin_bottom:
command.extend(
['--margin-bottom',
str(webkit_header.margin_bottom).replace(',', '.')])
if webkit_header.margin_left:
command.extend(
['--margin-left',
str(webkit_header.margin_left).replace(',', '.')])
if webkit_header.margin_right:
command.extend(
['--margin-right',
str(webkit_header.margin_right).replace(',', '.')])
if webkit_header.orientation:
command.extend(
['--orientation',
str(webkit_header.orientation).replace(',', '.')])
if webkit_header.format:
command.extend(
['--page-size',
str(webkit_header.format).replace(',', '.')])
if parser_instance.localcontext.get('additional_args', False):
for arg in parser_instance.localcontext['additional_args']:
command.extend(arg)
count = 0
for html in html_list:
with tempfile.NamedTemporaryFile(suffix="%d.body.html" % count,
delete=False) as html_file:
count += 1
html_file.write(self._sanitize_html(html))
file_to_del.append(html_file.name)
command.append(html_file.name)
command.append(out_filename)
stderr_fd, stderr_path = tempfile.mkstemp(text=True)
file_to_del.append(stderr_path)
try:
status = subprocess.call(command, stderr=stderr_fd)
os.close(stderr_fd) # ensure flush before reading
stderr_fd = None # avoid closing again in finally block
            with open(stderr_path, 'r') as fobj:
                error_message = fobj.read()
if not error_message:
error_message = _('No diagnosis message was provided')
else:
error_message = _(
'The following diagnosis message was provided:\n') + \
error_message
if status:
                raise except_orm(_('Webkit error'),
                                 _("The command 'wkhtmltopdf' failed with "
                                   "error code = %s. Message: %s") %
                                 (status, error_message))
with open(out_filename, 'rb') as pdf_file:
pdf = pdf_file.read()
os.close(fd)
finally:
if stderr_fd is not None:
os.close(stderr_fd)
for f_to_del in file_to_del:
try:
os.unlink(f_to_del)
except (OSError, IOError), exc:
_logger.error('cannot remove file %s: %s', f_to_del, exc)
return pdf
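    # For reference, the command assembled above typically looks like this
    # (illustrative paths and values):
    #   wkhtmltopdf --quiet --encoding utf-8 --margin-top 10.0
    #       --orientation Portrait --page-size A4
    #       --header-left 'PARTNER LEDGER'
    #       /tmp/webkit.tmp.0.body.html /tmp/webkit.tmp.XXXX.pdf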
# override needed to keep the attachments' storing procedure
def create_single_pdf(self, cursor, uid, ids, data, report_xml,
context=None):
"""generate the PDF"""
if context is None:
context = {}
htmls = []
if report_xml.report_type != 'webkit':
return super(HeaderFooterTextWebKitParser, self
).create_single_pdf(cursor, uid, ids, data,
report_xml, context=context)
parser_instance = self.parser(cursor,
uid,
self.name2,
context=context)
self.pool = pooler.get_pool(cursor.dbname)
objs = self.getObjects(cursor, uid, ids, context)
parser_instance.set_context(objs, data, ids, report_xml.report_type)
template = False
if report_xml.report_file:
path = get_module_resource(
*report_xml.report_file.split(os.path.sep))
if os.path.exists(path):
                with open(path) as tpl_file:
                    template = tpl_file.read()
if not template and report_xml.report_webkit_data:
template = report_xml.report_webkit_data
if not template:
raise except_orm(
_('Error!'), _('Webkit Report template not found !'))
header = report_xml.webkit_header.html
if not header and report_xml.header:
raise except_orm(
_('No header defined for this Webkit report!'),
_('Please set a header in company settings.')
)
css = report_xml.webkit_header.css
if not css:
css = ''
translate_call = partial(self.translate_call, parser_instance)
# default_filters=['unicode', 'entity'] can be used to set global
# filter
body_mako_tpl = mako_template(template)
helper = WebKitHelper(cursor, uid, report_xml.id, context)
if report_xml.precise_mode:
for obj in objs:
parser_instance.localcontext['objects'] = [obj]
try:
html = body_mako_tpl.render(helper=helper,
css=css,
_=translate_call,
**parser_instance.localcontext)
htmls.append(html)
except Exception:
msg = exceptions.text_error_template().render()
_logger.error(msg)
raise except_orm(_('Webkit render'), msg)
else:
try:
html = body_mako_tpl.render(helper=helper,
css=css,
_=translate_call,
**parser_instance.localcontext)
htmls.append(html)
except Exception:
msg = exceptions.text_error_template().render()
_logger.error(msg)
raise except_orm(_('Webkit render'), msg)
# NO html footer and header because we write them as text with
# wkhtmltopdf
head = foot = False
if report_xml.webkit_debug:
try:
deb = body_mako_tpl.render(helper=helper,
css=css,
_debug=tools.ustr("\n".join(htmls)),
_=translate_call,
**parser_instance.localcontext)
except Exception:
msg = exceptions.text_error_template().render()
_logger.error(msg)
raise except_orm(_('Webkit render'), msg)
return (deb, 'html')
bin = self.get_lib(cursor, uid)
pdf = self.generate_pdf(bin, report_xml, head, foot, htmls,
parser_instance=parser_instance)
return (pdf, 'pdf')
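    # Like the parent parser, create_single_pdf() returns a (data, format)
    # tuple: ('...', 'html') in webkit_debug mode, ('...', 'pdf') otherwise.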
| agpl-3.0 |
alanconway/dispatch | python/qpid_dispatch_internal/display_name/display_name.py | 4 | 3600 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
DisplayNameService provides the mapping needed to associate an unfriendly user identifier with a friendlier
user nickname.
It maintains a dict (profile_dict) of SSL profile names to SSLProfile objects. Each SSLProfile object is built
from a file that contains a mapping of user identifiers to user names.
"""
import traceback
from traceback import format_exc
from ..router.message import Message
from ..dispatch import IoAdapter, LogAdapter, LOG_INFO, LOG_ERROR, LOG_TRACE, LOG_STACK_LIMIT
import json
class SSLProfile(object):
def __init__(self, profile_name, profile_file):
super(SSLProfile, self).__init__()
self.profile_name = profile_name
self.profile_file = profile_file
self.cache = {}
with open(profile_file) as json_data:
d = json.load(json_data)
for key in d.keys():
self.cache[key] = d[key]
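    # The profile file is expected to be a flat JSON object mapping user
    # identifiers to display names, e.g. (illustrative identifiers):
    #   {"CN=client,O=Example": "Alice", "CN=admin,O=Example": "Bob"}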
def __repr__(self):
return "SSLProfile(%s)" % ", ".join("%s=%s" % (k, self.cache[k]) for k in self.cache.keys())
class DisplayNameService(object):
def __init__(self):
super(DisplayNameService, self).__init__()
# profile_dict will be a mapping from ssl_profile_name to the SSLProfile object
self.profile_dict = {}
self.io_adapter = None
self.log_adapter = LogAdapter("DISPLAYNAME")
def log(self, level, text):
info = traceback.extract_stack(limit=2)[0] # Caller frame info
self.log_adapter.log(level, text, info[0], info[1])
def add(self, profile_name, profile_file_location):
ssl_profile = SSLProfile(profile_name, profile_file_location)
self.profile_dict[profile_name] = ssl_profile
self.log(LOG_INFO, "Added profile name %s, profile file location %s to DisplayNameService" % (profile_name, profile_file_location))
def remove(self, profile_name):
try:
del self.profile_dict[profile_name]
except KeyError:
pass
def reload_all(self):
for profile_name in self.profile_dict.keys():
self.add(profile_name, self.profile_dict[profile_name].profile_file)
def reload(self, profile_name=None):
if profile_name:
self.add(profile_name, self.profile_dict[profile_name].profile_file)
else:
self.reload_all()
def query(self, profile_name, user_id):
        self.log(LOG_TRACE, "Received query for profile name %s, user id %s" %
                 (profile_name, user_id))
        ssl_profile = self.profile_dict.get(profile_name)
        if ssl_profile:
            user_name = ssl_profile.cache.get(user_id)
            return user_name if user_name else user_id
        else:
            return user_id
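# Minimal usage sketch (file path, profile name and identifiers are
# hypothetical):
#   service = DisplayNameService()
#   service.add('profile-a', '/etc/qpid-dispatch/users.json')
#   service.query('profile-a', 'CN=client,O=Example')  # -> 'Alice'
#   service.query('profile-a', 'unknown-id')           # -> 'unknown-id'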
| apache-2.0 |