from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('container', '0002_auto_20160613_0114'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='trip',
            name='name',
        ),
    ]
|
{
"content_hash": "5427ff0ca3390bad43d1f7df21b7fdae",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 49,
"avg_line_length": 18.705882352941178,
"alnum_prop": 0.5817610062893082,
"repo_name": "jonaqp/heroku",
"id": "3cec5cd598a46b71062a47df78bcc1a430abb1d3",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/container/migrations/0003_remove_trip_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40949"
},
{
"name": "HTML",
"bytes": "329134"
},
{
"name": "JavaScript",
"bytes": "78825"
},
{
"name": "Python",
"bytes": "182554"
}
],
"symlink_target": ""
}
|
from django.apps.registry import Apps
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from django.db.migrations.exceptions import InvalidBasesError
from django.db.migrations.operations import (
AddField, AlterField, DeleteModel, RemoveField,
)
from django.db.migrations.state import (
ModelState, ProjectState, get_related_models_recursive,
)
from django.test import SimpleTestCase, override_settings
from django.test.utils import isolate_apps
from .models import (
FoodManager, FoodQuerySet, ModelWithCustomBase, NoMigrationFoodManager,
UnicodeModel,
)
class StateTests(SimpleTestCase):
"""
Tests state construction, rendering and modification by operations.
"""
def test_create(self):
"""
Tests making a ProjectState from an Apps
"""
new_apps = Apps(["migrations"])
class Author(models.Model):
name = models.CharField(max_length=255)
bio = models.TextField()
age = models.IntegerField(blank=True, null=True)
class Meta:
app_label = "migrations"
apps = new_apps
unique_together = ["name", "bio"]
index_together = ["bio", "age"]
class AuthorProxy(Author):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
ordering = ["name"]
class SubAuthor(Author):
width = models.FloatField(null=True)
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
title = models.CharField(max_length=1000)
author = models.ForeignKey(Author, models.CASCADE)
contributors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
verbose_name = "tome"
db_table = "test_tome"
indexes = [models.Index(fields=['title'])]
class Food(models.Model):
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
food_no_mgr = NoMigrationFoodManager('x', 'y')
class Meta:
app_label = "migrations"
apps = new_apps
class FoodNoManagers(models.Model):
class Meta:
app_label = "migrations"
apps = new_apps
class FoodNoDefaultManager(models.Model):
food_no_mgr = NoMigrationFoodManager('x', 'y')
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
class Meta:
app_label = "migrations"
apps = new_apps
mgr1 = FoodManager('a', 'b')
mgr2 = FoodManager('x', 'y', c=3, d=4)
class FoodOrderedManagers(models.Model):
# The managers on this model should be ordered by their creation
# counter and not by the order in the model body
food_no_mgr = NoMigrationFoodManager('x', 'y')
food_mgr2 = mgr2
food_mgr1 = mgr1
class Meta:
app_label = "migrations"
apps = new_apps
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
author_proxy_state = project_state.models['migrations', 'authorproxy']
sub_author_state = project_state.models['migrations', 'subauthor']
book_state = project_state.models['migrations', 'book']
food_state = project_state.models['migrations', 'food']
food_no_managers_state = project_state.models['migrations', 'foodnomanagers']
food_no_default_manager_state = project_state.models['migrations', 'foodnodefaultmanager']
food_order_manager_state = project_state.models['migrations', 'foodorderedmanagers']
book_index = models.Index(fields=['title'])
book_index.set_name_with_model(Book)
self.assertEqual(author_state.app_label, "migrations")
self.assertEqual(author_state.name, "Author")
self.assertEqual([x for x, y in author_state.fields], ["id", "name", "bio", "age"])
self.assertEqual(author_state.fields[1][1].max_length, 255)
self.assertIs(author_state.fields[2][1].null, False)
self.assertIs(author_state.fields[3][1].null, True)
self.assertEqual(
author_state.options,
{"unique_together": {("name", "bio")}, "index_together": {("bio", "age")}, "indexes": []}
)
self.assertEqual(author_state.bases, (models.Model, ))
self.assertEqual(book_state.app_label, "migrations")
self.assertEqual(book_state.name, "Book")
self.assertEqual([x for x, y in book_state.fields], ["id", "title", "author", "contributors"])
self.assertEqual(book_state.fields[1][1].max_length, 1000)
self.assertIs(book_state.fields[2][1].null, False)
self.assertEqual(book_state.fields[3][1].__class__.__name__, "ManyToManyField")
self.assertEqual(
book_state.options,
{"verbose_name": "tome", "db_table": "test_tome", "indexes": [book_index]},
)
self.assertEqual(book_state.bases, (models.Model, ))
self.assertEqual(author_proxy_state.app_label, "migrations")
self.assertEqual(author_proxy_state.name, "AuthorProxy")
self.assertEqual(author_proxy_state.fields, [])
self.assertEqual(author_proxy_state.options, {"proxy": True, "ordering": ["name"], "indexes": []})
self.assertEqual(author_proxy_state.bases, ("migrations.author", ))
self.assertEqual(sub_author_state.app_label, "migrations")
self.assertEqual(sub_author_state.name, "SubAuthor")
self.assertEqual(len(sub_author_state.fields), 2)
self.assertEqual(sub_author_state.bases, ("migrations.author", ))
# The default manager is used in migrations
self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
self.assertTrue(all(isinstance(name, str) for name, mgr in food_state.managers))
self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))
# No explicit managers defined. Migrations will fall back to the default
self.assertEqual(food_no_managers_state.managers, [])
# food_mgr is used in migrations but isn't the default manager, so the
# default manager is added as well
self.assertEqual([name for name, mgr in food_no_default_manager_state.managers],
['food_no_mgr', 'food_mgr'])
self.assertTrue(all(isinstance(name, str) for name, mgr in food_no_default_manager_state.managers))
self.assertEqual(food_no_default_manager_state.managers[0][1].__class__, models.Manager)
self.assertIsInstance(food_no_default_manager_state.managers[1][1], FoodManager)
self.assertEqual([name for name, mgr in food_order_manager_state.managers],
['food_mgr1', 'food_mgr2'])
self.assertTrue(all(isinstance(name, str) for name, mgr in food_order_manager_state.managers))
self.assertEqual([mgr.args for name, mgr in food_order_manager_state.managers],
[('a', 'b', 1, 2), ('x', 'y', 3, 4)])
def test_custom_default_manager_added_to_the_model_state(self):
"""
When the default manager of the model is a custom manager,
it needs to be added to the model state.
"""
new_apps = Apps(['migrations'])
custom_manager = models.Manager()
class Author(models.Model):
objects = models.TextField()
authors = custom_manager
class Meta:
app_label = 'migrations'
apps = new_apps
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
self.assertEqual(author_state.managers, [('authors', custom_manager)])
def test_custom_default_manager_named_objects_with_false_migration_flag(self):
"""
When a manager is added with a name of 'objects' but it does not
have `use_in_migrations = True`, no manager should be added to the
model state (#26643).
"""
new_apps = Apps(['migrations'])
class Author(models.Model):
objects = models.Manager()
class Meta:
app_label = 'migrations'
apps = new_apps
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
self.assertEqual(author_state.managers, [])
def test_no_duplicate_managers(self):
"""
When a manager is added with `use_in_migrations = True` and a parent
model had a manager with the same name and `use_in_migrations = True`,
the parent's manager shouldn't appear in the model state (#26881).
"""
new_apps = Apps(['migrations'])
class PersonManager(models.Manager):
use_in_migrations = True
class Person(models.Model):
objects = PersonManager()
class Meta:
abstract = True
class BossManager(PersonManager):
use_in_migrations = True
class Boss(Person):
objects = BossManager()
class Meta:
app_label = 'migrations'
apps = new_apps
project_state = ProjectState.from_apps(new_apps)
boss_state = project_state.models['migrations', 'boss']
self.assertEqual(boss_state.managers, [('objects', Boss.objects)])
def test_custom_default_manager(self):
new_apps = Apps(['migrations'])
class Author(models.Model):
manager1 = models.Manager()
manager2 = models.Manager()
class Meta:
app_label = 'migrations'
apps = new_apps
default_manager_name = 'manager2'
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
self.assertEqual(author_state.options['default_manager_name'], 'manager2')
self.assertEqual(author_state.managers, [('manager2', Author.manager1)])
def test_custom_base_manager(self):
new_apps = Apps(['migrations'])
class Author(models.Model):
manager1 = models.Manager()
manager2 = models.Manager()
class Meta:
app_label = 'migrations'
apps = new_apps
base_manager_name = 'manager2'
class Author2(models.Model):
manager1 = models.Manager()
manager2 = models.Manager()
class Meta:
app_label = 'migrations'
apps = new_apps
base_manager_name = 'manager1'
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
self.assertEqual(author_state.options['base_manager_name'], 'manager2')
self.assertEqual(author_state.managers, [
('manager1', Author.manager1),
('manager2', Author.manager2),
])
author2_state = project_state.models['migrations', 'author2']
self.assertEqual(author2_state.options['base_manager_name'], 'manager1')
self.assertEqual(author2_state.managers, [
('manager1', Author2.manager1),
])
def test_apps_bulk_update(self):
"""
StateApps.bulk_update() should update apps.ready to False and reset
the value afterwards.
"""
project_state = ProjectState()
apps = project_state.apps
with apps.bulk_update():
self.assertFalse(apps.ready)
self.assertTrue(apps.ready)
with self.assertRaises(ValueError):
with apps.bulk_update():
self.assertFalse(apps.ready)
raise ValueError()
self.assertTrue(apps.ready)
def test_render(self):
"""
Tests rendering a ProjectState into an Apps.
"""
project_state = ProjectState()
project_state.add_model(ModelState(
app_label="migrations",
name="Tag",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
))
project_state.add_model(ModelState(
app_label="migrations",
name="SubTag",
fields=[
('tag_ptr', models.OneToOneField(
'migrations.Tag',
models.CASCADE,
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
)),
("awesome", models.BooleanField()),
],
bases=("migrations.Tag",),
))
base_mgr = models.Manager()
mgr1 = FoodManager('a', 'b')
mgr2 = FoodManager('x', 'y', c=3, d=4)
project_state.add_model(ModelState(
app_label="migrations",
name="Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
# The ordering we really want is default, mgr1, mgr2
('default', base_mgr),
('food_mgr2', mgr2),
('food_mgr1', mgr1),
]
))
new_apps = project_state.apps
self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("name").max_length, 100)
self.assertIs(new_apps.get_model("migrations", "Tag")._meta.get_field("hidden").null, False)
self.assertEqual(len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2)
Food = new_apps.get_model("migrations", "Food")
self.assertEqual([mgr.name for mgr in Food._meta.managers],
['default', 'food_mgr1', 'food_mgr2'])
self.assertTrue(all(isinstance(mgr.name, str) for mgr in Food._meta.managers))
self.assertEqual([mgr.__class__ for mgr in Food._meta.managers],
[models.Manager, FoodManager, FoodManager])
def test_render_model_inheritance(self):
class Book(models.Model):
title = models.CharField(max_length=1000)
class Meta:
app_label = "migrations"
apps = Apps()
class Novel(Book):
class Meta:
app_label = "migrations"
apps = Apps()
# First, test rendering individually
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(Novel)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent model is in the app registry, it should be fine
ModelState.from_model(Book).render(apps)
ModelState.from_model(Novel).render(apps)
def test_render_model_with_multiple_inheritance(self):
class Foo(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class Bar(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class FooBar(Foo, Bar):
class Meta:
app_label = "migrations"
apps = Apps()
class AbstractSubFooBar(FooBar):
class Meta:
abstract = True
apps = Apps()
class SubFooBar(AbstractSubFooBar):
class Meta:
app_label = "migrations"
apps = Apps()
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(FooBar)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent models are in the app registry, it should be fine
ModelState.from_model(Foo).render(apps)
self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model])
ModelState.from_model(Bar).render(apps)
self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model])
ModelState.from_model(FooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(FooBar).bases, ['migrations.foo', 'migrations.bar'])
ModelState.from_model(SubFooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(SubFooBar).bases, ['migrations.foobar'])
def test_render_project_dependencies(self):
"""
The ProjectState render method correctly renders models
to account for inter-model base dependencies.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = "migrations"
apps = new_apps
class B(A):
class Meta:
app_label = "migrations"
apps = new_apps
class C(B):
class Meta:
app_label = "migrations"
apps = new_apps
class D(A):
class Meta:
app_label = "migrations"
apps = new_apps
class E(B):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
class F(D):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
# Make a ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.add_model(ModelState.from_model(D))
project_state.add_model(ModelState.from_model(E))
project_state.add_model(ModelState.from_model(F))
final_apps = project_state.apps
self.assertEqual(len(final_apps.get_models()), 6)
# Now make an invalid ProjectState and make sure it fails
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.add_model(ModelState.from_model(F))
with self.assertRaises(InvalidBasesError):
project_state.apps
def test_render_unique_app_labels(self):
"""
The ProjectState render method doesn't raise an
ImproperlyConfigured exception about unique labels if two dotted app
names have the same last part.
"""
class A(models.Model):
class Meta:
app_label = "django.contrib.auth"
class B(models.Model):
class Meta:
app_label = "vendor.auth"
# Make a ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
self.assertEqual(len(project_state.apps.get_models()), 2)
def test_add_relations(self):
"""
#24573 - Adding relations to existing models should reload the
referenced models too.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = 'something'
apps = new_apps
class B(A):
class Meta:
app_label = 'something'
apps = new_apps
class C(models.Model):
class Meta:
app_label = 'something'
apps = new_apps
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.apps # We need to work with rendered models
old_state = project_state.clone()
model_a_old = old_state.apps.get_model('something', 'A')
model_b_old = old_state.apps.get_model('something', 'B')
model_c_old = old_state.apps.get_model('something', 'C')
# The relations between the old models are correct
self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old)
self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old)
operation = AddField('c', 'to_a', models.OneToOneField(
'something.A',
models.CASCADE,
related_name='from_c',
))
operation.state_forwards('something', project_state)
model_a_new = project_state.apps.get_model('something', 'A')
model_b_new = project_state.apps.get_model('something', 'B')
model_c_new = project_state.apps.get_model('something', 'C')
# All models have changed
self.assertIsNot(model_a_old, model_a_new)
self.assertIsNot(model_b_old, model_b_new)
self.assertIsNot(model_c_old, model_c_new)
# The relations between the old models still hold
self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old)
self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old)
# The relations between the new models are correct
self.assertIs(model_a_new._meta.get_field('b').related_model, model_b_new)
self.assertIs(model_b_new._meta.get_field('a_ptr').related_model, model_a_new)
self.assertIs(model_a_new._meta.get_field('from_c').related_model, model_c_new)
self.assertIs(model_c_new._meta.get_field('to_a').related_model, model_a_new)
def test_remove_relations(self):
"""
#24225 - Relations between models are updated while the relations and
references of models in an old state are preserved.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = "something"
apps = new_apps
class B(models.Model):
to_a = models.ForeignKey(A, models.CASCADE)
class Meta:
app_label = "something"
apps = new_apps
def get_model_a(state):
return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0]
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1)
old_state = project_state.clone()
operation = RemoveField("b", "to_a")
operation.state_forwards("something", project_state)
# Model from old_state still has the relation
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
self.assertEqual(len(model_a_old._meta.related_objects), 1)
self.assertEqual(len(model_a_new._meta.related_objects), 0)
# Same test for deleted model
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
old_state = project_state.clone()
operation = DeleteModel("b")
operation.state_forwards("something", project_state)
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
self.assertEqual(len(model_a_old._meta.related_objects), 1)
self.assertEqual(len(model_a_new._meta.related_objects), 0)
def test_self_relation(self):
"""
#24513 - Modifying an object pointing to itself would cause it to be
rendered twice and thus break its related M2M through objects.
"""
class A(models.Model):
to_a = models.ManyToManyField('something.A', symmetrical=False)
class Meta:
app_label = "something"
def get_model_a(state):
return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0]
project_state = ProjectState()
project_state.add_model((ModelState.from_model(A)))
self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1)
old_state = project_state.clone()
operation = AlterField(
model_name="a",
name="to_a",
field=models.ManyToManyField("something.A", symmetrical=False, blank=True)
)
# At this point the model would be rendered twice causing its related
# M2M through objects to point to an old copy and thus breaking their
# attribute lookup.
operation.state_forwards("something", project_state)
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
# The old model's _meta is still consistent
field_to_a_old = model_a_old._meta.get_field("to_a")
self.assertEqual(field_to_a_old.m2m_field_name(), "from_a")
self.assertEqual(field_to_a_old.m2m_reverse_field_name(), "to_a")
self.assertIs(field_to_a_old.related_model, model_a_old)
self.assertIs(field_to_a_old.remote_field.through._meta.get_field('to_a').related_model, model_a_old)
self.assertIs(field_to_a_old.remote_field.through._meta.get_field('from_a').related_model, model_a_old)
# The new model's _meta is still consistent
field_to_a_new = model_a_new._meta.get_field("to_a")
self.assertEqual(field_to_a_new.m2m_field_name(), "from_a")
self.assertEqual(field_to_a_new.m2m_reverse_field_name(), "to_a")
self.assertIs(field_to_a_new.related_model, model_a_new)
self.assertIs(field_to_a_new.remote_field.through._meta.get_field('to_a').related_model, model_a_new)
self.assertIs(field_to_a_new.remote_field.through._meta.get_field('from_a').related_model, model_a_new)
def test_equality(self):
"""
== and != are implemented correctly.
"""
# Test two things that should be equal
project_state = ProjectState()
project_state.add_model(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
{},
None,
))
project_state.apps # Fill the apps cached property
other_state = project_state.clone()
self.assertEqual(project_state, project_state)
self.assertEqual(project_state, other_state)
self.assertIs(project_state != project_state, False)
self.assertIs(project_state != other_state, False)
self.assertNotEqual(project_state.apps, other_state.apps)
# Make a very small change (max_len 99) and see if that affects it
project_state = ProjectState()
project_state.add_model(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=99)),
("hidden", models.BooleanField()),
],
{},
None,
))
self.assertNotEqual(project_state, other_state)
self.assertIs(project_state == other_state, False)
def test_dangling_references_throw_error(self):
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Publisher(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
publisher = models.ForeignKey(Publisher, models.CASCADE)
class Meta:
app_label = "migrations"
apps = new_apps
class Magazine(models.Model):
authors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Author))
project_state.add_model(ModelState.from_model(Publisher))
project_state.add_model(ModelState.from_model(Book))
project_state.add_model(ModelState.from_model(Magazine))
self.assertEqual(len(project_state.apps.get_models()), 4)
# now make an invalid one with a ForeignKey
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Book))
msg = (
"The field migrations.Book.author was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Book.publisher was declared with a lazy reference "
"to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'."
)
with self.assertRaisesMessage(ValueError, msg):
project_state.apps
# And another with ManyToManyField.
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Magazine))
msg = (
"The field migrations.Magazine.authors was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Magazine_authors.author was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'."
)
with self.assertRaisesMessage(ValueError, msg):
project_state.apps
# And now with multiple models and multiple fields.
project_state.add_model(ModelState.from_model(Book))
msg = (
"The field migrations.Book.author was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Book.publisher was declared with a lazy reference "
"to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'.\n"
"The field migrations.Magazine.authors was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n"
"The field migrations.Magazine_authors.author was declared with a lazy reference "
"to 'migrations.author', but app 'migrations' doesn't provide model 'author'."
)
with self.assertRaisesMessage(ValueError, msg):
project_state.apps
def test_real_apps(self):
"""
Including real apps can resolve dangling FK errors.
This test relies on the fact that contenttypes is always loaded.
"""
new_apps = Apps()
class TestModel(models.Model):
ct = models.ForeignKey("contenttypes.ContentType", models.CASCADE)
class Meta:
app_label = "migrations"
apps = new_apps
# If we just stick it into an empty state it should fail
project_state = ProjectState()
project_state.add_model(ModelState.from_model(TestModel))
with self.assertRaises(ValueError):
project_state.apps
# If we include the real app it should succeed
project_state = ProjectState(real_apps=["contenttypes"])
project_state.add_model(ModelState.from_model(TestModel))
rendered_state = project_state.apps
self.assertEqual(
len([x for x in rendered_state.get_models() if x._meta.app_label == "migrations"]),
1,
)
def test_ignore_order_wrt(self):
"""
Makes sure ProjectState doesn't include OrderWrt fields when
built from existing models.
"""
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
app_label = "migrations"
apps = new_apps
order_with_respect_to = "author"
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Author))
project_state.add_model(ModelState.from_model(Book))
self.assertEqual(
[name for name, field in project_state.models["migrations", "book"].fields],
["id", "author"],
)
def test_manager_refer_correct_model_version(self):
"""
#24147 - Managers refer to the correct version of a
historical model
"""
project_state = ProjectState()
project_state.add_model(ModelState(
app_label="migrations",
name="Tag",
fields=[
("id", models.AutoField(primary_key=True)),
("hidden", models.BooleanField()),
],
managers=[
('food_mgr', FoodManager('a', 'b')),
('food_qs', FoodQuerySet.as_manager()),
]
))
old_model = project_state.apps.get_model('migrations', 'tag')
new_state = project_state.clone()
operation = RemoveField("tag", "hidden")
operation.state_forwards("migrations", new_state)
new_model = new_state.apps.get_model('migrations', 'tag')
self.assertIsNot(old_model, new_model)
self.assertIs(old_model, old_model.food_mgr.model)
self.assertIs(old_model, old_model.food_qs.model)
self.assertIs(new_model, new_model.food_mgr.model)
self.assertIs(new_model, new_model.food_qs.model)
self.assertIsNot(old_model.food_mgr, new_model.food_mgr)
self.assertIsNot(old_model.food_qs, new_model.food_qs)
self.assertIsNot(old_model.food_mgr.model, new_model.food_mgr.model)
self.assertIsNot(old_model.food_qs.model, new_model.food_qs.model)
def test_choices_iterator(self):
"""
#24483 - ProjectState.from_apps should not destructively consume
Field.choices iterators.
"""
new_apps = Apps(["migrations"])
choices = [('a', 'A'), ('b', 'B')]
class Author(models.Model):
name = models.CharField(max_length=255)
choice = models.CharField(max_length=255, choices=iter(choices))
class Meta:
app_label = "migrations"
apps = new_apps
ProjectState.from_apps(new_apps)
choices_field = Author._meta.get_field('choice')
self.assertEqual(list(choices_field.choices), choices)
class ModelStateTests(SimpleTestCase):
def test_custom_model_base(self):
state = ModelState.from_model(ModelWithCustomBase)
self.assertEqual(state.bases, (models.Model,))
def test_bound_field_sanity_check(self):
field = models.CharField(max_length=1)
field.model = models.Model
with self.assertRaisesMessage(ValueError, 'ModelState.fields cannot be bound to a model - "field" is.'):
ModelState('app', 'Model', [('field', field)])
def test_sanity_check_to(self):
field = models.ForeignKey(UnicodeModel, models.CASCADE)
with self.assertRaisesMessage(
ValueError,
'ModelState.fields cannot refer to a model class - "field.to" does. '
'Use a string reference instead.'
):
ModelState('app', 'Model', [('field', field)])
def test_sanity_check_through(self):
field = models.ManyToManyField('UnicodeModel')
field.remote_field.through = UnicodeModel
with self.assertRaisesMessage(
ValueError,
'ModelState.fields cannot refer to a model class - "field.through" does. '
'Use a string reference instead.'
):
ModelState('app', 'Model', [('field', field)])
def test_sanity_index_name(self):
field = models.IntegerField()
options = {'indexes': [models.Index(fields=['field'])]}
msg = "Indexes passed to ModelState require a name attribute. <Index: fields='field'> doesn't have one."
with self.assertRaisesMessage(ValueError, msg):
ModelState('app', 'Model', [('field', field)], options=options)
def test_fields_immutability(self):
"""
Rendering a model state doesn't alter its internal fields.
"""
apps = Apps()
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)])
Model = state.render(apps)
self.assertNotEqual(Model._meta.get_field('name'), field)
def test_repr(self):
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)], bases=['app.A', 'app.B', 'app.C'])
self.assertEqual(repr(state), "<ModelState: 'app.Model'>")
project_state = ProjectState()
project_state.add_model(state)
with self.assertRaisesMessage(InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]"):
project_state.apps
@override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
def test_create_swappable(self):
"""
Tests making a ProjectState from an Apps with a swappable model
"""
new_apps = Apps(['migrations'])
class Author(models.Model):
name = models.CharField(max_length=255)
bio = models.TextField()
age = models.IntegerField(blank=True, null=True)
class Meta:
app_label = 'migrations'
apps = new_apps
swappable = 'TEST_SWAPPABLE_MODEL'
author_state = ModelState.from_model(Author)
self.assertEqual(author_state.app_label, 'migrations')
self.assertEqual(author_state.name, 'Author')
self.assertEqual([x for x, y in author_state.fields], ['id', 'name', 'bio', 'age'])
self.assertEqual(author_state.fields[1][1].max_length, 255)
self.assertIs(author_state.fields[2][1].null, False)
self.assertIs(author_state.fields[3][1].null, True)
self.assertEqual(author_state.options, {'swappable': 'TEST_SWAPPABLE_MODEL', 'indexes': []})
self.assertEqual(author_state.bases, (models.Model, ))
self.assertEqual(author_state.managers, [])
@override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
def test_create_swappable_from_abstract(self):
"""
A swappable model inheriting from a hierarchy:
concrete -> abstract -> concrete.
"""
new_apps = Apps(['migrations'])
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Meta:
app_label = 'migrations'
apps = new_apps
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CharField(max_length=128)
inbound = models.BooleanField(default=False)
class Meta(Station.Meta):
app_label = 'migrations'
apps = new_apps
swappable = 'TEST_SWAPPABLE_MODEL'
station_state = ModelState.from_model(BusStation)
self.assertEqual(station_state.app_label, 'migrations')
self.assertEqual(station_state.name, 'BusStation')
self.assertEqual(
[x for x, y in station_state.fields],
['searchablelocation_ptr', 'name', 'bus_routes', 'inbound']
)
self.assertEqual(station_state.fields[1][1].max_length, 128)
self.assertEqual(station_state.fields[2][1].null, False)
self.assertEqual(
station_state.options,
{'abstract': False, 'swappable': 'TEST_SWAPPABLE_MODEL', 'indexes': []}
)
self.assertEqual(station_state.bases, ('migrations.searchablelocation', ))
self.assertEqual(station_state.managers, [])
@override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
def test_custom_manager_swappable(self):
"""
Tests making a ProjectState from unused models with custom managers
"""
new_apps = Apps(['migrations'])
class Food(models.Model):
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
food_no_mgr = NoMigrationFoodManager('x', 'y')
class Meta:
app_label = "migrations"
apps = new_apps
swappable = 'TEST_SWAPPABLE_MODEL'
food_state = ModelState.from_model(Food)
# The default manager is used in migrations
self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))
@isolate_apps('migrations', 'django.contrib.contenttypes')
def test_order_with_respect_to_private_field(self):
class PrivateFieldModel(models.Model):
content_type = models.ForeignKey('contenttypes.ContentType', models.CASCADE)
object_id = models.PositiveIntegerField()
private = GenericForeignKey()
class Meta:
order_with_respect_to = 'private'
state = ModelState.from_model(PrivateFieldModel)
self.assertNotIn('order_with_respect_to', state.options)
@isolate_apps('migrations')
def test_abstract_model_children_inherit_indexes(self):
class Abstract(models.Model):
name = models.CharField(max_length=50)
class Meta:
app_label = 'migrations'
abstract = True
indexes = [models.indexes.Index(fields=['name'])]
class Child1(Abstract):
pass
class Child2(Abstract):
pass
child1_state = ModelState.from_model(Child1)
child2_state = ModelState.from_model(Child2)
index_names = [index.name for index in child1_state.options['indexes']]
self.assertEqual(index_names, ['migrations__name_b0afd7_idx'])
index_names = [index.name for index in child2_state.options['indexes']]
self.assertEqual(index_names, ['migrations__name_016466_idx'])
# Modifying the state doesn't modify the index on the model.
child1_state.options['indexes'][0].name = 'bar'
self.assertEqual(Child1._meta.indexes[0].name, 'migrations__name_b0afd7_idx')
@isolate_apps('migrations')
def test_explicit_index_name(self):
class TestModel(models.Model):
name = models.CharField(max_length=50)
class Meta:
app_label = 'migrations'
indexes = [models.indexes.Index(fields=['name'], name='foo_idx')]
model_state = ModelState.from_model(TestModel)
index_names = [index.name for index in model_state.options['indexes']]
self.assertEqual(index_names, ['foo_idx'])
class RelatedModelsTests(SimpleTestCase):
def setUp(self):
self.apps = Apps(['migrations.related_models_app'])
def create_model(self, name, foreign_keys=(), bases=(), abstract=False, proxy=False):
test_name = 'related_models_app'
assert not (abstract and proxy)
meta_contents = {
'abstract': abstract,
'app_label': test_name,
'apps': self.apps,
'proxy': proxy,
}
meta = type("Meta", tuple(), meta_contents)
if not bases:
bases = (models.Model,)
body = {
'Meta': meta,
'__module__': "__fake__",
}
fname_base = fname = '%s_%%d' % name.lower()
for i, fk in enumerate(foreign_keys, 1):
fname = fname_base % i
body[fname] = fk
return type(name, bases, body)
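# Illustrative: create_model("A", foreign_keys=[models.ForeignKey('B',
# models.CASCADE)]) returns a model class A whose generated FK attribute is
# named "a_1"; a second foreign key would be named "a_2", and so on.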
def assertRelated(self, model, needle):
self.assertEqual(
get_related_models_recursive(model),
{(n._meta.app_label, n._meta.model_name) for n in needle},
)
def test_unrelated(self):
A = self.create_model("A")
B = self.create_model("B")
self.assertRelated(A, [])
self.assertRelated(B, [])
def test_direct_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B")
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_direct_hidden_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE, related_name='+')])
B = self.create_model("B")
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_fk_through_proxy(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
C = self.create_model("C", bases=(B,), proxy=True)
D = self.create_model("D", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
self.assertRelated(A, [B, C, D])
self.assertRelated(B, [A, C, D])
self.assertRelated(C, [A, B, D])
self.assertRelated(D, [A, B, C])
def test_nested_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
C = self.create_model("C")
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_two_sided(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B", foreign_keys=[models.ForeignKey('A', models.CASCADE)])
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_circle(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)])
B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)])
C = self.create_model("C", foreign_keys=[models.ForeignKey('A', models.CASCADE)])
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,))
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_nested_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,))
C = self.create_model("C", bases=(B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_multiple_bases(self):
A = self.create_model("A")
B = self.create_model("B")
C = self.create_model("C", bases=(A, B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_multiple_nested_bases(self):
A = self.create_model("A")
B = self.create_model("B")
C = self.create_model("C", bases=(A, B,))
D = self.create_model("D")
E = self.create_model("E", bases=(D,))
F = self.create_model("F", bases=(C, E,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, C, D, E, F])
self.assertRelated(B, [A, C, D, E, F])
self.assertRelated(C, [A, B, D, E, F])
self.assertRelated(D, [A, B, C, E, F])
self.assertRelated(E, [A, B, C, D, F])
self.assertRelated(F, [A, B, C, D, E])
self.assertRelated(Y, [Z])
self.assertRelated(Z, [Y])
def test_base_to_base_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('Y', models.CASCADE)])
B = self.create_model("B", bases=(A,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, Y, Z])
self.assertRelated(B, [A, Y, Z])
self.assertRelated(Y, [A, B, Z])
self.assertRelated(Z, [A, B, Y])
def test_base_to_subclass_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('Z', models.CASCADE)])
B = self.create_model("B", bases=(A,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, Y, Z])
self.assertRelated(B, [A, Y, Z])
self.assertRelated(Y, [A, B, Z])
self.assertRelated(Z, [A, B, Y])
def test_direct_m2m(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B')])
B = self.create_model("B")
self.assertRelated(A, [A.a_1.rel.through, B])
self.assertRelated(B, [A, A.a_1.rel.through])
def test_direct_m2m_self(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('A')])
self.assertRelated(A, [A.a_1.rel.through])
def test_intermediate_m2m_self(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('A', through='T')])
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('A', models.CASCADE),
])
self.assertRelated(A, [T])
self.assertRelated(T, [A])
def test_intermediate_m2m(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('B', models.CASCADE),
])
self.assertRelated(A, [B, T])
self.assertRelated(B, [A, T])
self.assertRelated(T, [A, B])
def test_intermediate_m2m_extern_fk(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
Z = self.create_model("Z")
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('B', models.CASCADE),
models.ForeignKey('Z', models.CASCADE),
])
self.assertRelated(A, [B, T, Z])
self.assertRelated(B, [A, T, Z])
self.assertRelated(T, [A, B, Z])
self.assertRelated(Z, [A, B, T])
def test_intermediate_m2m_base(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
S = self.create_model("S")
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A', models.CASCADE),
models.ForeignKey('B', models.CASCADE),
], bases=(S,))
self.assertRelated(A, [B, S, T])
self.assertRelated(B, [A, S, T])
self.assertRelated(S, [A, B, T])
self.assertRelated(T, [A, B, S])
def test_generic_fk(self):
A = self.create_model("A", foreign_keys=[
models.ForeignKey('B', models.CASCADE),
GenericForeignKey(),
])
B = self.create_model("B", foreign_keys=[
models.ForeignKey('C', models.CASCADE),
])
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_abstract_base(self):
A = self.create_model("A", abstract=True)
B = self.create_model("B", bases=(A,))
self.assertRelated(A, [B])
self.assertRelated(B, [])
def test_nested_abstract_base(self):
A = self.create_model("A", abstract=True)
B = self.create_model("B", bases=(A,), abstract=True)
C = self.create_model("C", bases=(B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [C])
self.assertRelated(C, [])
def test_proxy_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
self.assertRelated(A, [B])
self.assertRelated(B, [])
def test_nested_proxy_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
C = self.create_model("C", bases=(B,), proxy=True)
self.assertRelated(A, [B, C])
self.assertRelated(B, [C])
self.assertRelated(C, [])
def test_multiple_mixed_bases(self):
A = self.create_model("A", abstract=True)
M = self.create_model("M")
P = self.create_model("P")
Q = self.create_model("Q", bases=(P,), proxy=True)
Z = self.create_model("Z", bases=(A, M, Q))
# Z has a pointer O2O field p_ptr to P (inherited via the proxy Q)
self.assertRelated(A, [M, P, Q, Z])
self.assertRelated(M, [P, Q, Z])
self.assertRelated(P, [M, Q, Z])
self.assertRelated(Q, [M, P, Z])
self.assertRelated(Z, [M, P, Q])
|
{
"content_hash": "37e787522a968755f9a46fd0ce6958b9",
"timestamp": "",
"source": "github",
"line_count": 1383,
"max_line_length": 113,
"avg_line_length": 38.89877078814172,
"alnum_prop": 0.5832853133074335,
"repo_name": "zsiciarz/django",
"id": "ee6a9544d9284d6e6ee2156cf5e34d090bdd27c9",
"size": "53797",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tests/migrations/test_state.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "202902"
},
{
"name": "JavaScript",
"bytes": "252653"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11837275"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals, division
import json
import pkgutil
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from sc2reader.log_utils import loggable
try:
cmp # Python 2
except NameError:
cmp = lambda a, b: (a > b) - (a < b) # noqa Python 3
ABIL_LOOKUP = dict()
for entry in (
pkgutil.get_data("sc2reader.data", "ability_lookup.csv").decode("utf8").split("\n")
):
if not entry:
continue
str_id, abilities = entry.split(",", 1)
ABIL_LOOKUP[str_id] = abilities.split(",")
UNIT_LOOKUP = dict()
for entry in (
pkgutil.get_data("sc2reader.data", "unit_lookup.csv").decode("utf8").split("\n")
):
if not entry:
continue
str_id, title = entry.strip().split(",")
UNIT_LOOKUP[str_id] = title
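# Both lookup files are plain CSV (the rows below are illustrative, not taken
# from the real data files):
#   ability_lookup.csv: a string id followed by per-index ability names, some
#     of which may be blank, e.g. "BarracksTrain,Marine,Reaper,,Marauder"
#   unit_lookup.csv: a two-column "str_id,Title" mapping, e.g. "marine,Marine"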
unit_data = pkgutil.get_data("sc2reader.data", "unit_info.json").decode("utf8")
unit_lookup = json.loads(unit_data)
command_data = pkgutil.get_data("sc2reader.data", "train_commands.json").decode("utf8")
train_commands = json.loads(command_data)
class Unit(object):
"""Represents an in-game unit."""
def __init__(self, unit_id):
#: A reference to the player that currently owns this unit. Only available for 2.0.8+ replays.
self.owner = None
#: The frame the unit was started at. Only available for 2.0.8+ replays.
#: Specifically, it is the frame the :class:`~sc2reader.events.tracker.UnitInitEvent` is received. For units
#: that are born and not initiated this will be the same as :attr:`finished_at`.
self.started_at = None
#: The frame the unit was finished at. Only available for 2.0.8+ replays.
#: Specifically, it is the frame that the :class:`~sc2reader.events.tracker.UnitDoneEvent` is received. For units
#: that are born and not initiated this will be the frame that the :class:`~sc2reader.events.tracker.UnitBornEvent`
#: is received.
self.finished_at = None
#: The frame the unit died at. Only available for 2.0.8+ replays.
#: Specifically, it is the frame that the :class:`~sc2reader.events.tracker.UnitDiedEvent` is received.
self.died_at = None
#: Deprecated, see :attr:`self.killing_player`
self.killed_by = None
#: A reference to the player that killed this unit. Only available for 2.0.8+ replays.
#: This value is not set if the killer is unknown or not relevant (morphed into a
#: different unit or used to create a building, etc)
self.killing_player = None
#: A reference to the unit that killed this unit. Only available for 2.1+ replays.
#: This value is not set if the killer is unknown or not relevant (morphed into a
#: different unit or used to create a building, etc). If the killing unit dies before
#: the killed unit dies, a bug may cause the killing unit to be None. This can occur
#: because of projectile speeds.
self.killing_unit = None
#: A list of units that this unit has killed. Only available for 2.1+ replays.
#: The unit only gets credit for the kills that it gets the final blow on.
self.killed_units = list()
#: The unique in-game id for this unit. The id can sometimes be zero because
#: TargetUnitCommandEvents will create a new unit with id zero when a unit
#: behind the fog of war is targeted.
self.id = unit_id
#: A reference to the unit type this unit is currently in.
#: e.g. SiegeTank is a different type than SiegeTankSieged
self._type_class = None
#: A history of all the unit types this unit has had stored
#: in order by frame the type was acquired.
self.type_history = OrderedDict()
#: Is this unit type a hallucinated one?
self.hallucinated = False
self.flags = 0
def apply_flags(self, flags):
self.flags = flags
self.hallucinated = flags & 2 == 2
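# Bit 0x2 of the tracker flag word marks hallucinated units; the raw flags
# are kept on self.flags for callers that need the other bits.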
def set_type(self, unit_type, frame):
self._type_class = unit_type
self.type_history[frame] = unit_type
def is_type(self, unit_type, strict=True):
if strict:
if isinstance(unit_type, int):
if self._type_class:
return unit_type == self._type_class.id
else:
return unit_type == 0
elif isinstance(unit_type, Unit):
return self._type_class == unit_type
else:
if self._type_class:
return unit_type == self._type_class.str_id
else:
return unit_type is None
else:
if isinstance(unit_type, int):
if self._type_class:
return unit_type in [
utype.id for utype in self.type_history.values()
]
else:
return unit_type == 0
elif isinstance(unit_type, Unit):
return unit_type in self.type_history.values()
else:
if self._type_class:
return unit_type in [
utype.str_id for utype in self.type_history.values()
]
else:
return unit_type is None
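# Illustrative usage (hypothetical str_ids): with strict=True only the
# current type is checked; with strict=False any type in type_history
# matches:
#
#   unit.is_type("SiegeTankSieged")            # current type only
#   unit.is_type("SiegeTank")                  # False after sieging
#   unit.is_type("SiegeTank", strict=False)    # True if it ever had that type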
@property
def name(self):
"""The name of the unit type currently active. None if no type is assigned"""
return self._type_class.name if self._type_class else None
@property
def title(self):
return self._type_class.title if self._type_class else None
@property
def type(self):
"""The internal type id of the current unit type of this unit. None if no type is assigned"""
return self._type_class.id if self._type_class else None
@property
def race(self):
"""The race of this unit. One of Terran, Protoss, Zerg, Neutral, or None"""
return self._type_class.race if self._type_class else None
@property
def minerals(self):
"""The mineral cost of the unit. None if no type is assigned"""
return self._type_class.minerals if self._type_class else None
@property
def vespene(self):
"""The vespene cost of the unit. None if no type is assigned"""
return self._type_class.vespene if self._type_class else None
@property
def supply(self):
"""The supply used by this unit. Negative for supply providers. None if no type is assigned"""
return self._type_class.supply if self._type_class else None
@property
def is_worker(self):
"""Boolean flagging units as worker units. SCV, MULE, Drone, Probe"""
return self._type_class.is_worker if self._type_class else False
@property
def is_building(self):
"""Boolean flagging units as buildings."""
return self._type_class.is_building if self._type_class else False
@property
def is_army(self):
"""Boolean flagging units as army units."""
return self._type_class.is_army if self._type_class else False
def __str__(self):
return "{0} [{1:X}]".format(self.name, self.id)
def __cmp__(self, other):
return cmp(self.id, other.id)
def __lt__(self, other):
return self.id < other.id
def __le__(self, other):
return self.id <= other.id
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return self.id != other.id
def __gt__(self, other):
return self.id > other.id
def __ge__(self, other):
return self.id >= other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return str(self)
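# Note that the rich comparisons and __hash__ above use only the in-game id,
# so two Unit objects with the same id compare equal regardless of type
# history or ownership.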
class UnitType(object):
"""Represents an in game unit type"""
def __init__(
self,
type_id,
str_id=None,
name=None,
title=None,
race=None,
minerals=0,
vespene=0,
supply=0,
is_building=False,
is_worker=False,
is_army=False,
):
#: The internal integer id representing this unit type
self.id = type_id
#: The internal string id representing this unit type
self.str_id = str_id
#: The name of this unit type
self.name = name
#: The printable title of this unit type; has spaces and possibly punctuation
self.title = title
#: The race this unit type belongs to
self.race = race
#: The mineral cost of this unit type
self.minerals = minerals
#: The vespene cost of this unit type
self.vespene = vespene
#: The supply cost of this unit type
self.supply = supply
#: Boolean flagging this unit type as a building
self.is_building = is_building
#: Boolean flagging this unit type as a worker
self.is_worker = is_worker
#: Boolean flagging this unit type as an army unit
self.is_army = is_army
class Ability(object):
"""Represents an in-game ability"""
def __init__(
self, id, name=None, title=None, is_build=False, build_time=0, build_unit=None
):
#: The internal integer id representing this ability.
self.id = id
#: The name of this ability
self.name = name
#: The printable title of this ability; has spaces and possibly punctuation.
self.title = title
#: Boolean flagging this ability as creating a new unit.
self.is_build = is_build
#: The number of seconds required to build this unit. 0 if not ``is_build``.
self.build_time = build_time
#: A reference to the :class:`UnitType` type built by this ability. Default to None.
self.build_unit = build_unit
@loggable
class Build(object):
"""
:param build_id: The build number identifying this dataset.
The datapack for a particular group of builds. Maps internal integer ids
to :class:`Unit` and :class:`Ability` types. Also contains builder methods
for creating new units and changing their types.
All build data is valid for standard games only. For arcade maps mileage
may vary.
"""
def __init__(self, build_id):
#: The integer id of the build
self.id = build_id
#: A dictionary mapping integer ids to available unit types.
self.units = dict()
#: A dictionary mapping integer ids to available abilities.
self.abilities = dict()
def create_unit(self, unit_id, unit_type, frame):
"""
:param unit_id: The unique id of this unit.
:param unit_type: The unit type to assign to the new unit
Creates a new unit and assigns it to the specified type.
"""
unit = Unit(unit_id)
self.change_type(unit, unit_type, frame)
return unit
def change_type(self, unit, new_type, frame):
"""
:param unit: The unit whose type is changing.
:param new_type: The unit type id to assign to this unit.
Assigns the given type to a unit.
"""
if new_type in self.units:
unit_type = self.units[new_type]
unit.set_type(unit_type, frame)
else:
self.logger.error(
"Unable to change type of {0} to {1} [frame {2}]; unit type not found in build {3}".format(
unit, new_type, frame, self.id
)
)
def add_ability(
self,
ability_id,
name,
title=None,
is_build=False,
build_time=None,
build_unit=None,
):
ability = Ability(
ability_id,
name=name,
title=title or name,
is_build=is_build,
build_time=build_time,
build_unit=build_unit,
)
setattr(self, name, ability)
self.abilities[ability_id] = ability
def add_unit_type(
self,
type_id,
str_id,
name,
title=None,
race="Neutral",
minerals=0,
vespene=0,
supply=0,
is_building=False,
is_worker=False,
is_army=False,
):
unit = UnitType(
type_id,
str_id=str_id,
name=name,
title=title or name,
race=race,
minerals=minerals,
vespene=vespene,
supply=supply,
is_building=is_building,
is_worker=is_worker,
is_army=is_army,
)
setattr(self, name, unit)
self.units[type_id] = unit
self.units[str_id] = unit
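# Minimal usage sketch (illustrative; the ids and stats below are
# hypothetical, real values come from the bundled CSV files via load_build
# below):
#
#   build = Build(24944)
#   build.add_unit_type(type_id=48, str_id="marine", name="Marine",
#                       race="Terran", minerals=50, supply=1, is_army=True)
#   marine = build.create_unit(unit_id=0x2A0001, unit_type=48, frame=0)
#   marine.name                               # "Marine"
#   build.change_type(marine, 999, frame=10)  # unknown id: logs an error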
def load_build(expansion, version):
build = Build(version)
unit_file = "{0}/{1}_units.csv".format(expansion, version)
for entry in (
pkgutil.get_data("sc2reader.data", unit_file).decode("utf8").split("\n")
):
if not entry:
continue
int_id, str_id = entry.strip().split(",")
unit_type = int(int_id, 10)
title = UNIT_LOOKUP[str_id]
values = dict(type_id=unit_type, str_id=str_id, name=title)
for race in ("Protoss", "Terran", "Zerg"):
if title.lower() in unit_lookup[race]:
values.update(unit_lookup[race][title.lower()])
values["race"] = race
break
build.add_unit_type(**values)
abil_file = "{0}/{1}_abilities.csv".format(expansion, version)
build.add_ability(ability_id=0, name="RightClick", title="Right Click")
for entry in (
pkgutil.get_data("sc2reader.data", abil_file).decode("utf8").split("\n")
):
if not entry:
continue
int_id_base, str_id = entry.strip().split(",")
int_id_base = int(int_id_base, 10) << 5
abils = ABIL_LOOKUP[str_id]
real_abils = [(i, abil) for i, abil in enumerate(abils) if abil.strip() != ""]
if len(real_abils) == 0:
real_abils = [(0, str_id)]
for index, ability_name in real_abils:
unit_name, build_time = train_commands.get(ability_name, ("", 0))
if (
"Hallucinated" in unit_name
): # Not really sure how to handle hallucinations
unit_name = unit_name[12:]
build.add_ability(
ability_id=int_id_base | index,
name=ability_name,
is_build=bool(unit_name),
build_unit=getattr(build, unit_name, None),
build_time=build_time,
)
return build
# Load the WoL Data
wol_builds = dict()
for version in ("16117", "17326", "18092", "19458", "22612", "24944"):
wol_builds[version] = load_build("WoL", version)
# Load HotS Data
hots_builds = dict()
for version in ("base", "23925", "24247", "24764"):
hots_builds[version] = load_build("HotS", version)
hots_builds["38215"] = load_build("LotV", "base")
hots_builds["38215"].id = "38215"
# Load LotV Data
lotv_builds = dict()
for version in (
"base",
"44401",
"47185",
"48258",
"53644",
"54724",
"59587",
"70154",
"76114",
"77379",
"80949",
):
lotv_builds[version] = load_build("LotV", version)
datapacks = builds = {"WoL": wol_builds, "HotS": hots_builds, "LotV": lotv_builds}
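# Example lookup: datapacks["LotV"]["80949"] returns the Build for that game
# version; its .units and .abilities dicts then resolve internal ids.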
|
{
"content_hash": "9e0551bf8e0e9f2b322f265a0abfa672",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 123,
"avg_line_length": 31.62962962962963,
"alnum_prop": 0.5813817330210773,
"repo_name": "StoicLoofah/sc2reader",
"id": "6cbb942592268fe65bece83eaf26169b5505a75d",
"size": "15396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sc2reader/data/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "441549"
}
],
"symlink_target": ""
}
|
from charms.reactive import (
hook,
when,
only_once,
is_state
)
import os.path as path
from charmhelpers.core import hookenv
from charmhelpers.core.host import service_restart
from charmhelpers.core.templating import render
from subprocess import call
from charms.layer import nginx, dokuwiki, php
config = hookenv.config()
# HOOKS -----------------------------------------------------------------------
@hook('config-changed')
def config_changed():
if not is_state('nginx.available'):
return
# Define user
app_path = nginx.get_app_path()
render(source='users.auth.php',
target=path.join(app_path, 'conf/users.auth.php'),
context=config, perms=0o664)
call('chown www-data:www-data -R {}'.format(app_path), shell=True)
call('chmod 775 -R {}/conf'.format(app_path), shell=True)
call('mkdir -p {app_path}/data && chmod 775 -R {app_path}/data'.format(
app_path=app_path), shell=True)
php.restart()
service_restart('nginx')
hookenv.status_set('active', 'Ready')
# REACTORS --------------------------------------------------------------------
@when('nginx.available')
@when('php.ready')
@only_once
def install_app():
""" Performs application installation
"""
hookenv.log('Installing Dokuwiki', 'info')
# Configure NGINX vhost
nginx.configure_site('default', 'vhost.conf',
listen_address=php.socket())
# Update application
dokuwiki.download_archive()
# Needs to set dokuwiki directory permissions for installation
app_path = nginx.get_app_path()
render(source='local.php',
target=path.join(app_path, 'conf/local.php'),
context=config, perms=0o644)
render(source='acl.auth.php',
target=path.join(app_path, 'conf/acl.auth.php'),
context=config, perms=0o644)
render(source='plugins.local.php',
target=path.join(app_path, 'conf/plugins.local.php'),
context=config, perms=0o644)
# Clean up install.php as we don't need it
call("rm -f {}/conf/install.php", shell=True)
php.restart()
service_restart('nginx')
hookenv.status_set('active', 'Dokuwiki is installed!')
|
{
"content_hash": "d449d0b680072554dd97844db8eb5b4b",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 28.844155844155843,
"alnum_prop": 0.6078343088698784,
"repo_name": "battlemidget/juju-charm-dokuwiki",
"id": "c485f48176dcfecf7571f1bbea0cef6a36a5a6df",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reactive/dokuwiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "4335"
},
{
"name": "Shell",
"bytes": "796"
}
],
"symlink_target": ""
}
|
import pyarrow
import six
from six.moves.urllib.parse import urlparse
from petastorm.hdfs.namenode import HdfsNamenodeResolver, HdfsConnector
class CarbonFilesystemResolver(object):
"""Resolves a dataset URL, makes a connection via pyarrow, and provides a filesystem object."""
def __init__(self, dataset_url, key=None, secret=None, endpoint=None, proxy=None, proxy_port=None,
hadoop_configuration=None, connector=HdfsConnector, hdfs_driver='libhdfs3'):
"""
Given a dataset URL and an optional hadoop configuration, parse and interpret the URL to
instantiate a pyarrow filesystem.
        Interpretation of the URL ``scheme://hostname:port/path`` occurs in the following order:
        1. If no ``scheme``, it is no longer supported, so raise an exception!
        2. If ``scheme`` is ``file``, use the local filesystem path.
        3. If ``scheme`` is ``hdfs``:
           a. Try the ``hostname`` as a namespace and attempt to connect to a name node.
              1. If that doesn't work, try connecting directly to namenode ``hostname:port``.
           b. If no host, connect to the default name node.
        4. If ``scheme`` is ``s3a``, use s3fs. The user must manually install s3fs before using s3 datasets.
        5. Fail otherwise.
:param dataset_url: The hdfs URL or absolute path to the dataset
:param key: access key of obs
:param secret: secret key of obs
:param endpoint: endpoint of obs
        :param proxy: proxy host
        :param proxy_port: proxy port
:param hadoop_configuration: an optional hadoop configuration
:param connector: the HDFS connector object to use (ONLY override for testing purposes)
:param hdfs_driver: A string denoting the hdfs driver to use (if using a dataset on hdfs). Current choices are
libhdfs (java through JNI) or libhdfs3 (C++)
"""
# Cache both the original URL and the resolved, urlparsed dataset_url
self._dataset_url = dataset_url
self._parsed_dataset_url = None
# Cache the instantiated filesystem object
self._filesystem = None
if isinstance(self._dataset_url, six.string_types):
self._parsed_dataset_url = urlparse(self._dataset_url)
else:
self._parsed_dataset_url = self._dataset_url
if not self._parsed_dataset_url.scheme:
# Case 1
            raise ValueError('ERROR! A scheme-less dataset url ({}) is no longer supported. '
                             'Please prepend "file://" for local filesystem.'.format(self._dataset_url))
elif self._parsed_dataset_url.scheme == 'file':
# Case 2: definitely local
self._filesystem = pyarrow.localfs
elif self._parsed_dataset_url.scheme == 'hdfs':
if hdfs_driver == 'libhdfs3':
# libhdfs3 does not do any namenode resolution itself so we do it manually. This is not necessary
# if using libhdfs
# Obtain singleton and force hadoop config evaluation
namenode_resolver = HdfsNamenodeResolver(hadoop_configuration)
# Since we can't tell for sure, first treat the URL as though it references a name service
if self._parsed_dataset_url.netloc:
# Case 3a: Use the portion of netloc before any port, which doesn't get lowercased
nameservice = self._parsed_dataset_url.netloc.split(':')[0]
namenodes = namenode_resolver.resolve_hdfs_name_service(nameservice)
if namenodes:
self._filesystem = connector.connect_to_either_namenode(namenodes)
if self._filesystem is None:
# Case 3a1: That didn't work; try the URL as a namenode host
self._filesystem = connector.hdfs_connect_namenode(self._parsed_dataset_url)
else:
# Case 3b: No netloc, so let's try to connect to default namenode
# HdfsNamenodeResolver will raise exception if it fails to connect.
nameservice, namenodes = namenode_resolver.resolve_default_hdfs_service()
filesystem = connector.connect_to_either_namenode(namenodes)
if filesystem is not None:
# Properly replace the parsed dataset URL once default namenode is confirmed
self._parsed_dataset_url = urlparse(
'hdfs://{}{}'.format(nameservice, self._parsed_dataset_url.path))
self._filesystem = filesystem
else:
self._filesystem = connector.hdfs_connect_namenode(self._parsed_dataset_url, hdfs_driver)
elif self._parsed_dataset_url.scheme == "s3a":
            # Case 4
# S3 support requires s3fs to be installed
try:
import s3fs
except ImportError:
raise ValueError('Must have s3fs installed in order to use datasets on s3. '
'Please install s3fs and try again.')
if not self._parsed_dataset_url.netloc:
raise ValueError('URLs must be of the form s3://bucket/path')
if key is None or secret is None or endpoint is None:
raise ValueError('key, secret, endpoint should not be None')
http_proxy = 'http://' + proxy + ':' + str(proxy_port) if (
proxy is not None and proxy_port is not None) else None
https_proxy = 'https://' + proxy + ':' + str(proxy_port) if (
proxy is not None and proxy_port is not None) else None
config_kwargs = {'proxies': {'http': http_proxy, 'https': https_proxy}} if (
http_proxy is not None) else None
fs = s3fs.S3FileSystem(key=key,
secret=secret,
client_kwargs={'endpoint_url': endpoint},
config_kwargs=config_kwargs)
self._filesystem = pyarrow.filesystem.S3FSWrapper(fs)
else:
            # Case 5
            raise ValueError('Unsupported scheme in dataset url {}. '
                             'Currently, only "file", "hdfs" and "s3a" are supported.'.format(self._parsed_dataset_url.scheme))
def parsed_dataset_url(self):
"""
:return: The urlparse'd dataset_url
"""
return self._parsed_dataset_url
def get_dataset_path(self):
"""
The dataset path is different than the one in `_parsed_dataset_url` for some filesystems.
For example s3fs expects the bucket name to be included in the path and doesn't support
paths that start with a `/`
"""
if isinstance(self._filesystem, pyarrow.filesystem.S3FSWrapper):
# s3fs expects paths of the form `bucket/path`
return self._parsed_dataset_url.netloc + self._parsed_dataset_url.path
return self._parsed_dataset_url.path
def filesystem(self):
"""
:return: The pyarrow filesystem object
"""
return self._filesystem
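# Minimal usage sketch: resolve a dataset URL into a pyarrow filesystem object and
# the path that filesystem expects. The hdfs URL below is an illustrative
# assumption; file:// and s3a:// URLs (with key/secret/endpoint) work the same way.
def open_dataset_filesystem(dataset_url='hdfs://namenode:8020/datasets/example'):
    resolver = CarbonFilesystemResolver(dataset_url)
    return resolver.filesystem(), resolver.get_dataset_path()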
|
{
"content_hash": "653ebf86db0d7c6fa4db55f9bc5b06e2",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 114,
"avg_line_length": 43.852348993288594,
"alnum_prop": 0.6603917967554331,
"repo_name": "zzcclp/carbondata",
"id": "7824b4bea79a6f6d67312d3934c2ed7c86775c0f",
"size": "7316",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pycarbon/core/carbon_fs_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "16022"
},
{
"name": "Batchfile",
"bytes": "1639"
},
{
"name": "C#",
"bytes": "86"
},
{
"name": "C++",
"bytes": "110888"
},
{
"name": "CMake",
"bytes": "1555"
},
{
"name": "Java",
"bytes": "7859129"
},
{
"name": "Python",
"bytes": "368778"
},
{
"name": "Scala",
"bytes": "12011736"
},
{
"name": "Shell",
"bytes": "7259"
},
{
"name": "Thrift",
"bytes": "23385"
}
],
"symlink_target": ""
}
|
from jawaf.server import get_jawaf
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def _address_string(addr):
"""Convenience method to treat a given address field as a string or tuple/list
:param addr: String or List/Tuple. Single address or addresses."""
if isinstance(addr, str):
return addr
return ','.join(addr)
async def send_mail(
subject, message, from_address, to, cc=None, bcc=None, html_message=None
):
"""Send mail using async smtp lib.
:param subject: String. Subject.
:param message: String. Message.
:param from_address: String. From address.
:param to: String or List/Tuple. To address.
:param cc: String or List/Tuple. Email CC.
:param bcc: String or List/Tuple. Email BCC.
:param html_message: String. HTML Message (will send multipart message).
"""
if html_message:
msg = MIMEMultipart('alternative')
msg.attach(MIMEText(message, 'plain'))
msg.attach(MIMEText(html_message, 'html'))
else:
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = from_address
msg['To'] = _address_string(to)
if cc:
msg['Cc'] = _address_string(cc)
recipients = []
for address in [to, cc, bcc]:
if isinstance(address, str):
address = [address]
if address:
recipients.extend(address)
await get_jawaf().get_smtp().sendmail(from_address, recipients, msg.as_string())
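# Usage sketch: send_mail is a coroutine and must be awaited from a running Jawaf
# app with SMTP configured. The subject, body, and addresses below are placeholder
# assumptions for illustration.
async def notify_admins():
    await send_mail(
        subject='Deploy finished',
        message='The deploy completed successfully.',
        from_address='noreply@example.com',
        to=['admin@example.com'],
    )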
|
{
"content_hash": "2fcdc32938c79cca64b47b995a44206b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 34.627906976744185,
"alnum_prop": 0.6501007387508395,
"repo_name": "danpozmanter/jawaf",
"id": "9cb3608887a9ac912cc8d912c1fc22b97bdd73a9",
"size": "1489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jawaf/mail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "131545"
},
{
"name": "Smarty",
"bytes": "5172"
}
],
"symlink_target": ""
}
|
import os, sys, config
# Set the help menu
def getSyntax():
return "Usage: pyvmrun-getip <vm_name>"
# Parsing arguments
if len(sys.argv) == 1:
print getSyntax()
exit(1)
else:
conf = config.Config()
vm_start_dest = conf.getVmsPathDir() + "/" + sys.argv[1] + ".vmwarevm/" + sys.argv[1] + ".vmx"
if not os.path.exists(vm_start_dest):
print "Virtual machine not found."
exit(1)
cmd = "vmrun getGuestIPAddress '" + vm_start_dest + "'"
os.system(cmd)
|
{
"content_hash": "2456e79e705a5ba3ddafc9d487571f9a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 100,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.606425702811245,
"repo_name": "slariviere/py-vmrum",
"id": "87e33ced8c50b6ca4ddaee21e8bb47c8546c3281",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvmrun-getip.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6130"
},
{
"name": "Shell",
"bytes": "1938"
}
],
"symlink_target": ""
}
|
import logging
from ..requests import requests
from itchatmp.config import COROUTINE
from itchatmp.returnvalues import ReturnValue
from itchatmp.utils import retry, encode_send_dict
logger = logging.getLogger('itchatmp')
def create_producer(serverUrl):
def _create(menuDict, agentId=None, accessToken=None):
data = encode_send_dict(menuDict)
if data is None: return ReturnValue({'errcode': -10001})
url = '%s/cgi-bin/menu/create?access_token=%s' % \
(serverUrl, accessToken)
if agentId is not None: url += '&agentid=%s' % agentId
r = requests.post(url, data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
return _create
def get_producer(serverUrl):
def _get(agentId=None, accessToken=None):
url = '%s/cgi-bin/menu/get?access_token=%s' % (serverUrl, accessToken)
if agentId is not None: url += '&agentid=%s' % agentId
r = requests.get(url)
def _wrap_result(result):
result = result.json()
if 'menu' in result: result['errcode'] = 0
return ReturnValue(result)
r._wrap_result = _wrap_result
return r
return _get
def delete_producer(serverUrl):
def _delete(agentId=None, accessToken=None):
url = '%s/cgi-bin/menu/delete?access_token=%s' % (serverUrl, accessToken)
if agentId is not None: url += '&agentid=%s' % agentId
r = requests.get(url)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
return _delete
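# Usage sketch: each factory binds a server URL once and returns a callable that
# performs the request. The server URL and access token below are placeholder
# assumptions; real values come from the itchatmp core configuration.
def example_get_menu(access_token, server_url='https://api.weixin.qq.com'):
    get_menu = get_producer(server_url)
    return get_menu(accessToken=access_token)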
|
{
"content_hash": "c9cb702fef27e770807ead3dc02d1962",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 81,
"avg_line_length": 37.30434782608695,
"alnum_prop": 0.6101398601398601,
"repo_name": "littlecodersh/itchatmp",
"id": "1a8c70ae20d10eaa09eb7f476ab9b7ab046f61c8",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "itchatmp/controllers/mpapi/base/menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157270"
}
],
"symlink_target": ""
}
|
from skimage import transform
from fylm.service.utilities import ImageUtilities, timer
from fylm.service.base import BaseSetService
from skimage.morphology import skeletonize
from fylm.model.constants import Constants
import nd2reader
import numpy as np
import math
import logging
log = logging.getLogger(__name__)
class RotationSet(BaseSetService):
"""
Determines the rotational skew of an image.
"""
def __init__(self, experiment):
super(RotationSet, self).__init__()
self._experiment = experiment
self._name = "rotation corrections"
@timer
def save_action(self, rotation_model):
"""
Calculates the rotation offset for a single field of view and time_period.
:type rotation_model: fylm.model.rotation.Rotation()
"""
log.info("Creating rotation file %s" % rotation_model.filename)
# This is a pretty naive loop - the same file will get opened 8-12 times
# There are obvious ways to optimize this but that can be done later if it matters
# It probably doesn't matter though and I like simple things
nd2_filename = self._experiment.get_nd2_from_time_period(rotation_model.time_period)
nd2 = nd2reader.Nd2(nd2_filename)
        # gets the first in-focus image from the first timepoint in the stack
# TODO: Update nd2reader to figure out which one is in focus or to be able to set it
image = nd2.get_image(0, rotation_model.field_of_view, "", 1)
offset = self._determine_rotation_offset(image.data)
rotation_model.offset = offset
@staticmethod
def _determine_rotation_offset(image):
"""
Finds rotational skew so that the sides of the central trench are (nearly) perfectly vertical.
:param image: raw image data in a 2D (i.e. grayscale) numpy array
:type image: np.array()
"""
segmentation = ImageUtilities.create_vertical_segments(image)
# Draw a line that follows the center of the segments at each point, which should be roughly vertical
# We should expect this to give us four approximately-vertical lines, possibly with many gaps in each line
skeletons = skeletonize(segmentation)
# Use the Hough transform to get the closest lines that approximate those four lines
hough = transform.hough_line(skeletons, np.arange(-Constants.FIFTEEN_DEGREES_IN_RADIANS,
Constants.FIFTEEN_DEGREES_IN_RADIANS,
0.0001))
# Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0 being
# completely vertical
# These angles correspond to the angles of the four sides of the channels, which we need to correct for
angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
if not angles:
log.warn("Image skew could not be calculated. The image is probably invalid.")
return 0.0
else:
# Get the average angle and convert it to degrees
offset = sum(angles) / len(angles) * 180.0 / math.pi
if offset > Constants.ACCEPTABLE_SKEW_THRESHOLD:
log.warn("Image is heavily skewed. Check that the images are valid.")
return offset
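# Hypothetical usage sketch: the computed offset (in degrees) can be fed to
# skimage.transform.rotate to deskew an image. `raw_image` is a placeholder 2D
# grayscale numpy array, and the sign convention may need flipping depending on
# how the offset is consumed downstream.
def deskew(raw_image):
    offset = RotationSet._determine_rotation_offset(raw_image)
    return transform.rotate(raw_image, -offset)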
|
{
"content_hash": "5acc881b26805cd04b1e768e749c71fd",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 115,
"avg_line_length": 45.78378378378378,
"alnum_prop": 0.654073199527745,
"repo_name": "jimrybarski/fylm",
"id": "67ed10576c13554df7c0167d9e6c31ebb41a7466",
"size": "3388",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "fylm/service/rotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171807"
}
],
"symlink_target": ""
}
|
IVERSION = (0, 12, 2)
VERSION = ".".join(str(i) for i in IVERSION)
MINORVERSION = ".".join(str(i) for i in IVERSION[:2])
NAME = "mitmproxy"
NAMEVERSION = NAME + " " + VERSION
NEXT_MINORVERSION = list(IVERSION)
NEXT_MINORVERSION[1] += 1
NEXT_MINORVERSION = ".".join(str(i) for i in NEXT_MINORVERSION[:2])
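# Worked example: with IVERSION = (0, 12, 2) the values above come out to
# VERSION == "0.12.2", MINORVERSION == "0.12", NAMEVERSION == "mitmproxy 0.12.2",
# and NEXT_MINORVERSION == "0.13".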
|
{
"content_hash": "33b1063e5ef025ae0ac36eba38e17415",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.6688524590163935,
"repo_name": "Fuzion24/mitmproxy",
"id": "7836c84981334d51b68ce8455fa2931f8f7aed9f",
"size": "305",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "libmproxy/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "340350"
},
{
"name": "HTML",
"bytes": "97993"
},
{
"name": "JavaScript",
"bytes": "1728505"
},
{
"name": "Python",
"bytes": "681439"
},
{
"name": "Shell",
"bytes": "5474"
}
],
"symlink_target": ""
}
|
from django.core.management.base import NoArgsCommand
from flows.statestore.django_store import StateModel
class Command(NoArgsCommand):
help = "Removes expired flow state from the database (only valid if using the Django state store)"
def handle_noargs(self, **options):
count = StateModel.objects.remove_expired_state()
print 'Deleted %d expired tasks\' state' % count
|
{
"content_hash": "5d2e7564071aea2d3bcc7f20d18ed707",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 102,
"avg_line_length": 44,
"alnum_prop": 0.7474747474747475,
"repo_name": "laterpay/django-flows",
"id": "83db8abdf83ced83ddcb0a471f679838a42fa79d",
"size": "396",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flows/management/commands/cleanupflows.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "55976"
}
],
"symlink_target": ""
}
|
from zerver.lib.test_classes import WebhookTestCase
class BasecampHookTests(WebhookTestCase):
STREAM_NAME = 'basecamp'
URL_TEMPLATE = u"/api/v1/external/basecamp?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'basecamp'
EXPECTED_TOPIC = "Zulip HQ"
def test_basecamp_makes_doc_active(self) -> None:
expected_message = u"Tomasz activated the document [New doc](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214)"
self._send_and_test_message('doc_active', expected_message)
def test_basecamp_makes_doc_archived(self) -> None:
expected_message = u"Tomasz archived the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)"
self._send_and_test_message('doc_archived', expected_message)
def test_basecamp_makes_doc_changed_content(self) -> None:
expected_message = u"Tomasz changed content of the document [New doc edit](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214)"
self._send_and_test_message('doc_content_changed', expected_message)
def test_basecamp_makes_doc_changed_title(self) -> None:
expected_message = u"Tomasz changed title of the document [New doc edit](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214)"
self._send_and_test_message('doc_title_changed', expected_message)
def test_basecamp_makes_doc_publicized(self) -> None:
expected_message = u"Tomasz publicized the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)"
self._send_and_test_message('doc_publicized', expected_message)
def test_basecamp_makes_doc_created(self) -> None:
expected_message = u"Tomasz created the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)"
self._send_and_test_message('doc_created', expected_message)
def test_basecamp_makes_doc_trashed(self) -> None:
expected_message = u"Tomasz trashed the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)"
self._send_and_test_message('doc_trashed', expected_message)
def test_basecamp_makes_doc_unarchived(self) -> None:
expected_message = u"Tomasz unarchived the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988)"
self._send_and_test_message('doc_unarchive', expected_message)
def test_basecamp_makes_questions_answer_archived(self) -> None:
expected_message = u"Tomasz archived the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('questions_answer_archived', expected_message)
def test_basecamp_makes_questions_answer_content_changed(self) -> None:
expected_message = u"Tomasz changed content of the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('questions_answer_content_changed', expected_message)
def test_basecamp_makes_questions_answer_created(self) -> None:
expected_message = u"Tomasz created the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('questions_answer_created', expected_message)
def test_basecamp_makes_questions_answer_trashed(self) -> None:
expected_message = u"Tomasz trashed the [answer](https://3.basecamp.com/3688623/buckets/2957043/question_answers/432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('questions_answer_trashed', expected_message)
def test_basecamp_makes_questions_answer_unarchived(self) -> None:
expected_message = u"Tomasz unarchived the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('questions_answer_unarchived', expected_message)
def test_basecamp_makes_question_archived(self) -> None:
expected_message = u"Tomasz archived the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('question_archived', expected_message)
def test_basecamp_makes_question_created(self) -> None:
expected_message = u"Tomasz created the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('question_created', expected_message)
def test_basecamp_makes_question_trashed(self) -> None:
expected_message = u"Tomasz trashed the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('question_trashed', expected_message)
def test_basecamp_makes_question_unarchived(self) -> None:
expected_message = u"Tomasz unarchived the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)"
self._send_and_test_message('question_unarchived', expected_message)
def test_basecamp_makes_message_archived(self) -> None:
expected_message = u"Tomasz archived the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)"
self._send_and_test_message('message_archived', expected_message)
def test_basecamp_makes_message_content_change(self) -> None:
expected_message = u"Tomasz changed content of the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)"
self._send_and_test_message('message_content_changed', expected_message)
def test_basecamp_makes_message_created(self) -> None:
expected_message = u"Tomasz created the message [Message Title](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)"
self._send_and_test_message('message_created', expected_message)
def test_basecamp_makes_message_title_change(self) -> None:
expected_message = u"Tomasz changed subject of the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)"
self._send_and_test_message('message_title_changed', expected_message)
def test_basecamp_makes_message_trashed(self) -> None:
expected_message = u"Tomasz trashed the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)"
self._send_and_test_message('message_trashed', expected_message)
def test_basecamp_makes_message_unarchived(self) -> None:
expected_message = u"Tomasz unarchived the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605)"
self._send_and_test_message('message_unarchived', expected_message)
def test_basecamp_makes_todo_list_created(self) -> None:
expected_message = u"Tomasz created the todo list [NEW TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190)"
self._send_and_test_message('todo_list_created', expected_message)
def test_basecamp_makes_todo_list_description_changed(self) -> None:
expected_message = u"Tomasz changed description of the todo list [NEW TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190)"
self._send_and_test_message('todo_list_description_changed', expected_message)
def test_basecamp_makes_todo_list_modified(self) -> None:
expected_message = u"Tomasz changed name of the todo list [NEW Name TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190)"
self._send_and_test_message('todo_list_name_changed', expected_message)
def test_basecamp_makes_todo_assignment_changed(self) -> None:
expected_message = u"Tomasz changed assignment of the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)"
self._send_and_test_message('todo_assignment_changed', expected_message)
def test_basecamp_makes_todo_completed(self) -> None:
expected_message = u"Tomasz completed the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)"
self._send_and_test_message('todo_completed', expected_message)
def test_basecamp_makes_todo_created(self) -> None:
expected_message = u"Tomasz created the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)"
self._send_and_test_message('todo_created', expected_message)
def test_basecamp_makes_comment_created(self) -> None:
expected_message = u"Tomasz created the [comment](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624#__recording_427058780) of the task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624)"
self._send_and_test_message('comment_created', expected_message)
def _send_and_test_message(self, fixture_name: str, expected_message: str) -> None:
self.send_and_test_stream_message(fixture_name, self.EXPECTED_TOPIC, expected_message)
|
{
"content_hash": "67820194f1109bb0eff9ee8d11d3d5f5",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 272,
"avg_line_length": 73.85384615384615,
"alnum_prop": 0.7352359129257369,
"repo_name": "dhcrzf/zulip",
"id": "2b1ecc919362e1c470a4f2851a8badcdb149d202",
"size": "9626",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/webhooks/basecamp/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "436713"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "673974"
},
{
"name": "JavaScript",
"bytes": "2951950"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "72908"
},
{
"name": "Python",
"bytes": "6188005"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "118284"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
import base64
import bz2
import collections
import datetime
import io
import os
import threading
import time
import sys
from xml.dom import minidom
from xml.etree import ElementTree
import zipfile
import benchexec
from benchexec.model import MEMLIMIT, TIMELIMIT, CORELIMIT
from benchexec import filewriter
from benchexec import intel_cpu_energy
from benchexec import result
from benchexec import util
RESULT_XML_PUBLIC_ID = "+//IDN sosy-lab.org//DTD BenchExec result 3.0//EN"
RESULT_XML_SYSTEM_ID = "https://www.sosy-lab.org/benchexec/result-3.0.dtd"
# colors for column status in terminal
COLOR_GREEN = "\033[32;1m{0}\033[m"
COLOR_RED = "\033[31;1m{0}\033[m"
COLOR_ORANGE = "\033[33;1m{0}\033[m"
COLOR_MAGENTA = "\033[35;1m{0}\033[m"
COLOR_DEFAULT = "{0}"
UNDERLINE = "\033[4m{0}\033[0m"
COLOR_DIC = collections.defaultdict(lambda: COLOR_DEFAULT)
TERMINAL_TITLE = ""
if util.should_color_output():
COLOR_DIC.update(
{
result.CATEGORY_CORRECT: COLOR_GREEN,
result.CATEGORY_WRONG: COLOR_RED,
result.CATEGORY_UNKNOWN: COLOR_ORANGE,
result.CATEGORY_ERROR: COLOR_MAGENTA,
result.CATEGORY_MISSING: COLOR_DEFAULT,
}
)
if sys.stdout.isatty():
_term = os.environ.get("TERM", "")
if _term.startswith(("xterm", "rxvt")):
TERMINAL_TITLE = "\033]0;Task {0}\007"
elif _term.startswith("screen"):
TERMINAL_TITLE = "\033kTask {0}\033\\"
LEN_OF_STATUS = 25
# the number of digits after the decimal separator for text output of time columns
TIME_PRECISION = 2
_BYTE_FACTOR = 1000 # byte in kilobyte
class OutputHandler(object):
"""
The class OutputHandler manages all outputs to the terminal and to files.
"""
print_lock = threading.Lock()
def __init__(self, benchmark, sysinfo, compress_results):
"""
The constructor of OutputHandler collects information about the benchmark and the computer.
"""
self.compress_results = compress_results
self.all_created_files = set()
self.benchmark = benchmark
self.statistics = Statistics()
version = self.benchmark.tool_version
memlimit = None
timelimit = None
corelimit = None
if self.benchmark.rlimits.memory:
memlimit = str(self.benchmark.rlimits.memory) + "B"
if self.benchmark.rlimits.cputime:
timelimit = str(self.benchmark.rlimits.cputime) + "s"
if self.benchmark.rlimits.cpu_cores:
corelimit = str(self.benchmark.rlimits.cpu_cores)
# create folder for file-specific log-files.
os.makedirs(benchmark.log_folder, exist_ok=True)
self.store_header_in_xml(version, memlimit, timelimit, corelimit)
self.write_header_to_log(sysinfo)
if sysinfo:
# store systemInfo in XML
self.store_system_info(
sysinfo.os,
sysinfo.cpu_model,
sysinfo.cpu_number_of_cores,
sysinfo.cpu_max_frequency,
sysinfo.memory,
sysinfo.hostname,
environment=sysinfo.environment,
cpu_turboboost=sysinfo.cpu_turboboost,
)
self.xml_file_names = []
if compress_results:
self.log_zip = zipfile.ZipFile(
benchmark.log_zip, mode="w", compression=zipfile.ZIP_DEFLATED
)
self.log_zip_lock = threading.Lock()
self.all_created_files.add(benchmark.log_zip)
def store_system_info(
self,
opSystem,
cpu_model,
cpu_number_of_cores,
cpu_max_frequency,
memory,
hostname,
runSet=None,
environment={},
cpu_turboboost=None,
):
for systemInfo in self.xml_header.findall("systeminfo"):
if systemInfo.attrib["hostname"] == hostname:
return
osElem = ElementTree.Element("os", name=opSystem)
cpuElem = ElementTree.Element(
"cpu",
model=cpu_model,
cores=cpu_number_of_cores,
frequency=str(cpu_max_frequency) + "Hz",
)
if cpu_turboboost is not None:
cpuElem.set("turboboostActive", str(cpu_turboboost).lower())
ramElem = ElementTree.Element("ram", size=str(memory) + "B")
systemInfo = ElementTree.Element("systeminfo", hostname=hostname)
systemInfo.append(osElem)
systemInfo.append(cpuElem)
systemInfo.append(ramElem)
env = ElementTree.SubElement(systemInfo, "environment")
for var, value in sorted(environment.items()):
elem = ElementTree.SubElement(env, "var", name=var)
if util.is_legal_for_xml(value):
elem.text = value
else:
elem.text = base64.standard_b64encode(value.encode()).decode()
elem.attrib["encoding"] = "base64"
self.xml_header.append(systemInfo)
if runSet:
# insert before <run> tags to conform with DTD
i = None
for i, elem in enumerate(runSet.xml): # noqa: B007
if elem.tag == "run":
break
if i is None:
runSet.xml.append(systemInfo)
else:
runSet.xml.insert(i, systemInfo)
def set_error(self, msg, runSet=None):
"""
Mark the benchmark as erroneous, e.g., because the benchmarking tool crashed.
The message is intended as explanation for the user.
"""
self.xml_header.set("error", msg or "unknown error")
if runSet:
runSet.xml.set("error", msg or "unknown error")
def store_header_in_xml(self, version, memlimit, timelimit, corelimit):
# store benchmarkInfo in XML
self.xml_header = ElementTree.Element(
"result",
benchmarkname=self.benchmark.name,
date=self.benchmark.start_time.strftime("%Y-%m-%d %H:%M:%S %Z"),
starttime=self.benchmark.start_time.isoformat(),
tool=self.benchmark.tool_name,
version=version,
toolmodule=self.benchmark.tool_module,
generator="BenchExec " + benchexec.__version__,
)
if self.benchmark.display_name:
self.xml_header.set("displayName", self.benchmark.display_name)
if memlimit is not None:
self.xml_header.set(MEMLIMIT, memlimit)
if timelimit is not None:
self.xml_header.set(TIMELIMIT, timelimit)
if corelimit is not None:
self.xml_header.set(CORELIMIT, corelimit)
if self.benchmark.description:
description_tag = ElementTree.Element("description")
description_tag.text = self.benchmark.description
self.xml_header.append(description_tag)
        # store column titles in XML; these are the default columns that are shown in a default HTML table by table-generator
columntitlesElem = ElementTree.Element("columns")
columntitlesElem.append(ElementTree.Element("column", title="status"))
columntitlesElem.append(ElementTree.Element("column", title="cputime"))
columntitlesElem.append(ElementTree.Element("column", title="walltime"))
for column in self.benchmark.columns:
columnElem = ElementTree.Element("column", title=column.title)
columntitlesElem.append(columnElem)
self.xml_header.append(columntitlesElem)
def write_header_to_log(self, sysinfo):
"""
This method writes information about benchmark and system into txt_file.
"""
runSetName = None
run_sets = [
runSet for runSet in self.benchmark.run_sets if runSet.should_be_executed()
]
if len(run_sets) == 1:
            # in case there is only a single run set to execute, we can use its name
runSetName = run_sets[0].name
columnWidth = 25
simpleLine = "-" * 60 + "\n\n"
def format_line(key, value):
if value is None:
return ""
return ((key + ":").ljust(columnWidth) + str(value)).strip() + "\n"
def format_byte(key, value):
if value is None:
return ""
return format_line(key, str(value / _BYTE_FACTOR / _BYTE_FACTOR) + " MB")
def format_time(key, value):
if value is None:
return ""
return format_line(key, str(value) + " s")
header = (
" BENCHMARK INFORMATION\n"
+ (
(self.benchmark.display_name + "\n")
if self.benchmark.display_name
else ""
)
+ format_line("benchmark definition", self.benchmark.benchmark_file)
+ format_line("name", self.benchmark.name)
+ format_line("run sets", ", ".join(run_set.name for run_set in run_sets))
+ format_line(
"date", self.benchmark.start_time.strftime("%a, %Y-%m-%d %H:%M:%S %Z")
)
+ format_line(
"tool", self.benchmark.tool_name + " " + self.benchmark.tool_version
)
+ format_line("tool executable", self.benchmark.executable)
+ format_line(
"options",
" ".join(map(util.escape_string_shell, self.benchmark.options)),
)
+ format_line(
"property file", util.text_or_none(self.benchmark.propertytag)
)
)
if self.benchmark.num_of_threads > 1:
header += format_line("parallel runs", self.benchmark.num_of_threads)
header += (
"resource limits:\n"
+ format_byte("- memory", self.benchmark.rlimits.memory)
+ format_time("- time", self.benchmark.rlimits.cputime)
+ format_line("- cpu cores", self.benchmark.rlimits.cpu_cores)
)
header += (
"hardware requirements:\n"
+ format_line("- cpu model", self.benchmark.requirements.cpu_model)
+ format_line("- cpu cores", self.benchmark.requirements.cpu_cores)
+ format_byte("- memory", self.benchmark.requirements.memory)
+ simpleLine
)
if sysinfo:
header += (
" SYSTEM INFORMATION\n"
+ format_line("host", sysinfo.hostname)
+ format_line("os", sysinfo.os)
+ format_line("cpu", sysinfo.cpu_model)
+ format_line("- cores", sysinfo.cpu_number_of_cores)
+ format_line(
"- max frequency",
str(sysinfo.cpu_max_frequency / 1000 / 1000) + " MHz",
)
+ format_line("- turbo boost enabled", sysinfo.cpu_turboboost)
+ format_byte("ram", sysinfo.memory)
+ simpleLine
)
self.description = header
# write to file
txt_file_name = self.get_filename(runSetName, "txt")
self.txt_file = filewriter.FileWriter(txt_file_name, self.description)
self.all_created_files.add(txt_file_name)
def output_before_run_set(self, runSet, start_time=None):
"""
The method output_before_run_set() calculates the length of the
first column for the output in terminal and stores information
about the runSet in XML.
@param runSet: current run set
"""
xml_file_name = self.get_filename(runSet.name, "xml")
identifier_names = [run.identifier for run in runSet.runs]
# common prefix of file names
runSet.common_prefix = util.common_base_dir(identifier_names)
if runSet.common_prefix:
runSet.common_prefix += os.path.sep
# length of the first column in terminal
runSet.max_length_of_filename = (
max(len(file) for file in identifier_names) if identifier_names else 20
)
runSet.max_length_of_filename = max(
20, runSet.max_length_of_filename - len(runSet.common_prefix)
)
# write run set name to terminal
numberOfFiles = len(runSet.runs)
numberOfFilesStr = (
" (1 file)" if numberOfFiles == 1 else f" ({numberOfFiles} files)"
)
util.printOut(
"\nexecuting run set"
+ (" '" + runSet.name + "'" if runSet.name else "")
+ numberOfFilesStr
+ TERMINAL_TITLE.format(runSet.full_name)
)
# write information about the run set into txt_file
self.writeRunSetInfoToLog(runSet)
# prepare information for text output
for run in runSet.runs:
run.resultline = self.format_sourcefile_name(run.identifier, runSet)
if run.sourcefiles:
adjusted_identifier = util.relative_path(run.identifier, xml_file_name)
else:
# If no source files exist the task doesn't point to any file that could be downloaded.
# In this case, the name doesn't have to be adjusted because it's no path.
adjusted_identifier = run.identifier
# prepare XML structure for each run and runSet
run.xml = ElementTree.Element("run", name=adjusted_identifier)
if run.sourcefiles:
adjusted_sourcefiles = (
util.relative_path(s, xml_file_name) for s in run.sourcefiles
)
run.xml.set("files", "[" + ", ".join(adjusted_sourcefiles) + "]")
if run.specific_options:
run.xml.set("options", " ".join(run.specific_options))
if run.properties:
all_properties = (prop.name for prop in run.properties)
run.xml.set("properties", " ".join(sorted(all_properties)))
if len(run.properties) == 1:
prop = run.properties[0]
run.xml.set(
"propertyFile", util.relative_path(prop.filename, xml_file_name)
)
expected_result = str(run.expected_results.get(prop.filename, ""))
if expected_result:
run.xml.set("expectedVerdict", expected_result)
block_name = runSet.blocks[0].name if len(runSet.blocks) == 1 else None
runSet.xml = self.runs_to_xml(runSet, runSet.runs, block_name)
if start_time:
runSet.xml.set("starttime", start_time.isoformat())
elif not self.benchmark.config.start_time:
runSet.xml.set("starttime", util.read_local_time().isoformat())
# write (empty) results to XML
runSet.xml_file_name = xml_file_name
self._write_rough_result_xml_to_file(runSet.xml, runSet.xml_file_name)
runSet.xml_file_last_modified_time = time.monotonic()
self.all_created_files.add(runSet.xml_file_name)
self.xml_file_names.append(runSet.xml_file_name)
def output_for_skipping_run_set(self, runSet, reason=None):
"""
This function writes a simple message to terminal and logfile,
when a run set is skipped.
There is no message about skipping a run set in the xml-file.
"""
# print to terminal
util.printOut(
"\nSkipping run set"
+ (" '" + runSet.name + "'" if runSet.name else "")
+ (" " + reason if reason else "")
)
# write into txt_file
runSetInfo = "\n\n"
if runSet.name:
runSetInfo += runSet.name + "\n"
runSetInfo += (
f"Run set {runSet.index} of {len(self.benchmark.run_sets)}: "
f"skipped {reason or ''}".rstrip()
)
runSetInfo += "\n"
self.txt_file.append(runSetInfo)
def writeRunSetInfoToLog(self, runSet):
"""
This method writes the information about a run set into the txt_file.
"""
runSetInfo = "\n\n"
if runSet.name:
runSetInfo += runSet.name + "\n"
runSetInfo += (
f"Run set {runSet.index} of {len(self.benchmark.run_sets)} "
f"with options '{' '.join(runSet.options)}' and "
f"propertyfile '{util.text_or_none(runSet.propertytag)}'\n\n"
)
titleLine = self.create_output_line(
runSet,
"inputfile",
"status",
"cpu time",
"wall time",
"host",
self.benchmark.columns,
True,
)
runSet.simpleLine = "-" * (len(titleLine))
runSetInfo += titleLine + "\n" + runSet.simpleLine + "\n"
# write into txt_file
self.txt_file.append(runSetInfo)
def output_before_run(self, run):
"""
The method output_before_run() prints the name of a file to terminal.
It returns the name of the logfile.
@param run: a Run object
"""
# output in terminal
runSet = run.runSet
try:
OutputHandler.print_lock.acquire()
try:
runSet.started_runs += 1
except AttributeError:
runSet.started_runs = 1
timeStr = time.strftime("%H:%M:%S", time.localtime()) + " "
progressIndicator = f" ({runSet.started_runs}/{len(runSet.runs)})"
terminalTitle = TERMINAL_TITLE.format(runSet.full_name + progressIndicator)
if self.benchmark.num_of_threads == 1:
util.printOut(
terminalTitle
+ timeStr
+ self.format_sourcefile_name(run.identifier, runSet),
"",
)
else:
util.printOut(
terminalTitle
+ timeStr
+ "starting "
+ self.format_sourcefile_name(run.identifier, runSet)
)
finally:
OutputHandler.print_lock.release()
def output_after_run(self, run):
"""
The method output_after_run() prints filename, result, time and status
of a run to terminal and stores all data in XML
"""
# format times, type is changed from float to string!
cputime_str = util.format_number(run.values.get("cputime"), TIME_PRECISION)
walltime_str = util.format_number(run.values.get("walltime"), TIME_PRECISION)
# format numbers, number_of_digits is optional, so it can be None
for column in run.columns:
if column.number_of_digits is not None:
# if the number ends with "s" or another letter, remove it
if (not column.value.isdigit()) and column.value[-2:-1].isdigit():
column.value = column.value[:-1]
try:
floatValue = float(column.value)
column.value = util.format_number(
floatValue, column.number_of_digits
)
except ValueError: # if value is no float, don't format it
pass
# store information in run
run.resultline = self.create_output_line(
run.runSet,
run.identifier,
run.status,
cputime_str,
walltime_str,
run.values.get("host"),
run.columns,
)
self.add_values_to_run_xml(run)
# output in terminal/console
statusStr = COLOR_DIC[run.category].format(run.status.ljust(LEN_OF_STATUS))
try:
OutputHandler.print_lock.acquire()
valueStr = statusStr + cputime_str.rjust(8) + walltime_str.rjust(8)
if self.benchmark.num_of_threads == 1:
util.printOut(valueStr)
else:
timeStr = time.strftime("%H:%M:%S", time.localtime()) + " " * 14
util.printOut(
timeStr
+ self.format_sourcefile_name(run.identifier, run.runSet)
+ valueStr
)
# write result in txt_file and XML
self.txt_file.append(run.resultline + "\n", keep=False)
self.statistics.add_result(run)
            # we don't want to write this file too often, it can slow down the whole script,
            # so we wait at least 60 seconds between two write-actions
currentTime = time.monotonic()
if currentTime - run.runSet.xml_file_last_modified_time > 60:
self._write_rough_result_xml_to_file(
run.runSet.xml, run.runSet.xml_file_name
)
run.runSet.xml_file_last_modified_time = time.monotonic()
finally:
OutputHandler.print_lock.release()
if self.compress_results:
log_file_path = os.path.relpath(
run.log_file, os.path.join(self.benchmark.log_folder, os.pardir)
)
with self.log_zip_lock:
self.log_zip.write(run.log_file, log_file_path)
os.remove(run.log_file)
else:
self.all_created_files.add(run.log_file)
if os.path.isdir(run.result_files_folder):
self.all_created_files.add(run.result_files_folder)
def output_after_run_set(
self, runSet, cputime=None, walltime=None, energy={}, cache={}, end_time=None
):
"""
The method output_after_run_set() stores the times of a run set in XML.
@params cputime, walltime: accumulated times of the run set
"""
self.add_values_to_run_set_xml(runSet, cputime, walltime, energy, cache)
if end_time:
runSet.xml.set("endtime", end_time.isoformat())
elif not self.benchmark.config.start_time:
runSet.xml.set("endtime", util.read_local_time().isoformat())
# Write results to files. This overwrites the intermediate files written
# from output_after_run with the proper results.
self._write_pretty_result_xml_to_file(runSet.xml, runSet.xml_file_name)
if len(runSet.blocks) > 1:
for block in runSet.blocks:
blockFileName = self.get_filename(runSet.name, block.name + ".xml")
block_xml = self.runs_to_xml(runSet, block.runs, block.name)
block_xml.set("starttime", runSet.xml.get("starttime"))
if runSet.xml.get("endtime"):
block_xml.set("endtime", runSet.xml.get("endtime"))
self._write_pretty_result_xml_to_file(block_xml, blockFileName)
self.txt_file.append(self.run_set_to_text(runSet, cputime, walltime, energy))
def run_set_to_text(self, runSet, cputime=0, walltime=0, energy={}):
lines = []
# store values of each run
for run in runSet.runs:
lines.append(run.resultline)
lines.append(runSet.simpleLine)
# write endline into txt_file
endline = f"Run set {runSet.index}"
# format time, type is changed from float to string!
cputime_str = (
"None" if cputime is None else util.format_number(cputime, TIME_PRECISION)
)
walltime_str = (
"None" if walltime is None else util.format_number(walltime, TIME_PRECISION)
)
lines.append(
self.create_output_line(
runSet, endline, "done", cputime_str, walltime_str, "-", []
)
)
return "\n".join(lines) + "\n"
def runs_to_xml(self, runSet, runs, blockname=None):
"""
This function creates the XML structure for a list of runs
"""
# copy benchmarkinfo, limits, columntitles, systeminfo from xml_header
runsElem = util.copy_of_xml_element(self.xml_header)
runsElem.set("options", " ".join(runSet.options))
if blockname is not None:
runsElem.set("block", blockname)
runsElem.set(
"name",
((runSet.real_name + ".") if runSet.real_name else "") + blockname,
)
elif runSet.real_name:
runsElem.set("name", runSet.real_name)
# collect XMLelements from all runs
for run in runs:
runsElem.append(run.xml)
return runsElem
def add_values_to_run_xml(self, run):
"""
This function adds the result values to the XML representation of a run.
"""
runElem = run.xml
for elem in list(runElem):
runElem.remove(elem)
self.add_column_to_xml(runElem, "status", run.status)
self.add_column_to_xml(runElem, "@category", run.category) # hidden
self.add_column_to_xml(runElem, "", run.values)
for column in run.columns:
self.add_column_to_xml(runElem, column.title, column.value)
# Sort child elements by hidden and title attributes
runElem[:] = sorted(
runElem, key=lambda elem: (elem.get("hidden", ""), elem.get("title"))
)
def add_values_to_run_set_xml(self, runSet, cputime, walltime, energy, cache):
"""
This function adds the result values to the XML representation of a runSet.
"""
self.add_column_to_xml(runSet.xml, "cputime", cputime)
self.add_column_to_xml(runSet.xml, "walltime", walltime)
energy = intel_cpu_energy.format_energy_results(energy)
for energy_key, energy_value in energy.items():
self.add_column_to_xml(runSet.xml, energy_key, energy_value)
for cache_key, cache_value in cache.items():
self.add_column_to_xml(runSet.xml, cache_key, cache_value)
def add_column_to_xml(self, xml, title, value, prefix="", value_suffix=""):
if value is None:
return
if isinstance(value, dict):
for key, item_value in value.items():
if prefix:
common_prefix = prefix + "_" + title
else:
common_prefix = title
self.add_column_to_xml(xml, key, item_value, prefix=common_prefix)
return
if hasattr(value, "__getitem__") and not isinstance(value, (str, bytes)):
value = ",".join(map(str, value))
elif isinstance(value, datetime.datetime):
value = value.isoformat()
if prefix:
title = prefix + "_" + title
if title[0] == "@":
hidden = True
title = title[1:]
else:
hidden = False
if not value_suffix and not isinstance(value, (str, bytes)):
if title.startswith("cputime") or title.startswith("walltime"):
value_suffix = "s"
elif title.startswith("cpuenergy"):
value_suffix = "J"
elif title.startswith("blkio-") or title.startswith("memory"):
value_suffix = "B"
elif title.startswith("llc"):
if not title.startswith("llc_misses"):
value_suffix = "B"
elif title.startswith("mbm"):
value_suffix = "B/s"
value = f"{value}{value_suffix}"
element = ElementTree.Element("column", title=title, value=value)
if hidden:
element.set("hidden", "true")
xml.append(element)
def create_output_line(
self,
runSet,
sourcefile,
status,
cputime_delta,
walltime_delta,
host,
columns,
isFirstLine=False,
):
"""
@param sourcefile: title of a sourcefile
@param status: status of programm
@param cputime_delta: time from running the programm
@param walltime_delta: time from running the programm
@param columns: list of columns with a title or a value
@param isFirstLine: boolean for different output of headline and other lines
@return: a line for the outputFile
"""
lengthOfTime = 12
minLengthOfColumns = 8
outputLine = (
self.format_sourcefile_name(sourcefile, runSet)
+ status.ljust(LEN_OF_STATUS)
+ cputime_delta.rjust(lengthOfTime)
+ walltime_delta.rjust(lengthOfTime)
+ str(host).rjust(lengthOfTime)
)
for column in columns:
columnLength = max(minLengthOfColumns, len(column.title)) + 2
if isFirstLine:
value = column.title
else:
value = column.value
outputLine = outputLine + str(value).rjust(columnLength)
return outputLine
def output_after_benchmark(self, isStoppedByInterrupt):
stats = str(self.statistics)
util.printOut(stats)
self.txt_file.append(stats)
if self.xml_file_names:
def _find_file_relative(name):
"""
Find a file with the given name in the same directory as this script.
Returns a path relative to the current directory, or None.
"""
main_dir = os.path.dirname(sys.argv[0])
search_dirs = [
main_dir,
os.path.join(main_dir, os.path.pardir, "bin"),
os.path.join(os.path.dirname(__file__), os.path.pardir),
]
path = util.find_executable2(name, search_dirs)
if not path:
return None
if os.path.dirname(path) in util.get_path():
# in PATH, just use command name
return os.path.basename(path)
path = os.path.relpath(path)
if path == name:
path = "./" + path # for easier copy and paste into a shell
return path
tableGeneratorPath = _find_file_relative(
"table-generator.py"
) or _find_file_relative("table-generator")
if tableGeneratorPath:
xml_file_names = (
[file + ".bz2" for file in self.xml_file_names]
if self.compress_results
else self.xml_file_names
)
cmdline = [tableGeneratorPath] + xml_file_names
util.printOut(
"In order to get HTML and CSV tables, run\n"
+ " ".join(map(util.escape_string_shell, cmdline)),
)
if isStoppedByInterrupt:
util.printOut(
"\nScript was interrupted by user, some runs may not be done.\n"
)
def close(self):
"""Do all necessary cleanup."""
self.txt_file.close()
if self.compress_results:
with self.log_zip_lock:
zip_is_empty = not self.log_zip.namelist()
self.log_zip.close()
if zip_is_empty:
# remove useless ZIP file, e.g., because all runs were skipped
os.remove(self.benchmark.log_zip)
self.all_created_files.remove(self.benchmark.log_zip)
# remove useless log folder if it is empty,
# e.g., because all logs were written to the ZIP file
try:
os.rmdir(self.benchmark.log_folder)
except OSError:
pass
def get_filename(self, runSetName, fileExtension):
"""
This function returns the name of the file for a run set
with an extension ("txt", "xml").
"""
fileName = self.benchmark.output_base_name + ".results."
if runSetName:
fileName += runSetName + "."
return fileName + fileExtension
def format_sourcefile_name(self, fileName, runSet):
"""
Formats the file name of a program for printing on console.
"""
if fileName.startswith(runSet.common_prefix):
fileName = fileName[len(runSet.common_prefix) :]
return fileName.ljust(runSet.max_length_of_filename + 4)
def _write_rough_result_xml_to_file(self, xml, filename):
"""Write a rough string version of the XML (for temporary files)."""
# Write content to temp file first
error = xml.get("error", None)
xml.set("error", "incomplete") # Mark result file as incomplete
temp_filename = filename + ".tmp"
with open(temp_filename, "wb") as file:
ElementTree.ElementTree(xml).write(
file, encoding="utf-8", xml_declaration=True
)
os.rename(temp_filename, filename)
if error is not None:
xml.set("error", error)
else:
del xml.attrib["error"]
def _write_pretty_result_xml_to_file(self, xml, filename):
"""Writes a nicely formatted XML file with DOCTYPE, and compressed if necessary."""
if self.compress_results:
actual_filename = filename + ".bz2"
open_func = bz2.BZ2File
else:
# write content to temp file first to prevent losing data
# in existing file if writing fails
actual_filename = filename + ".tmp"
open_func = open
with io.TextIOWrapper(
open_func(actual_filename, "wb"), encoding="utf-8"
) as file:
rough_string = ElementTree.tostring(xml, encoding="unicode")
reparsed = minidom.parseString(rough_string)
doctype = minidom.DOMImplementation().createDocumentType(
"result", RESULT_XML_PUBLIC_ID, RESULT_XML_SYSTEM_ID
)
reparsed.insertBefore(doctype, reparsed.documentElement)
reparsed.writexml(
file, indent="", addindent=" ", newl="\n", encoding="utf-8"
)
if self.compress_results:
# try to delete uncompressed file (would have been overwritten in no-compress-mode)
try:
os.remove(filename)
except OSError:
pass
self.all_created_files.discard(filename)
self.all_created_files.add(actual_filename)
else:
os.rename(actual_filename, filename)
self.all_created_files.add(filename)
return filename
class Statistics(object):
def __init__(self):
self.dic = collections.defaultdict(int)
self.counter = 0
self.score = 0
self.max_score = None
def add_result(self, run):
self.counter += 1
self.dic[run.category] += 1
self.dic[(run.category, result.get_result_classification(run.status))] += 1
for prop in run.properties:
self.score += prop.compute_score(run.category, run.status) or 0
max_score = prop.max_score(run.expected_results.get(prop.filename))
if max_score is not None:
self.max_score = max_score + (self.max_score or 0)
def __str__(self):
correct = self.dic[result.CATEGORY_CORRECT]
correct_true = self.dic[(result.CATEGORY_CORRECT, result.RESULT_CLASS_TRUE)]
correct_false = correct - correct_true
incorrect = self.dic[result.CATEGORY_WRONG]
incorrect_true = self.dic[(result.CATEGORY_WRONG, result.RESULT_CLASS_TRUE)]
incorrect_false = incorrect - incorrect_true
width = 6
output = [
"",
"Statistics:" + str(self.counter).rjust(width + 9) + " Files",
" correct: " + str(correct).rjust(width),
" correct true: " + str(correct_true).rjust(width),
" correct false: " + str(correct_false).rjust(width),
" incorrect: " + str(incorrect).rjust(width),
" incorrect true: " + str(incorrect_true).rjust(width),
" incorrect false:" + str(incorrect_false).rjust(width),
" unknown: "
+ str(
self.dic[result.CATEGORY_UNKNOWN] + self.dic[result.CATEGORY_ERROR]
).rjust(width),
]
if self.max_score is not None:
output.append(
" Score: "
+ str(self.score).rjust(width)
+ " (max: "
+ str(self.max_score)
+ ")"
)
output.append("")
return "\n".join(output)
|
{
"content_hash": "a5e8a6c89375c8a1d06b4063a295f948",
"timestamp": "",
"source": "github",
"line_count": 969,
"max_line_length": 126,
"avg_line_length": 37.539731682146545,
"alnum_prop": 0.5604794369914229,
"repo_name": "ultimate-pa/benchexec",
"id": "cded46e98475c82b36d03c9604a322c3f801696b",
"size": "36606",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "benchexec/outputhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3210"
},
{
"name": "CSS",
"bytes": "609"
},
{
"name": "Dockerfile",
"bytes": "3164"
},
{
"name": "Gnuplot",
"bytes": "5032"
},
{
"name": "HTML",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "75586"
},
{
"name": "Jinja",
"bytes": "285"
},
{
"name": "PHP",
"bytes": "4241"
},
{
"name": "Python",
"bytes": "1212689"
},
{
"name": "Roff",
"bytes": "3145"
},
{
"name": "SCSS",
"bytes": "25181"
},
{
"name": "Shell",
"bytes": "7671"
},
{
"name": "TeX",
"bytes": "7458"
}
],
"symlink_target": ""
}
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings, assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from nose import SkipTest
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
"""
    The principle of LARS is to keep covariances tied and decreasing.
"""
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
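            # LARS adds one active variable per step, so after step i exactly
            # i + 1 covariances should be tied (within eps) at the maximum C,
            # capped at the total number of features.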
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
"""
The same, with precomputed Gram matrix
"""
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
"""
Test that lars_path with precomputed Gram and Xy gives the right answer
"""
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
"""
Test that Lars gives least square solution at the end
of the path
"""
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
"""
Test that Lars Lasso gives least square solution at the end
of the path
"""
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
"""Check that lars_path is robust to collinearity in input"""
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
"""
Test that the ``return_path=False`` option returns the correct output
"""
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
"""
Test that the ``return_path=False`` option with Gram remains correct
"""
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
"""
Test that the ``return_path=False`` option with Gram and Xy remains correct
"""
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
# In this test the "drop for good strategy" of lars_path is necessary
# to give a good answer
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
in_warn_message = 'Dropping a regressor'
f = assert_warns_message
alphas, active, coef_path = f(ConvergenceWarning, in_warn_message,
linear_model.lars_path, X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
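        # Lasso objective: 1/(2 * n_samples) * ||y - X w||_2^2 + alpha * ||w||_1,
        # here with n_samples = 3 and alpha = 0.1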
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
"""
Test that LassoLars and Lasso using coordinate descent give the
same results.
"""
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
"""
Test that LassoLars and Lasso using coordinate descent give the
same results when early stopping is used.
(test : before, in the middle, and in the last part of the path)
"""
    alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
import struct
if struct.calcsize('P') * 8 == 32:
# FIXME in master
raise SkipTest("skip unstable test on 32 bit arch")
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
f = assert_warns_message
def in_warn_message(msg):
return 'Early stopping' in msg or 'Dropping a regressor' in msg
lars_alphas, _, lars_coef = f(ConvergenceWarning,
in_warn_message,
linear_model.lars_path, X, y, method='lasso')
with ignore_warnings():
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
lasso_coef = np.zeros((w.shape[0], len(lars_alphas)))
iter_models = enumerate(linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
return_models=True,
fit_intercept=False))
for i, model in iter_models:
lasso_coef[:, i] = model.coef_
np.testing.assert_array_almost_equal(lars_coef, lasso_coef, decimal=1)
np.testing.assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
np.testing.assert_array_almost_equal(lasso_coef, lasso_coef2, decimal=1)
def test_lars_drop_for_good():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
with ignore_warnings():
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
"""
assure that at least some features get added if necessary
test for 6d2b4c
"""
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
"""
Assure that estimators receiving multidimensional y do the right thing
"""
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
""" Test the LassoLarsCV object by checking that the optimal alpha
increases as the number of samples increases.
    This property is not actually guaranteed in general; it just holds
    for the given dataset with the chosen subset sizes.
"""
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
""" Test the LassoLarsIC object by checking that
- some good features are selected.
- alpha_bic > alpha_aic
- n_nonzero_bic < n_nonzero_aic
"""
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
if __name__ == '__main__':
import nose
nose.runmodule()
|
{
"content_hash": "9bebdf3cf4a6e71e35a1adbfff49f972",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 79,
"avg_line_length": 35.54602510460251,
"alnum_prop": 0.5798363839679831,
"repo_name": "chaluemwut/fbserver",
"id": "02a0ac99bd164e922cf6c7f444b36ec4fcf298fb",
"size": "16991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/sklearn/linear_model/tests/test_least_angle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "285664"
},
{
"name": "C++",
"bytes": "3368760"
},
{
"name": "CSS",
"bytes": "13796"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "147096"
},
{
"name": "JavaScript",
"bytes": "19910"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "19806072"
},
{
"name": "Shell",
"bytes": "3769"
}
],
"symlink_target": ""
}
|
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from test_framework.test_framework import VergeTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(VergeTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-checkmempool"]]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
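        # With the tip at height 200, the coinbase of block 101 has 100
        # confirmations and matures in the next block, while block 102's
        # coinbase has only 99.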
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises_rpc_error(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
|
{
"content_hash": "18243a71c4e51b2dd3798b3674c0134a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 124,
"avg_line_length": 41.02040816326531,
"alnum_prop": 0.673134328358209,
"repo_name": "vergecurrency/VERGE",
"id": "4ffc6b8ef7fdb7a98d19eda9298316f5477462da",
"size": "2224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/mempool_spend_coinbase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "4146809"
},
{
"name": "C++",
"bytes": "6598251"
},
{
"name": "CMake",
"bytes": "58"
},
{
"name": "Dockerfile",
"bytes": "7558"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "30330"
},
{
"name": "M4",
"bytes": "214935"
},
{
"name": "Makefile",
"bytes": "106252"
},
{
"name": "Objective-C",
"bytes": "4891"
},
{
"name": "Objective-C++",
"bytes": "6640"
},
{
"name": "Python",
"bytes": "1404272"
},
{
"name": "QMake",
"bytes": "754"
},
{
"name": "Shell",
"bytes": "102969"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "f284d64f3a80636ca0c5138c2f4fa616",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 26,
"alnum_prop": 0.688034188034188,
"repo_name": "dresl/django_choice_and_question",
"id": "b0cd5b738d2348c695a542014697ef9b642c2538",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "117"
},
{
"name": "HTML",
"bytes": "4734"
},
{
"name": "Python",
"bytes": "14431"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns
from django.conf.urls import url
from trove_dashboard.content.databases.upgrade import views
VIEWS_MOD = ('trove_dashboard.content.databases.upgrade.views')
urlpatterns = patterns(
VIEWS_MOD,
url(r'^upgrade_instance$', views.UpgradeInstanceView.as_view(),
name='upgrade_instance'),
)
|
{
"content_hash": "2cb824ffb3a220f5427adf70b3b1b73c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.7485380116959064,
"repo_name": "Tesora-Release/tesora-trove-dashboard",
"id": "830adc2344089445a8a6c9baeb059a7820c08877",
"size": "946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove_dashboard/content/databases/upgrade/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4087"
},
{
"name": "HTML",
"bytes": "33250"
},
{
"name": "JavaScript",
"bytes": "1933"
},
{
"name": "Python",
"bytes": "432226"
},
{
"name": "Shell",
"bytes": "18300"
}
],
"symlink_target": ""
}
|
"""
Second quantization operators and states for bosons.
This follows the formulation of Fetter and Walecka, "Quantum Theory
of Many-Particle Systems."
"""
from sympy import (
Basic, Function, var, Mul, sympify, Integer, Add, sqrt,
    Number, Matrix, zeros, Pow, I, S, Symbol, latex, cache
)
from sympy.utilities import deprecated, iff
from sympy.core.cache import cacheit
__all__ = [
'Dagger',
'KroneckerDelta',
'BosonicOperator',
'AnnihilateBoson',
'CreateBoson',
'AnnihilateFermion',
'CreateFermion',
'FockState',
'FockStateBra',
'FockStateKet',
'BBra',
'BKet',
'FBra',
'FKet',
'F',
'Fd',
'B',
'Bd',
'apply_operators',
'InnerProduct',
'BosonicBasis',
'VarBosonicBasis',
'FixedBosonicBasis',
'Commutator',
'matrix_rep',
'contraction',
'wicks',
'NO',
'evaluate_deltas',
'SymTuple',
'AntiSymmetricTensor',
'substitute_dummies',
'PermutationOperator',
'simplify_index_permutations',
]
class SecondQuantizationError(Exception):
pass
class AppliesOnlyToSymbolicIndex(SecondQuantizationError):
pass
class ContractionAppliesOnlyToFermions(SecondQuantizationError):
pass
class ViolationOfPauliPrinciple(SecondQuantizationError):
pass
class SubstitutionOfAmbigousOperatorFailed(SecondQuantizationError):
pass
class WicksTheoremDoesNotApply(SecondQuantizationError):
pass
class Dagger(Basic):
"""
Hermitian conjugate of creation/annihilation operators.
Example:
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
"""
def __new__(cls, arg):
arg = sympify(arg)
r = cls.eval(arg)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, arg)
return obj
@classmethod
def eval(cls, arg):
"""
Evaluates the Dagger instance.
Example:
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
The eval() method is called automatically.
"""
try:
d = arg._dagger_()
        except AttributeError:
if isinstance(arg, Basic):
if arg.is_Add:
return Add(*tuple(map(Dagger, arg.args)))
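                # conjugation reverses the order of factors:
                # (A*B)^dagger == Dagger(B)*Dagger(A)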
if arg.is_Mul:
return Mul(*tuple(map(Dagger, reversed(arg.args))))
if arg.is_Number:
return arg
if arg.is_Pow:
return Pow(Dagger(arg.args[0]),arg.args[1])
if arg == I:
return -arg
else:
return None
else:
return d
def _eval_subs(self, old, new):
r = Dagger(self.args[0].subs(old, new))
return r
def _dagger_(self):
return self.args[0]
class TensorSymbol(Function):
is_commutative = True
class SymTuple(Basic):
def __new__(cls, arg_tuple, **kw_args):
"""
        The wrapped tuple is available as self.args.
"""
obj = Basic.__new__(cls,*arg_tuple, **kw_args)
return obj
def __getitem__(self,i):
if isinstance(i,slice):
indices = i.indices(len(self))
return SymTuple(tuple([self.args[i] for i in range(*indices)]))
return self.args[i]
def __len__(self):
return len(self.args)
def __contains__(self,item):
return item in self.args
def _eval_subs(self,old,new):
if self==old:
return new
t=tuple([ el._eval_subs(old,new) for el in self.args])
return self.__class__(t)
def _tuple_wrapper(method):
"""
Decorator that makes any tuple in arguments into SymTuple
"""
def wrap_tuples(*args, **kw_args):
newargs=[]
for arg in args:
if type(arg) is tuple:
newargs.append(SymTuple(arg))
else:
newargs.append(arg)
return method(*newargs, **kw_args)
return wrap_tuples
class AntiSymmetricTensor(TensorSymbol):
nargs = 3
@_tuple_wrapper
def __new__(cls, symbol, upper, lower):
return TensorSymbol.__new__(cls, symbol, upper, lower)
@classmethod
def eval(cls, symbol, upper, lower):
"""
Simplifies the tensor.
Upper and lower are tuples with indices.
Examples:
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> AntiSymmetricTensor('t', (a, b), (i, j))
AntiSymmetricTensor(t, SymTuple(a, b), SymTuple(i, j))
>>> AntiSymmetricTensor('t', (b, a), (i, j))
-AntiSymmetricTensor(t, SymTuple(a, b), SymTuple(i, j))
>>> -AntiSymmetricTensor('t', (b, a), (i, j))
AntiSymmetricTensor(t, SymTuple(a, b), SymTuple(i, j))
As you can see, the eval() method is automatically called.
"""
try:
upper,sign = _sort_anticommuting_fermions(upper)
if sign%2:
upper = tuple(upper)
return -cls(symbol,upper,lower)
if sign:
upper = tuple(upper)
return cls(symbol,upper,lower)
lower,sign = _sort_anticommuting_fermions(lower)
if sign%2:
upper = tuple(upper)
lower = tuple(lower)
return -cls(symbol,upper,lower)
if sign:
upper = tuple(upper)
lower = tuple(lower)
return cls(symbol,upper,lower)
except ViolationOfPauliPrinciple:
return S.Zero
def _latex_(self,printer):
return "%s^{%s}_{%s}" %(
self.symbol,
"".join([ i.name for i in self.args[1]]),
"".join([ i.name for i in self.args[2]])
)
@property
def symbol(self):
"""
Returns the symbol of the tensor.
Example:
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> AntiSymmetricTensor('t', (a, b), (i, j))
AntiSymmetricTensor(t, SymTuple(a, b), SymTuple(i, j))
>>> AntiSymmetricTensor('t', (a, b), (i, j)).symbol
t
"""
return self.args[0]
@property
def upper(self):
"""
Returns the upper indices.
Example:
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> AntiSymmetricTensor('t', (a, b), (i, j))
AntiSymmetricTensor(t, SymTuple(a, b), SymTuple(i, j))
>>> AntiSymmetricTensor('t', (a, b), (i, j)).upper
SymTuple(a, b)
"""
return self.args[1]
@property
def lower(self):
"""
Returns the lower indices.
Example:
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> AntiSymmetricTensor('t', (a, b), (i, j))
AntiSymmetricTensor(t, SymTuple(a, b), SymTuple(i, j))
>>> AntiSymmetricTensor('t', (a, b), (i, j)).lower
SymTuple(i, j)
"""
return self.args[2]
def __str__(self):
return "%s(%s,%s)" %self.args
def doit(self, **kw_args):
return self
def _eval_subs(self, old, new):
if old == self:
return new
if old in self.upper:
return self.__class__(self.symbol,
self.args[1]._eval_subs(old,new),self.args[2])
if old in self.lower:
return self.__class__(self.symbol,
self.args[1], self.args[2]._eval_subs(old,new))
return self
class KroneckerDelta(Function):
"""
Discrete delta function.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import KroneckerDelta
>>> i, j, k = symbols('i j k')
>>> KroneckerDelta(i, j)
KroneckerDelta(i, j)
>>> KroneckerDelta(i, i)
1
>>> KroneckerDelta(i, i+1)
0
>>> KroneckerDelta(i, i+1+k)
KroneckerDelta(i, 1 + i + k)
"""
nargs = 2
is_commutative=True
@classmethod
def eval(cls, i, j):
"""
Evaluates the discrete delta function.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import KroneckerDelta
>>> i, j, k = symbols('i j k')
>>> KroneckerDelta(i, j)
KroneckerDelta(i, j)
>>> KroneckerDelta(i, i)
1
>>> KroneckerDelta(i, i+1)
0
>>> KroneckerDelta(i, i+1+k)
KroneckerDelta(i, 1 + i + k)
# indirect doctest
"""
if i > j:
return cls(j,i)
diff = i-j
if diff == 0:
return Integer(1)
elif diff.is_number:
return S.Zero
if i.assumptions0.get("below_fermi") and j.assumptions0.get("above_fermi"):
return S.Zero
if j.assumptions0.get("below_fermi") and i.assumptions0.get("above_fermi"):
return S.Zero
def _eval_subs(self, old, new):
r = KroneckerDelta(self.args[0].subs(old, new), self.args[1].subs(old, new))
return r
@property
def is_above_fermi(self):
"""
True if Delta can be non-zero above fermi
>>> from sympy.physics.secondquant import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p,a).is_above_fermi
True
>>> KroneckerDelta(p,i).is_above_fermi
False
>>> KroneckerDelta(p,q).is_above_fermi
True
"""
if self.args[0].assumptions0.get("below_fermi"):
return False
if self.args[1].assumptions0.get("below_fermi"):
return False
return True
@property
def is_below_fermi(self):
"""
True if Delta can be non-zero below fermi
>>> from sympy.physics.secondquant import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p,a).is_below_fermi
False
>>> KroneckerDelta(p,i).is_below_fermi
True
>>> KroneckerDelta(p,q).is_below_fermi
True
"""
if self.args[0].assumptions0.get("above_fermi"):
return False
if self.args[1].assumptions0.get("above_fermi"):
return False
return True
@property
def is_only_above_fermi(self):
"""
True if Delta is restricted to above fermi
>>> from sympy.physics.secondquant import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p,a).is_only_above_fermi
True
>>> KroneckerDelta(p,q).is_only_above_fermi
False
>>> KroneckerDelta(p,i).is_only_above_fermi
False
"""
return ( self.args[0].assumptions0.get("above_fermi")
or
self.args[1].assumptions0.get("above_fermi")
) or False
@property
def is_only_below_fermi(self):
"""
True if Delta is restricted to below fermi
>>> from sympy.physics.secondquant import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p,i).is_only_below_fermi
True
>>> KroneckerDelta(p,q).is_only_below_fermi
False
>>> KroneckerDelta(p,a).is_only_below_fermi
False
"""
return ( self.args[0].assumptions0.get("below_fermi")
or
self.args[1].assumptions0.get("below_fermi")
) or False
@property
def indices_contain_equal_information(self):
"""
Returns True if indices are either both above or below fermi.
Example:
>>> from sympy.physics.secondquant import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p, q).indices_contain_equal_information
True
>>> KroneckerDelta(p, q+1).indices_contain_equal_information
True
>>> KroneckerDelta(i, p).indices_contain_equal_information
False
"""
if (self.args[0].assumptions0.get("below_fermi") and
self.args[1].assumptions0.get("below_fermi")):
return True
if (self.args[0].assumptions0.get("above_fermi")
and self.args[1].assumptions0.get("above_fermi")):
return True
        # if both indices are general, they contain equal information
return self.is_below_fermi and self.is_above_fermi
@property
def preferred_index(self):
"""
Returns the index which is preferred to keep in the final expression.
The preferred index is the index with more information regarding fermi
level. If indices contain same information, 'a' is preferred before
'b'.
>>> from sympy.physics.secondquant import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> j = Symbol('j',below_fermi=True)
>>> p = Symbol('p')
>>> KroneckerDelta(p,i).preferred_index
i
>>> KroneckerDelta(p,a).preferred_index
a
>>> KroneckerDelta(i,j).preferred_index
i
"""
if self._get_preferred_index():
return self.args[1]
else:
return self.args[0]
@property
def killable_index(self):
"""
Returns the index which is preferred to substitute in the final expression.
The index to substitute is the index with less information regarding fermi
level. If indices contain same information, 'a' is preferred before
'b'.
>>> from sympy.physics.secondquant import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> j = Symbol('j',below_fermi=True)
>>> p = Symbol('p')
>>> KroneckerDelta(p,i).killable_index
p
>>> KroneckerDelta(p,a).killable_index
p
>>> KroneckerDelta(i,j).killable_index
j
"""
if self._get_preferred_index():
return self.args[0]
else:
return self.args[1]
def _get_preferred_index(self):
"""
Returns the index which is preferred to keep in the final expression.
The preferred index is the index with more information regarding fermi
level. If indices contain same information, index 0 is returned.
"""
if not self.is_above_fermi:
if self.args[0].assumptions0.get("below_fermi"):
return 0
else:
return 1
elif not self.is_below_fermi:
if self.args[0].assumptions0.get("above_fermi"):
return 0
else:
return 1
else:
return 0
def _dagger_(self):
return self
def _latex_(self,printer):
return "\\delta_{%s%s}"% (self.args[0].name,self.args[1].name)
def __repr__(self):
return "KroneckerDelta(%s,%s)"% (self.args[0],self.args[1])
def __str__(self):
if not self.is_above_fermi:
return 'd<(%s,%s)'% (self.args[0],self.args[1])
elif not self.is_below_fermi:
return 'd>(%s,%s)'% (self.args[0],self.args[1])
else:
return 'd(%s,%s)'% (self.args[0],self.args[1])
class SqOperator(Basic):
"""
Base class for Second Quantization operators.
"""
op_symbol = 'sq'
def __new__(cls, k):
obj = Basic.__new__(cls, sympify(k), commutative=False)
return obj
def _eval_subs(self, old, new):
r = self.__class__(self.args[0].subs(old, new))
return r
@property
def state(self):
"""
Returns the state index related to this operator.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd, B, Bd
>>> p = Symbol('p')
>>> F(p).state
p
>>> Fd(p).state
p
>>> B(p).state
p
>>> Bd(p).state
p
"""
return self.args[0]
@property
def is_symbolic(self):
"""
Returns True if the state is a symbol (as opposed to a number).
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> p = Symbol('p')
>>> F(p).is_symbolic
True
>>> F(1).is_symbolic
False
"""
if self.state.is_Integer:
return False
else:
return True
def doit(self,**kw_args):
"""
FIXME: hack to prevent crash further up...
"""
return self
def __repr__(self):
return NotImplemented
def __str__(self):
return "%s(%r)" % (self.op_symbol, self.state)
def apply_operator(self, state):
"""
Applies an operator to itself.
"""
raise NotImplementedError('implement apply_operator in a subclass')
class BosonicOperator(SqOperator):
pass
class Annihilator(SqOperator):
pass
class Creator(SqOperator):
pass
class AnnihilateBoson(BosonicOperator, Annihilator):
"""
Bosonic annihilation operator
"""
op_symbol = 'b'
def _dagger_(self):
return CreateBoson(self.state)
def apply_operator(self, state):
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
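            # bosonic lowering: b|..., n_i, ...> = sqrt(n_i)|..., n_i - 1, ...>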
amp = sqrt(state[element])
return amp*state.down(element)
else:
return Mul(self,state)
def __repr__(self):
return "AnnihilateBoson(%s)"%self.state
class CreateBoson(BosonicOperator, Creator):
"""
Bosonic creation operator
"""
op_symbol = 'b+'
def _dagger_(self):
return AnnihilateBoson(self.state)
def apply_operator(self, state):
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
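            # bosonic raising: b+|..., n_i, ...> = sqrt(n_i + 1)|..., n_i + 1, ...>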
amp = sqrt(state[element] + 1)
return amp*state.up(element)
else:
return Mul(self,state)
def __repr__(self):
return "CreateBoson(%s)"%self.state
B = AnnihilateBoson
Bd = CreateBoson
class FermionicOperator(SqOperator):
@property
def is_restricted(self):
"""
Is this FermionicOperator restricted with respect to fermi level?
Return values:
1 : restricted to orbits above fermi
0 : no restriction
-1 : restricted to orbits below fermi
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_restricted
1
>>> Fd(a).is_restricted
1
>>> F(i).is_restricted
-1
>>> Fd(i).is_restricted
-1
>>> F(p).is_restricted
0
>>> Fd(p).is_restricted
0
"""
ass = self.args[0].assumptions0
if ass.get("below_fermi"): return -1
if ass.get("above_fermi"): return 1
return 0
@property
def is_above_fermi(self):
"""
Does the index of this FermionicOperator allow values above fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_above_fermi
True
>>> F(i).is_above_fermi
False
>>> F(p).is_above_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("below_fermi")
@property
def is_below_fermi(self):
"""
Does the index of this FermionicOperator allow values below fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_below_fermi
False
>>> F(i).is_below_fermi
True
>>> F(p).is_below_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("above_fermi")
@property
def is_only_below_fermi(self):
"""
Is the index of this FermionicOperator restricted to values below fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_below_fermi
False
>>> F(i).is_only_below_fermi
True
>>> F(p).is_only_below_fermi
False
The same applies to creation operators Fd
"""
return self.is_below_fermi and not self.is_above_fermi
@property
def is_only_above_fermi(self):
"""
Is the index of this FermionicOperator restricted to values above fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_above_fermi
True
>>> F(i).is_only_above_fermi
False
>>> F(p).is_only_above_fermi
False
The same applies to creation operators Fd
"""
return self.is_above_fermi and not self.is_below_fermi
def __cmp__(self,other):
if self is other: return 0
# check that we have only FermionicOperator
if not isinstance(other, FermionicOperator):
            return SqOperator.__cmp__(self, other)
# only q-creator or only q-annihilator
if self.is_only_q_creator and other.is_q_annihilator: return -1
if self.is_q_creator and other.is_only_q_annihilator: return -1
if other.is_only_q_creator and self.is_q_annihilator: return +1
if other.is_q_creator and self.is_only_q_annihilator: return +1
# push creators to the left by reversing sign of class compare
c = cmp(self.__class__, other.__class__)
if c: return -c
# standard hash-sorting from Basic, pasted here for speed
st = self._hashable_content()
ot = other._hashable_content()
c = cmp(len(st),len(ot))
if c: return c
for l,r in zip(st,ot):
if isinstance(l, Basic):
c = l.compare(r)
else:
c = cmp(l, r)
if c: return c
return 0
def __lt__(self,other):
return self.__cmp__(other) == -1
def __gt__(self,other):
return self.__cmp__(other) == 1
def __ge__(self,other):
return self.__cmp__(other) >= 0
def __le__(self,other):
return self.__cmp__(other) <= 0
class AnnihilateFermion(FermionicOperator, Annihilator):
"""
Fermionic annihilation operator
"""
op_symbol = 'f'
def _dagger_(self):
return CreateFermion(self.state)
def apply_operator(self, state):
if isinstance(state, FockStateFermionKet):
element = self.state
return state.down(element)
elif isinstance(state, Mul):
c_part, nc_part = split_commutative_parts(state)
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part+[nc_part[0].down(element)]+nc_part[1:]))
else:
return Mul(self,state)
else:
return Mul(self,state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_q_creator
0
>>> F(i).is_q_creator
-1
>>> F(p).is_q_creator
-1
"""
if self.is_below_fermi: return -1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=1)
>>> i = Symbol('i',below_fermi=1)
>>> p = Symbol('p')
>>> F(a).is_q_annihilator
1
>>> F(i).is_q_annihilator
0
>>> F(p).is_q_annihilator
1
"""
if self.is_above_fermi: return 1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_creator
False
>>> F(i).is_only_q_creator
True
>>> F(p).is_only_q_creator
False
"""
return self.is_only_below_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_annihilator
True
>>> F(i).is_only_q_annihilator
False
>>> F(p).is_only_q_annihilator
False
"""
return self.is_only_above_fermi
def __repr__(self):
return "AnnihilateFermion(%s)"%self.state
def _latex_(self,printer):
return "a_{%s}"%self.state.name
class CreateFermion(FermionicOperator, Creator):
"""
Fermionic creation operator.
"""
op_symbol = 'f+'
def _dagger_(self):
return AnnihilateFermion(self.state)
def apply_operator(self, state):
if isinstance(state, FockStateFermionKet):
element = self.state
return state.up(element)
elif isinstance(state, Mul):
c_part, nc_part = split_commutative_parts(state)
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part+[nc_part[0].up(element)]+nc_part[1:]))
else:
return Mul(self,state)
else:
return Mul(self,state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_q_creator
1
>>> Fd(i).is_q_creator
0
>>> Fd(p).is_q_creator
1
"""
if self.is_above_fermi: return 1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a',above_fermi=1)
>>> i = Symbol('i',below_fermi=1)
>>> p = Symbol('p')
>>> Fd(a).is_q_annihilator
0
>>> Fd(i).is_q_annihilator
-1
>>> Fd(p).is_q_annihilator
-1
"""
if self.is_below_fermi: return -1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_creator
True
>>> Fd(i).is_only_q_creator
False
>>> Fd(p).is_only_q_creator
False
"""
return self.is_only_above_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_annihilator
False
>>> Fd(i).is_only_q_annihilator
True
>>> Fd(p).is_only_q_annihilator
False
"""
return self.is_only_below_fermi
def __repr__(self):
return "CreateFermion(%s)"%self.state
def _latex_(self,printer):
return "a^\\dagger_{%s}"%self.state.name
Fd = CreateFermion
F = AnnihilateFermion
class FockState(Basic):
"""
Many particle Fock state with a sequence of occupation numbers.
Anywhere you can have a FockState, you can also have Integer(0).
All code must check for this!
"""
def __new__(cls, occupations):
"""
occupations is a list with two possible meanings:
- For bosons it is a list of occupation numbers.
Element i is the number of particles in state i.
- For fermions it is a list of occupied orbits.
Element 0 is the state that was occupied first, element i
is the i'th occupied state.
"""
o = map(sympify, occupations)
obj = Basic.__new__(cls, tuple(o), commutative=False)
return obj
def _eval_subs(self, old, new):
r = self.__class__([o.subs(old, new) for o in self.args[0]])
return r
def __getitem__(self, i):
i = int(i)
return self.args[0][i]
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.args[0])
def __str__(self):
return "%s%r%s" % (self.lbracket,self._labels(),self.rbracket)
def _labels(self):
return self.args[0]
def __len__(self):
return len(self.args[0])
class BosonState(FockState):
"""
Many particle Fock state with a sequence of occupation numbers.
occupation numbers can be any integer >= 0
"""
def up(self, i):
i = int(i)
new_occs = list(self.args[0])
new_occs[i] = new_occs[i]+Integer(1)
return self.__class__(new_occs)
def down(self, i):
i = int(i)
new_occs = list(self.args[0])
if new_occs[i]==Integer(0):
return Integer(0)
else:
new_occs[i] = new_occs[i]-Integer(1)
return self.__class__(new_occs)
class FermionState(FockState):
"""
Many particle Fock state with a sequence of occupied orbits
Each state can only have one particle, so we choose to store a list of
occupied orbits rather than a tuple with occupation numbers (zeros and ones).
    States below the fermi level are holes and are represented by negative
    labels in the occupation list.
    For symbolic state labels, the fermi_level caps the number of allowed
    hole states.
"""
fermi_level=0
def __new__(cls, occupations, fermi_level=0):
occupations = map(sympify,occupations)
if len(occupations) >1:
try:
(occupations,sign) = _sort_anticommuting_fermions(occupations)
except ViolationOfPauliPrinciple:
return S.Zero
else:
sign = 0
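        # Note: the fermi level is stored on the class, so the most recently
        # constructed state determines it for the classmethods below.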
cls.fermi_level = fermi_level
if cls._count_holes(occupations) > fermi_level:
return S.Zero
if sign%2:
return S.NegativeOne*FockState.__new__(cls,occupations)
else:
return FockState.__new__(cls,occupations)
def up(self, i):
"""
Performs the action of a creation operator.
If below fermi we try to remove a hole,
if above fermi we try to create a particle.
if general index p we return Kronecker(p,i)*self
where i is a new symbol with restriction above or below.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
>>> FKet([]).up(a)
FockStateFermionKet((a,))
A creator acting on vacuum below fermi vanishes
>>> FKet([]).up(i)
0
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
elif self._only_below_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
else:
if present:
hole = Symbol("i",below_fermi=True,dummy=True)
return KroneckerDelta(i,hole)*self._remove_orbit(i)
else:
particle = Symbol("a",above_fermi=True,dummy=True)
return KroneckerDelta(i,particle)*self._add_orbit(i)
def down(self, i):
"""
Performs the action of an annihilation operator.
If below fermi we try to create a hole,
if above fermi we try to remove a particle.
if general index p we return Kronecker(p,i)*self
where i is a new symbol with restriction above or below.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a',above_fermi=True)
>>> i = Symbol('i',below_fermi=True)
>>> p = Symbol('p')
An annihilator acting on vacuum above fermi vanishes
>>> FKet([]).down(a)
0
Also below fermi, it vanishes, unless we specify a fermi level > 0
>>> FKet([]).down(i)
0
>>> FKet([],4).down(i)
FockStateFermionKet((i,))
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
elif self._only_below_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
else:
if present:
hole = Symbol("i",below_fermi=True,dummy=True)
return KroneckerDelta(i,hole)*self._add_orbit(i)
else:
particle = Symbol("a",above_fermi=True,dummy=True)
return KroneckerDelta(i,particle)*self._remove_orbit(i)
@classmethod
def _only_below_fermi(cls,i):
"""
Tests if given orbit is only below fermi surface.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i<= cls.fermi_level
if i.assumptions0.get('below_fermi'):
return True
return False
@classmethod
def _only_above_fermi(cls,i):
"""
Tests if given orbit is only above fermi surface.
If fermi level has not been set we return True.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i> cls.fermi_level
if i.assumptions0.get('above_fermi'):
return True
return not cls.fermi_level
def _remove_orbit(self,i):
"""
Removes particle/fills hole in orbit i. No input tests performed here.
"""
new_occs = list(self.args[0])
pos = new_occs.index(i)
del new_occs[pos]
if (pos)%2:
return S.NegativeOne*self.__class__(new_occs,self.fermi_level)
else:
return self.__class__(new_occs, self.fermi_level)
def _add_orbit(self,i):
"""
Adds particle/creates hole in orbit i. No input tests performed here.
"""
return self.__class__((i,)+self.args[0], self.fermi_level)
@classmethod
def _count_holes(cls,list):
"""
returns number of identified hole states in list.
"""
return len([ i for i in list if cls._only_below_fermi(i)])
def _negate_holes(self,list):
return tuple([ iff(i<=self.fermi_level, -i, i) for i in list ])
    def __repr__(self):
        if self.fermi_level:
            return "%s(%r, fermi_level=%s)" % (
                    self.__class__.__name__, self.args[0], self.fermi_level)
        else:
            return "%s(%r)" % (self.__class__.__name__, self.args[0])
def _labels(self):
return self._negate_holes(self.args[0])
class FockStateKet(FockState):
lbracket = '|'
rbracket = '>'
class FockStateBra(FockState):
lbracket = '<'
rbracket = '|'
def __mul__(self, other):
if isinstance(other, FockStateKet):
return InnerProduct(self, other)
else:
return Basic.__mul__(self, other)
class FockStateBosonKet(BosonState,FockStateKet):
def _dagger_(self):
return FockStateBosonBra(*self.args)
class FockStateBosonBra(BosonState,FockStateBra):
def _dagger_(self):
return FockStateBosonKet(*self.args)
class FockStateFermionKet(FermionState,FockStateKet):
def _dagger_(self):
return FockStateFermionBra(*self.args)
class FockStateFermionBra(FermionState,FockStateBra):
def _dagger_(self):
return FockStateFermionKet(*self.args)
BBra = FockStateBosonBra
BKet = FockStateBosonKet
FBra = FockStateFermionBra
FKet = FockStateFermionKet
def split_commutative_parts(m):
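    """Split the factors of a Mul into commutative and noncommutative parts."""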
c_part = [p for p in m.args if p.is_commutative]
nc_part = [p for p in m.args if not p.is_commutative]
return c_part, nc_part
def apply_Mul(m):
"""
Take a Mul instance with operators and apply them to states.
This method applies all operators with integer state labels
to the actual states. For symbolic state labels, nothing is done.
    When inner products of FockStates are encountered (like <a|b>),
    they are converted to instances of InnerProduct.
    This does not currently work on double inner products like
    <a|b><c|d>.
If the argument is not a Mul, it is simply returned as is.
"""
if not isinstance(m, Mul):
return m
c_part, nc_part = split_commutative_parts(m)
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return m
else:
last = nc_part[-1]
next_to_last = nc_part[-2]
if isinstance(last, FockStateKet):
if isinstance(next_to_last, SqOperator):
if next_to_last.is_symbolic:
return m
else:
result = next_to_last.apply_operator(last)
if result == 0:
return 0
else:
return apply_Mul(Mul(*(c_part+nc_part[:-2]+[result])))
elif isinstance(next_to_last, Pow):
if isinstance(next_to_last.base, SqOperator) and \
next_to_last.exp.is_Integer:
if next_to_last.base.is_symbolic:
return m
else:
result = last
for i in range(next_to_last.exp):
result = next_to_last.base.apply_operator(result)
if result == 0: break
if result == 0:
return 0
else:
return apply_Mul(Mul(*(c_part+nc_part[:-2]+[result])))
else:
return m
elif isinstance(next_to_last, FockStateBra):
result = InnerProduct(next_to_last, last)
if result == 0:
return 0
else:
return apply_Mul(Mul(*(c_part+nc_part[:-2]+[result])))
else:
return m
else:
return m
def apply_operators(e):
"""
Take a sympy expression with operators and states and apply the operators.
"""
e = e.expand()
muls = e.atoms(Mul)
subs_list = [(m,apply_Mul(m)) for m in iter(muls)]
return e.subs(subs_list)
class InnerProduct(Basic):
"""
An unevaluated inner product between a bra and ket.
Currently this class just reduces things to a product of
Kronecker Deltas. In the future, we could introduce abstract
states like |a> and |b>, and leave the inner product unevaluated as
<a|b>.
"""
def __new__(cls, bra, ket):
assert isinstance(bra, FockStateBra), 'must be a bra'
        assert isinstance(ket, FockStateKet), 'must be a ket'
r = cls.eval(bra, ket)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, *(bra, ket), **dict(commutative=True))
return obj
@classmethod
def eval(cls, bra, ket):
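        # orthonormal single-particle states: the inner product reduces to a
        # product of Kronecker deltas, one per pair of labels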
result = Integer(1)
for i,j in zip(bra.args[0], ket.args[0]):
result *= KroneckerDelta(i,j)
if result == 0: break
return result
@property
def bra(self):
return self.args[0]
@property
def ket(self):
return self.args[1]
def _eval_subs(self, old, new):
r = self.__class__(self.bra.subs(old,new), self.ket.subs(old,new))
return r
def __repr__(self):
sbra = repr(self.bra)
sket = repr(self.ket)
return "%s|%s" % (sbra[:-1], sket[1:])
def __str__(self):
return self.__repr__()
def matrix_rep(op, basis):
"""
Find the representation of an operator in a basis.
"""
a = zeros((len(basis), len(basis)))
for i in range(len(basis)):
for j in range(len(basis)):
a[i,j] = apply_operators(Dagger(basis[i])*op*basis[j])
return a
class BosonicBasis(object):
"""
Base class for a basis set of bosonic Fock states.
"""
pass
class VarBosonicBasis(object):
"""
A single state, variable particle number basis set.
"""
def __init__(self, n_max):
self.n_max = n_max
self._build_states()
def _build_states(self):
self.basis = []
for i in range(self.n_max):
self.basis.append(FockStateBosonKet([i]))
self.n_basis = len(self.basis)
def index(self, state):
return self.basis.index(state)
def state(self, i):
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
class FixedBosonicBasis(BosonicBasis):
"""
Fixed particle number basis set.
"""
def __init__(self, n_particles, n_levels):
self.n_particles = n_particles
self.n_levels = n_levels
self._build_particle_locations()
self._build_states()
    def _build_particle_locations(self):
        # Enumerate all non-increasing index tuples
        # (i0 >= i1 >= ... >= i_{k-1}), each index below n_levels,
        # in the order of nested loops with descending upper bounds.
        result = [()]
        for n in range(self.n_particles):
            new_result = []
            for partial in result:
                limit = self.n_levels if n == 0 else partial[-1] + 1
                for i in range(limit):
                    new_result.append(partial + (i,))
            result = new_result
        self.particle_locations = result
def _build_states(self):
self.basis = []
for tuple_of_indices in self.particle_locations:
occ_numbers = self.n_levels*[0]
for level in tuple_of_indices:
occ_numbers[level] += 1
self.basis.append(FockStateBosonKet(occ_numbers))
self.n_basis = len(self.basis)
def index(self, state):
return self.basis.index(state)
def state(self, i):
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
# def move(e, i, d):
# """
# Takes the expression "e" and moves the operator at the position i by "d".
# """
# if e.is_Mul:
# if d == 1:
# # e = a*b*c*d
# a = Mul(*e.args[:i])
# b = e.args[i]
# c = e.args[i+1]
# d = Mul(*e.args[i+2:])
# if isinstance(b, Dagger) and not isinstance(c, Dagger):
# i, j = b.args[0].args[0], c.args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# elif not isinstance(b, Dagger) and isinstance(c, Dagger):
# i, j = b.args[0], c.args[0].args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# else:
# return a*c*b*d
# elif d == -1:
# # e = a*b*c*d
# a = Mul(*e.args[:i-1])
# b = e.args[i-1]
# c = e.args[i]
# d = Mul(*e.args[i+1:])
# if isinstance(b, Dagger) and not isinstance(c, Dagger):
# i, j = b.args[0].args[0], c.args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# elif not isinstance(b, Dagger) and isinstance(c, Dagger):
# i, j = b.args[0], c.args[0].args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# else:
# return a*c*b*d
# else:
# if d > 1:
# while d >= 1:
# e = move(e, i, 1)
# d -= 1
# i += 1
# return e
# elif d < -1:
# while d <= -1:
# e = move(e, i, -1)
# d += 1
# i -= 1
# return e
# elif isinstance(e, Add):
# a, b = e.as_two_terms()
# return move(a, i, d) + move(b, i, d)
# raise NotImplementedError()
class Commutator(Function):
"""
The Commutator: [A, B] = A*B - B*A
The arguments are ordered according to .__cmp__()
>>> from sympy import symbols
>>> from sympy.physics.secondquant import Commutator
>>> A, B = symbols('A B', commutative=False)
>>> Commutator(B, A)
Commutator(B, A)
Evaluate the commutator with .doit()
>>> comm = Commutator(A,B); comm
Commutator(A, B)
>>> comm.doit()
A*B - B*A
For two second quantization operators the commutator is evaluated
immediately:
>>> from sympy.physics.secondquant import Fd, F
>>> a = symbols('a',above_fermi=True)
>>> i = symbols('i',below_fermi=True)
>>> p,q = symbols('pq')
>>> Commutator(Fd(a),Fd(i))
2*NO(CreateFermion(a)*CreateFermion(i))
But for more complicated expressions, the evaluation is triggered by
a call to .doit()
>>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm
Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i))
>>> comm.doit()
KroneckerDelta(i, q)*CreateFermion(p) - KroneckerDelta(i, p)*CreateFermion(q)
"""
is_commutative = False
nargs = 2
@classmethod
def eval(cls, a,b):
"""
The Commutator [A,B] is in canonical form if A < B
"""
if not (a and b): return S.Zero
if a == b: return S.Zero
if a.is_commutative or b.is_commutative:
return S.Zero
#
# [A+B,C] -> [A,C] + [B,C]
#
a = a.expand()
if isinstance(a,Add):
return Add(*[cls(term,b) for term in a.args])
b = b.expand()
if isinstance(b,Add):
return Add(*[cls(a,term) for term in b.args])
#
# [xA,yB] -> xy*[A,B]
#
c_part = []
nc_part = []
nc_part2 = []
if isinstance(a,Mul):
c_part,nc_part = split_commutative_parts(a)
if isinstance(b,Mul):
c_part2,nc_part2 = split_commutative_parts(b)
c_part.extend(c_part2)
if c_part:
a = nc_part or [a]
b = nc_part2 or [b]
return Mul(*c_part)*cls(Mul(*a),Mul(*b))
#
# single second quantization operators
#
if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator):
if isinstance(b,CreateBoson) and isinstance(a,AnnihilateBoson):
return KroneckerDelta(a.state,b.state)
if isinstance(a,CreateBoson) and isinstance(b,AnnihilateBoson):
return S.NegativeOne*KroneckerDelta(a.state,b.state)
else:
return S.Zero
if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator):
return wicks(a*b)- wicks(b*a)
#
# Canonical ordering of arguments
#
if a > b:
return S.NegativeOne*cls(b,a)
def doit(self,**hints):
a = self.args[0]
b = self.args[1]
if not hints.get("wicks"):
a = a.doit(**hints)
b = b.doit(**hints)
try:
return wicks(a*b) - wicks(b*a)
except ContractionAppliesOnlyToFermions:
pass
except WicksTheoremDoesNotApply:
pass
return (a*b - b*a).doit(**hints)
def __repr__(self):
return "Commutator(%s,%s)" %(self.args[0],self.args[1])
def __str__(self):
return "[%s,%s]" %(self.args[0],self.args[1])
def _latex_(self,printer):
return "\\left[%s,%s\\right]"%tuple([
printer._print(arg) for arg in self.args])
class NO(Function):
"""
This function is used to represent normal ordering brackets.
i.e. {abcd} sometimes written :abcd:
Applying the function NO(arg) to an argument means that all operators in
the argument will be assumed to anticommute, and have vanishing
contractions. This allows an immediate reordering to canonical form
upon object creation.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> p,q = symbols('pq')
>>> NO(Fd(p)*F(q))
NO(CreateFermion(p)*AnnihilateFermion(q))
>>> NO(F(q)*Fd(p))
-NO(CreateFermion(p)*AnnihilateFermion(q))
Note:
If you want to generate a normal ordered equivalent of an expression, you
should use the function wicks(). This class only indicates that all
operators inside the brackets anticommute, and have vanishing contractions.
Nothing more, nothing less.
"""
nargs = 1
is_commutative = False
@classmethod
def eval(cls,arg):
"""
Use anticommutation to get canonical form of operators.
Employ associativity of the normal ordered product: {ab{cd}} = {abcd},
but note that {ab}{cd} != {abcd}.
We also employ distributivity: {ab + cd} = {ab} + {cd}.
Canonical form also implies expand(): {ab(c+d)} = {abc} + {abd}.
"""
# {ab + cd} = {ab} + {cd}
arg = arg.expand()
if arg.is_Add:
return Add(*[ cls(term) for term in arg.args])
if arg.is_Mul:
# take coefficient outside of normal ordering brackets
c_part, seq = split_commutative_parts(arg)
if c_part:
coeff = Mul(*c_part)
if not seq:
return coeff
else:
coeff = S.One
# {ab{cd}} = {abcd}
newseq = []
foundit = False
for fac in seq:
if isinstance(fac,NO):
newseq.extend(fac.args)
foundit = True
else:
newseq.append(fac)
if foundit:
return coeff*cls(Mul(*newseq))
# We assume that the user doesn't mix bosonic and fermionic operators
if isinstance(seq[0], BosonicOperator):
raise NotImplementedError
try:
newseq,sign = _sort_anticommuting_fermions(seq)
except ViolationOfPauliPrinciple:
return S.Zero
if sign%2:
return (S.NegativeOne*coeff)*cls(Mul(*newseq))
elif sign:
return coeff*cls(Mul(*newseq))
else:
pass # since sign == 0, no permutations were necessary
# if we couldn't do anything with Mul object, we just
# mark it as normal ordered
if coeff == S.One:
return None
else:
return coeff*cls(Mul(*newseq))
if isinstance(arg,NO):
return arg
# if object was not Mul or Add, normal ordering does not apply
return arg
@property
def has_q_creators(self):
"""
Returns 1 if the leftmost operator is a q-creator above the fermi level,
-1 if it is a q-creator below the fermi level, and 0 if the leftmost
operator is not a q-creator at all. The check is fast.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> p,q = symbols('pq')
>>> no_pq = NO(Fd(p)*Fd(q))
>>> no_pq.has_q_creators
1
>>> no_pq = NO(F(p)*F(q))
>>> no_pq.has_q_creators
-1
>>> i,j = symbols('ij',below_fermi=True)
>>> no_pq = NO(Fd(i)*Fd(j))
>>> no_pq.has_q_creators
0
"""
return self.args[0].args[0].is_q_creator
@property
def has_q_annihilators(self):
"""
Returns 1 if the rightmost operator is a q-annihilator above the fermi
level, -1 if it is a q-annihilator below the fermi level, and 0 if the
rightmost operator is not a q-annihilator at all. The check is fast.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> p,q = symbols('pq')
>>> no_pq = NO(Fd(p)*Fd(q))
>>> no_pq.has_q_annihilators
-1
>>> no_pq = NO(F(p)*F(q))
>>> no_pq.has_q_annihilators
1
>>> a,b = symbols('ab',above_fermi=True)
>>> no_pq = NO(Fd(a)*Fd(b))
>>> no_pq.has_q_annihilators
0
"""
return self.args[0].args[-1].is_q_annihilator
def doit(self, **kw_args):
if kw_args.get("remove_brackets", True):
return self._remove_brackets()
else:
return self.__new__(type(self),self.args[0].doit(**kw_args))
def _remove_brackets(self):
"""
Returns the sorted string without normal order brackets.
The returned string has the property that no nonzero
contractions exist.
"""
# check if any creator is also an annihilator
subslist=[]
for i in self.iter_q_creators():
if self[i].is_q_annihilator:
assume = self[i].state.assumptions0
assume["dummy"]=True
Dummy = type(Symbol('x',dummy=True))
# only operators with a dummy index can be split in two terms
if isinstance(self[i].state, Dummy):
# create indices with fermi restriction
assume.pop("above_fermi", None)
assume["below_fermi"]=True
below = Symbol('i',**assume)
assume.pop("below_fermi", None)
assume["above_fermi"]=True
above = Symbol('a',**assume)
cls = type(self[i])
split = (
self[i].__new__(cls,below)
* KroneckerDelta(below,self[i].state)
+ self[i].__new__(cls,above)
* KroneckerDelta(above,self[i].state)
)
subslist.append((self[i],split))
else:
raise SubstitutionOfAmbigousOperatorFailed(self[i])
if subslist:
result = NO(self.subs(subslist))
if isinstance(result, Add):
return Add(*[term.doit() for term in result.args])
else:
return self.args[0]
def _expand_operators(self):
"""
Returns a sum of NO objects that contain no ambiguous q-operators.
If an index q has range both above and below fermi, the operator F(q)
is ambiguous in the sense that it can be both a q-creator and a q-annihilator.
If q is dummy, it is assumed to be a summation variable and this method
rewrites it into a sum of NO terms with unambiguous operators:
{Fd(p)*F(q)} = {Fd(a)*F(b)} + {Fd(a)*F(i)} + {Fd(j)*F(b)} -{F(i)*Fd(j)}
where a,b are above and i,j are below fermi level.
"""
return NO(self._remove_brackets())
def _eval_subs(self,old,new):
if self == old:
return new
ops = self.args[0].args
for i in range(len(ops)):
if ops[i] == old:
l1 = ops[:i]+(new,)+ops[i+1:]
return self.__class__(Mul(*l1))
return Function._eval_subs(self,old,new)
def __getitem__(self,i):
if isinstance(i,slice):
indices = i.indices(len(self))
return [self.args[0].args[i] for i in range(*indices)]
else:
return self.args[0].args[i]
def __len__(self):
return len(self.args[0].args)
def iter_q_annihilators(self):
"""
Iterates over the annihilation operators.
>>> from sympy import symbols
>>> i,j,k,l = symbols('ijkl',below_fermi=True)
>>> p,q,r,s = symbols('pqrs', dummy=True)
>>> a,b,c,d = symbols('abcd',above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*Fd(j)*F(b))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
for i in xrange(len(ops)-1, -1, -1):
if ops[i].is_q_annihilator:
yield i
else:
break
def iter_q_creators(self):
"""
Iterates over the creation operators.
>>> from sympy import symbols
>>> i,j,k,l = symbols('ijkl',below_fermi=True)
>>> p,q,r,s = symbols('pqrs', dummy=True)
>>> a,b,c,d = symbols('abcd',above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*Fd(j)*F(b))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
for i in xrange(len(ops)):
if ops[i].is_q_creator:
yield i
else:
break
def get_subNO(self, i):
"""
Returns a NO() without FermionicOperator at index i
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, NO
>>> p,q,r = symbols('pqr')
>>> NO(F(p)*F(q)*F(r)).get_subNO(1)
NO(AnnihilateFermion(p)*AnnihilateFermion(r))
"""
mul = Mul(*(self.args[0].args[0:i] + self.args[0].args[i+1:]))
return NO(mul)
def _latex_(self,printer):
return "\\left\\{%s\\right\\}"%printer._print(self.args[0])
def __repr__(self):
return "NO(%s)"%self.args[0]
def __str__(self):
return ":%s:" % self.args[0]
# @cacheit
def contraction(a,b):
"""
Calculates contraction of Fermionic operators ab
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, Fd, contraction
>>> p,q = symbols('pq')
>>> a,b = symbols('ab',above_fermi=True)
>>> i,j = symbols('ij',below_fermi=True)
A contraction is non-zero only if a quasi-creator is to the right of a
quasi-annihilator:
>>> contraction(F(a),Fd(b))
KroneckerDelta(a, b)
>>> contraction(Fd(i),F(j))
KroneckerDelta(i, j)
For general indices a non-zero result restricts the indices to below/above
the fermi surface:
>>> contraction(Fd(p),F(q))
KroneckerDelta(p, q)*KroneckerDelta(q, _i)
>>> contraction(F(p),Fd(q))
KroneckerDelta(p, q)*KroneckerDelta(q, _a)
The contraction of two creators or two annihilators always vanishes:
>>> contraction(F(p),F(q))
0
>>> contraction(Fd(p),Fd(q))
0
"""
if isinstance(b,FermionicOperator) and isinstance(a,FermionicOperator):
if isinstance(a,AnnihilateFermion) and isinstance(b,CreateFermion):
if b.state.assumptions0.get("below_fermi"):
return S.Zero
if a.state.assumptions0.get("below_fermi"):
return S.Zero
if b.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state,b.state)
if a.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state,b.state)
return (KroneckerDelta(a.state,b.state)*
KroneckerDelta(b.state,Symbol('a',dummy=True,above_fermi=True)))
if isinstance(b,AnnihilateFermion) and isinstance(a,CreateFermion):
if b.state.assumptions0.get("above_fermi"):
return S.Zero
if a.state.assumptions0.get("above_fermi"):
return S.Zero
if b.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state,b.state)
if a.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state,b.state)
return (KroneckerDelta(a.state,b.state)*
KroneckerDelta(b.state,Symbol('i',dummy=True,below_fermi=True)))
# vanish if 2xAnnihilator or 2xCreator
return S.Zero
else:
#not fermion operators
t = ( isinstance(i,FermionicOperator) for i in (a,b) )
raise ContractionAppliesOnlyToFermions(*t)
def _sort_anticommuting_fermions(string1):
"""Sort fermionic operators to canonical order, assuming all pairs anticommute.
Uses a bidirectional bubble sort. Items in string1 are not referenced
so in principle they may be any comparable objects. Sorting is
done according to the > operator.
If the Pauli principle is violated, an exception is raised.
returns a tuple (sorted_str, sign)
sorted_str -- list containing the sorted operators
sign -- int telling how many times the sign should be changed
(if sign==0 the string was already sorted)
"""
verified = False
sign = 0
rng = range(len(string1)-1)
rev = range(len(string1)-3,-1,-1)
string1 = list(string1)
while not verified:
verified = True
for i in rng:
left = string1[i]
right = string1[i+1]
if left == right:
raise ViolationOfPauliPrinciple([left,right])
if left > right:
verified = False
string1[i:i+2] = [right, left]
sign = sign+1
if verified:
break
for i in rev:
left = string1[i]
right = string1[i+1]
if left == right:
raise ViolationOfPauliPrinciple([left,right])
if left > right:
verified = False
string1[i:i+2] = [right, left]
sign = sign+1
return (string1,sign)
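# Example of the sign bookkeeping (illustrative; any comparable items work,
# since the elements are only compared, never modified):
#
# >>> _sort_anticommuting_fermions([3, 1, 2])
# ([1, 2, 3], 2)
#
# Two transpositions were needed, so an anticommuting product of the
# operands picks up a factor (-1)**2 == +1.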
def evaluate_deltas(e):
"""
We evaluate KroneckerDelta symbols in the expression assuming Einstein summation.
If one index is repeated it is summed over and in effect substituted with
the other one. If both indices are repeated we substitute according to what
is the preferred index. This is determined by
KroneckerDelta.preferred_index and KroneckerDelta.killable_index.
In case there are no possible substitutions or if a substitution would
imply a loss of information, nothing is done.
In case an index appears in more than one KroneckerDelta, the resulting
final substitution depends on the larger expression. Behavior of
evaluate_deltas is in that case undefined.
Examples: We assume that
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import evaluate_deltas, KroneckerDelta
>>> i,j = symbols('ij',below_fermi=True, dummy=True)
>>> a,b = symbols('ab',above_fermi=True, dummy=True)
>>> p,q = symbols('pq', dummy=True)
>>> f = Function('f')
>>> t = Function('t')
The order of preference for these indices according to KroneckerDelta is
(a, b, i, j, p, q), so we get:
=== Trivial cases ===
>>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j)
f(_j)
>>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q)
f(_q)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p)
f(_p)
=== More interesting cases ===
>>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q))
f(_i, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q))
f(_a, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q))
f(_p, _p)
=== Do nothing to prevent loss of information ===
>>> evaluate_deltas(KroneckerDelta(i,p)*f(q))
KroneckerDelta(_i, _p)*f(_q)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(i))
KroneckerDelta(_i, _p)*f(_i)
"""
# We treat Deltas only in mul objects
# for general function objects we don't evaluate KroneckerDeltas in arguments,
# but here we hard code exceptions to this rule
accepted_functions = (
Add,
)
if isinstance(e,accepted_functions):
return e.new(*[evaluate_deltas(arg) for arg in e.args])
elif isinstance(e,Mul):
# find all occurrences of delta functions and count each index present in
# the expression.
deltas = []
indices = {}
for i in e.args:
for s in i.atoms():
if s in indices:
indices[s] += 1
else:
indices[s] = 0 # counting from zero simplifies the logic below
if isinstance(i, KroneckerDelta): deltas.append(i)
for d in deltas:
# If we do something, and there are more deltas, we should recurse
# to treat the resulting expression properly
if indices[d.killable_index]:
e = e.subs(d.killable_index,d.preferred_index)
if len(deltas)>1: return evaluate_deltas(e)
elif indices[d.preferred_index] and d.indices_contain_equal_information:
e = e.subs(d.preferred_index,d.killable_index)
if len(deltas)>1: return evaluate_deltas(e)
else:
pass
return e
else:
# nothing to do; we probably hit a Symbol or a number
return e
def _get_dummies(expr, _reverse, **require):
"""
Collects dummies recursively in predictable order.
Starting at right end to prioritize indices of non-commuting terms.
FIXME: A more sophisticated predictable order would work better.
Current implementation does not always work if factors commute. Since
commuting factors are sorted also by dummy indices, it may happen that
all terms have exactly the same index order, so that no term will
obtain a substitution of dummies.
"""
result = []
for arg in _reverse(expr.args):
try:
if arg.dummy_index:
# here we check that the dummy matches requirements
for key,val in require.items():
if val != arg.assumptions0.get(key, False):
break
else:
result.append(arg)
except AttributeError:
try:
if arg.args:
result.extend(_get_dummies(arg, _reverse, **require))
except AttributeError:
pass
return result
def _remove_duplicates(seq):
"""
Returns a list of unique dummies.
"""
result = []
while seq:
i = seq.pop()
if i not in result:
result.append(i)
result.reverse()
return result
def _get_subslist(chaos,order):
"""
Return list of subs needed to bring list chaos into list order.
If len(chaos) < len(order), we want chaos to match start of order,
thus, chaos might end up with different elements than upon entry.
If chaos has elements not present in order, we append them to order
so that we have a canonical ordering of all elements present in
the expression.
"""
for el in chaos:
if el not in order:
order.append(el)
subslist = []
for i in xrange(len(chaos)):
if chaos[i] == order[i]:
continue
else:
if order[i] not in chaos[i:]:
subslist.append((chaos[i],order[i]))
else:
tmp = Symbol('x',dummy=True)
subslist.append((order[i], tmp))
subslist.append((chaos[i], order[i]))
ind = chaos.index(order[i])
chaos.pop(ind)
chaos.insert(ind,tmp)
return subslist
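# Small illustration (not in the original source): bringing [b, a] into the
# order [a, b] requires a temporary dummy so that a and b are not collapsed
# into the same symbol by sequential substitution:
#
# >>> a, b = symbols('ab', dummy=True)
# >>> _get_subslist([b, a], [a, b])     # doctest: +SKIP
# [(_a, _x), (_b, _a), (_x, _b)]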
def _substitute(expr, ordered_dummies, _reverse, **require):
"""
Substitute dummies in expr (which should be a Mul object)
If keyword arguments are given, those dummies that have an identical
keyword in .assumptions0 must provide the same value (True or False)
to be substituted.
Dummies without the keyword in .assumptions0 default to
the value False.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import substitute_dummies, _substitute
>>> q = Symbol('q', dummy=True)
>>> i = Symbol('i', below_fermi=True, dummy=True)
>>> a = Symbol('a', above_fermi=True, dummy=True)
>>> reverse = lambda x: reversed(x)
>>> _substitute(a, [q], reverse, above_fermi=True) # will succeed
_a
>>> _substitute(i, [q], reverse, above_fermi=True) # will not succeed
_i
>>> _substitute(i, [q], reverse, above_fermi=False) # will succeed
_i
With no keywords, all dummies are substituted.
"""
dummies = _remove_duplicates(_get_dummies(expr, _reverse, **require))
subslist = _get_subslist(dummies, ordered_dummies)
result = expr.subs(subslist)
return result
def substitute_dummies(expr, new_indices=False, reverse_order=True, pretty_indices=True):
"""
Collect terms by substitution of dummy variables.
This routine allows simplification of Add expressions containing terms
which differ only due to dummy variables.
The idea is to substitute all dummy variables consistently depending on
position in the term. For each term, we collect a sequence of all dummy
variables, where the order is determined by index position. These indices
are then substituted consistently in each term. E.g.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import substitute_dummies
>>> a,b = symbols('ab',dummy=True)
>>> c,d = symbols('cd',dummy=True)
>>> f = Function('f')
>>> expr = f(a,b) + f(c,d); expr
f(_a, _b) + f(_c, _d)
Since a, b, c and d are summation indices, this can be simplified to a
single summation term with a factor 2
>>> substitute_dummies(expr, reverse_order=False)
2*f(_a, _b)
In order to simplify as much as possible, the indices related to
non-commuting factors have highest priority when approaching canonical
indexing. This is done by giving highest priority to the rightmost
dummy indices in each term. (reverse_order=True by default) The default
substitution gives:
>>> substitute_dummies(expr, reverse_order=True)
2*f(_b, _a)
"""
# pretty_indices = True # Prettier
# pretty_indices = False # Easier to debug
if not pretty_indices:
def _i(number):
return 'i_'+str(number)
def _a(number):
return 'a_'+str(number)
def _p(number):
return 'p_'+str(number)
else:
def _i(number):
if number<5:
return "klmno"[number]
else:
return 'o_'+str(number-5)
def _a(number):
if number<6:
return "cdefgh"[number]
else:
return 'h_'+str(number-6)
def _p(number):
if number<7:
return "tuvwxyz"[number]
else:
return 'z_'+str(number-7)
# reverse iterator for use in _get_dummies()
if reverse_order:
def _reverse(seq):
i=len(seq)
while i>0:
i -= 1
yield seq[i]
else:
def _reverse(seq):
for i in xrange(len(seq)):
yield seq[i]
expr = expr.expand()
aboves = []
belows = []
generals = []
Dummy = type(Symbol('x',dummy=True))
dummies = [ d for d in expr.atoms() if isinstance(d,Dummy) ]
dummies.sort()
a = i = p = 0
for d in dummies:
assum = d.assumptions0
assum["dummy"]=True
if assum.get("above_fermi"):
sym = _a(a)
a +=1
l1 = aboves
elif assum.get("below_fermi"):
sym = _i(i)
i +=1
l1 = belows
else:
sym = _p(p)
p +=1
l1 = generals
if new_indices:
l1.append(Symbol(sym, **assum))
else:
l1.append(d)
cases = (
({'above_fermi':True}, aboves),
({'below_fermi':True}, belows),
({'below_fermi':False,'above_fermi':False},generals)
)
for req, dummylist in cases:
if isinstance(expr,Add):
new_dummies = dummylist
expr = (Add(*[_substitute(term, new_dummies, _reverse, **req) for term in expr.args]))
return expr
@cacheit
def _get_contractions(string1, keep_only_fully_contracted=False):
"""
Uses recursion to find all contractions. -- Internal helper function --
Finds all nonzero contractions between the operators in string1 and
returns an Add object with the contracted terms.
"""
# Should we store current level of contraction?
if keep_only_fully_contracted and string1:
result = []
else:
result = [NO(Mul(*string1))]
for i in range(len(string1)-1):
for j in range(i+1,len(string1)):
c = contraction(string1[i],string1[j])
if c:
# print "found contraction",c
sign = (j-i+1) %2
if sign:
coeff = S.NegativeOne*c
else:
coeff = c
#
# Call next level of recursion
# ============================
#
# We now need to find more contractions among operators
#
# oplist = string1[:i]+ string1[i+1:j] + string1[j+1:]
#
# To prevent overcounting, we don't allow contractions
# we have already encountered. i.e. contractions between
# string1[:i] <---> string1[i+1:j]
# and string1[:i] <---> string1[j+1:].
#
# This leaves the case:
oplist = string1[i+1:j] + string1[j+1:]
if oplist:
result.append(coeff*NO(
Mul(*string1[:i])*_get_contractions( oplist,
keep_only_fully_contracted=keep_only_fully_contracted)))
else:
result.append(coeff*NO( Mul(*string1[:i])))
if keep_only_fully_contracted:
break # next iteration over i leaves leftmost operator string1[0] uncontracted
return Add(*result)
# @cacheit
def wicks(e, **kw_args):
"""
Returns the normal ordered equivalent of an expression using Wick's theorem.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import wicks, F, Fd, NO
>>> p,q,r = symbols('pqr')
>>> wicks(Fd(p)*F(q))
KroneckerDelta(p, q)*KroneckerDelta(q, _i) + NO(CreateFermion(p)*AnnihilateFermion(q))
By default, the expression is expanded:
>>> wicks(F(p)*(F(q)+F(r)))
NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(AnnihilateFermion(p)*AnnihilateFermion(r))
With the keyword 'keep_only_fully_contracted=True', only fully contracted
terms are returned.
By request, the result can be simplified in the following order:
-- KroneckerDelta functions are evaluated
-- Dummy variables are substituted consistently across terms
>>> p,q,r = symbols('pqr', dummy=True)
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True) # doctest: +SKIP
KroneckerDelta(_i, _q)*KroneckerDelta(_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r)
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True, simplify_kronecker_deltas=True)
KroneckerDelta(_i, _p) + KroneckerDelta(_i, _p)
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True, simplify_kronecker_deltas=True, simplify_dummies=True)
2*KroneckerDelta(_i, _p)
"""
if not e:
return S.Zero
opts={
'simplify_kronecker_deltas':False,
'expand':True,
'simplify_dummies':False,
'keep_only_fully_contracted':False
}
opts.update(kw_args)
# check if we are already normally ordered
if isinstance(e,NO):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
elif isinstance(e,FermionicOperator):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
# break up any NO-objects, and evaluate commutators
e = e.doit()
# make sure we have only one term to consider
e = e.expand()
if isinstance(e, Add):
if opts['simplify_dummies']:
return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args]))
else:
return Add(*[ wicks(term, **kw_args) for term in e.args])
# For Mul-objects we can actually do something
if isinstance(e, Mul):
# we don't want to mess around with the commuting part of the Mul,
# so we factor it out before starting the recursion
c_part = []
string1 = []
for factor in e.args:
if factor.is_commutative:
c_part.append(factor)
else:
string1.append(factor)
n = len(string1)
# catch trivial cases
if n == 0:
result = e
elif n == 1:
if opts['keep_only_fully_contracted']:
return S.Zero
else:
result = e
else: # non-trivial
if isinstance(string1[0], BosonicOperator):
raise NotImplementedError
string1 = tuple(string1)
# recursion over higher order contractions
result = _get_contractions(string1,
keep_only_fully_contracted=opts['keep_only_fully_contracted'] )
result = Mul(*c_part)*result
if opts['expand']:
result = result.expand()
if opts['simplify_kronecker_deltas']:
result = evaluate_deltas(result)
return result
# It seems there is nothing to do, we are probably called in error.
# Instead of silently returning None, we raise exception to prevent
# strange errors in applications.
else:
raise WicksTheoremDoesNotApply
class PermutationOperator(Basic):
"""
Represents the index permutation operator P(ij)
P(ij)*f(i)*g(j) = f(i)*g(j) - f(j)*g(i)
"""
is_commutative = True
def __new__(cls, i,j):
i,j = map(sympify,(i,j))
if (i>j):
obj = Basic.__new__(cls,j,i)
else:
obj = Basic.__new__(cls,i,j)
return obj
def get_permuted(self,expr):
"""
Returns -expr with permuted indices.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q = symbols('pq')
>>> f = Function('f')
>>> PermutationOperator(p,q).get_permuted(f(p,q))
-f(q, p)
"""
tmp = Symbol('t',dummy=True)
i = self.args[0]
j = self.args[1]
expr = expr.subs(i,tmp)
expr = expr.subs(j,i)
expr = expr.subs(tmp,j)
return S.NegativeOne*expr
def _latex_(self, printer):
return "P(%s%s)"%self.args
def simplify_index_permutations(expr, permutation_operators):
"""
Performs simplification by introducing PermutationOperators where appropriate.
Schematically:
[abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij]
permutation_operators is a list of PermutationOperators to consider.
If permutation_operators=[P(ab),P(ij)] we will try to introduce the
permutation operators P(ij) and P(ab) in the expression. If there are other
possible simplifications, we ignore them.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import simplify_index_permutations
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q,r,s = symbols('pqrs')
>>> f = Function('f')
>>> g = Function('g')
>>> expr = f(p)*g(q) - f(q)*g(p); expr
f(p)*g(q) - f(q)*g(p)
>>> simplify_index_permutations(expr,[PermutationOperator(p,q)])
PermutationOperator(p, q)*f(p)*g(q)
>>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)]
>>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r)
>>> simplify_index_permutations(expr,PermutList)
PermutationOperator(p, q)*PermutationOperator(r, s)*f(p, r)*g(q, s)
"""
def _get_indices(expr, ind):
"""
Collects indices recursively in predictable order.
"""
result = []
for arg in expr.args:
if arg in ind:
result.append(arg)
else:
try:
if arg.args:
result.extend(_get_indices(arg,ind))
except AttributeError:
pass
return result
def _choose_one_to_keep(a,b,ind):
# we keep the one where indices in ind are in order ind[0] < ind[1]
if _get_indices(a,ind) < _get_indices(b,ind):
return a
else:
return b
expr = expr.expand()
if isinstance(expr,Add):
terms = set(expr.args)
for P in permutation_operators:
new_terms = set([])
while terms:
term = terms.pop()
permuted = P.get_permuted(term)
if permuted in terms:
terms.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
new_terms.add(term)
terms = new_terms
return Add(*terms)
return expr
|
{
"content_hash": "009e7990782ba3149dbe8ae2427ffb04",
"timestamp": "",
"source": "github",
"line_count": 2968,
"max_line_length": 120,
"avg_line_length": 28.86185983827493,
"alnum_prop": 0.5437416824262801,
"repo_name": "fperez/sympy",
"id": "edc9987e65642c872cdaf0cbb7b909c43f16400b",
"size": "85662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/physics/secondquant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7547665"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
}
|
"""
Corruptor classes: classes that encapsulate the noise process for the DAE
training criterion.
"""
# Third-party imports
import numpy
import theano
from theano import tensor
T = tensor
from pylearn2.utils.rng import make_np_rng
# Shortcuts
theano.config.warn.sum_div_dimshuffle_bug = False
if 0:
print('WARNING: using SLOW rng')
RandomStreams = tensor.shared_randomstreams.RandomStreams
else:
import theano.sandbox.rng_mrg
RandomStreams = theano.sandbox.rng_mrg.MRG_RandomStreams
from pylearn2.expr.activations import rescaled_softmax
class Corruptor(object):
"""
.. todo::
WRITEME
Parameters
----------
corruption_level : float
Some measure of the amount of corruption to do. What this means
will be implementation specific.
rng : RandomState object or seed, optional
NumPy random number generator object (or seed for creating one)
used to initialize a `RandomStreams`.
"""
def __init__(self, corruption_level, rng=2001):
# The default rng should be built in a deterministic way
rng = make_np_rng(rng, which_method=['randn', 'randint'])
seed = int(rng.randint(2 ** 30))
self.s_rng = RandomStreams(seed)
self.corruption_level = corruption_level
def __call__(self, inputs):
"""
(Symbolically) corrupt the inputs with a noise process.
Parameters
----------
inputs : tensor_like, or list of tensor_likes
Theano symbolic(s) representing a (list of) (mini)batch of
inputs to be corrupted, with the first dimension indexing
training examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like, or list of tensor_likes
Theano symbolic(s) representing the corresponding corrupted
inputs.
"""
if isinstance(inputs, tensor.Variable):
return self._corrupt(inputs)
else:
return [self._corrupt(inp) for inp in inputs]
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
Notes
-----
This is the method that all subclasses should implement. The logic in
Corruptor.__call__ handles mapping over multiple tensor_like inputs.
"""
raise NotImplementedError()
def corruption_free_energy(self, corrupted_X, X):
"""
.. todo::
WRITEME
"""
raise NotImplementedError()
class DummyCorruptor(Corruptor):
"""
.. todo::
WRITEME
"""
def __call__(self, inputs):
"""
.. todo::
WRITEME
"""
return inputs
class BinomialCorruptor(Corruptor):
"""
A binomial corruptor that sets inputs to 0 with probability
0 < `corruption_level` < 1.
"""
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
return self.s_rng.binomial(
size=x.shape,
n=1,
p=1 - self.corruption_level,
dtype=theano.config.floatX
) * x
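# Usage sketch (illustrative, not part of the original file; the names X
# and f are local to this example):
#
# X = tensor.matrix('X')
# corruptor = BinomialCorruptor(corruption_level=0.3)
# X_tilde = corruptor(X)              # zeros roughly 30% of the entries
# f = theano.function([X], X_tilde)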
class DropoutCorruptor(BinomialCorruptor):
"""
Sets inputs to 0 with probability of corruption_level and then
divides by (1 - corruption_level) to keep expected activation
constant.
"""
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
# for stability
if self.corruption_level < 1e-5:
return x
dropped = super(DropoutCorruptor, self)._corrupt(x)
return 1.0 / (1.0 - self.corruption_level) * dropped
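# The 1 / (1 - p) rescaling keeps the expected activation unchanged: with
# corruption_level p each unit survives with probability (1 - p), so
# E[dropped] = (1 - p) * x and E[dropped / (1 - p)] = x.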
class GaussianCorruptor(Corruptor):
"""
A Gaussian corruptor transforms inputs by adding zero mean isotropic
Gaussian noise.
Parameters
----------
stdev : WRITEME
rng : WRITEME
"""
def __init__(self, stdev, rng=2001):
super(GaussianCorruptor, self).__init__(corruption_level=stdev,
rng=rng)
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
noise = self.s_rng.normal(
size=x.shape,
avg=0.,
std=self.corruption_level,
dtype=theano.config.floatX
)
return noise + x
def corruption_free_energy(self, corrupted_X, X):
"""
.. todo::
WRITEME
"""
axis = range(1, len(X.type.broadcastable))
rval = (T.sum(T.sqr(corrupted_X - X), axis=axis) /
(2. * (self.corruption_level ** 2.)))
assert len(rval.type.broadcastable) == 1
return rval
class SaltPepperCorruptor(Corruptor):
"""
Corrupts the input with salt and pepper noise.
Sets some elements of the tensor to 0 or 1. Only really makes sense
to use on binary-valued matrices.
"""
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
a = self.s_rng.binomial(
size=x.shape,
p=(1 - self.corruption_level),
dtype=theano.config.floatX
)
b = self.s_rng.binomial(
size=x.shape,
p=0.5,
dtype=theano.config.floatX
)
c = T.eq(a, 0) * b
return x * a + c
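# In the mask algebra above, a marks the entries that are kept (a == 1),
# while b supplies an unbiased coin flip; c = eq(a, 0) * b therefore writes
# salt (1) or pepper (0) only into the corrupted positions.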
class OneHotCorruptor(Corruptor):
"""
Corrupts a one-hot vector by changing the active element with some
probability.
"""
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
num_examples = x.shape[0]
num_classes = x.shape[1]
keep_mask = T.addbroadcast(
self.s_rng.binomial(
size=(num_examples, 1),
p=1 - self.corruption_level,
dtype='int8'
),
1
)
# generate random one-hot matrix
pvals = T.alloc(1.0 / num_classes, num_classes)
one_hot = self.s_rng.multinomial(size=(num_examples,), pvals=pvals)
return keep_mask * x + (1 - keep_mask) * one_hot
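# Note on the mechanism above: keep_mask is an (n_examples, 1) column of
# 0/1 draws broadcast across the class dimension, so each row (example) is
# either kept intact or replaced wholesale by a fresh uniform one-hot draw.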
class SmoothOneHotCorruptor(Corruptor):
"""
Corrupts a one-hot vector in a way that preserves some information.
This adds Gaussian noise to a vector and then computes the softmax.
"""
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
noise = self.s_rng.normal(
size=x.shape,
avg=0.,
std=self.corruption_level,
dtype=theano.config.floatX
)
return rescaled_softmax(x + noise)
class BinomialSampler(Corruptor):
"""
.. todo::
WRITEME
"""
def __init__(self, *args, **kwargs):
# pass up a 0 because corruption_level is not relevant here
super(BinomialSampler, self).__init__(0, *args, **kwargs)
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
return self.s_rng.binomial(size=x.shape, p=x,
dtype=theano.config.floatX)
class MultinomialSampler(Corruptor):
"""
.. todo::
WRITEME
"""
def __init__(self, *args, **kwargs):
# corruption_level isn't relevant here
super(MultinomialSampler, self).__init__(0, *args, **kwargs)
def _corrupt(self, x):
"""
Treats each row in matrix as a multinomial trial.
Parameters
----------
x : tensor_like
x must be a matrix where all elements are non-negative
(with at least one non-zero element)
Returns
-------
y : tensor_like
y will have the same shape as x. Each row in y will be a
one hot vector, and can be viewed as the outcome of the
multinomial trial defined by the probabilities of that row
in x.
"""
normalized = x / x.sum(axis=1, keepdims=True)
return self.s_rng.multinomial(pvals=normalized, dtype=theano.config.floatX)
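# Usage sketch (illustrative): each row of x is normalized to a probability
# vector first, so x may hold arbitrary non-negative scores.
#
# scores = tensor.matrix('scores')    # non-negative, at least one > 0 per row
# sampler = MultinomialSampler()
# one_hot = sampler(scores)           # one one-hot draw per row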
class ComposedCorruptor(Corruptor):
"""
.. todo::
WRITEME
Parameters
----------
corruptors : list of Corruptor objects
The corruptors are applied in reverse order. This matches the
typical function application notation. Thus
`ComposedCorruptor(a, b)._corrupt(X)` is the same as `a(b(X))`.
Notes
-----
Does NOT call Corruptor.__init__, so does not contain all of the
standard fields for Corruptors.
"""
def __init__(self, *corruptors):
# corruption_level is not relevant here; Corruptor.__init__ is
# deliberately not called (see the class docstring)
assert len(corruptors) >= 1
self._corruptors = corruptors
def _corrupt(self, x):
"""
Corrupts a single tensor_like object.
Parameters
----------
x : tensor_like
Theano symbolic representing a (mini)batch of inputs to be
corrupted, with the first dimension indexing training
examples and the second indexing data dimensions.
Returns
-------
corrupted : tensor_like
Theano symbolic representing the corresponding corrupted input.
"""
result = x
for c in reversed(self._corruptors):
result = c(result)
return result
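# Composition order follows function application, e.g. (illustrative):
#
# c = ComposedCorruptor(GaussianCorruptor(0.1), BinomialCorruptor(0.2))
# y = c(X)    # same as GaussianCorruptor(0.1)(BinomialCorruptor(0.2)(X))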
##################################################
def get(name):
""" Evaluate the string `name` into a corruptor class, if it exists """
obj = globals()[name]
if issubclass(obj, Corruptor):
return obj
else:
raise NameError(name)
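# Example (illustrative): resolve a corruptor class by name.
#
# cls = get('GaussianCorruptor')
# corruptor = cls(stdev=0.5)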
|
{
"content_hash": "cb81fafe0a140a598c02700ebdf75d96",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 83,
"avg_line_length": 27.838779956427015,
"alnum_prop": 0.5727813429331664,
"repo_name": "kastnerkyle/pylearn2",
"id": "2eec1b9159bfc3ed2fbbcbdaa135fdd069d8f945",
"size": "12778",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pylearn2/corruption.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "53316"
},
{
"name": "C++",
"bytes": "46935"
},
{
"name": "CSS",
"bytes": "10655"
},
{
"name": "Cuda",
"bytes": "1267472"
},
{
"name": "Objective-C",
"bytes": "953"
},
{
"name": "Python",
"bytes": "3645346"
},
{
"name": "Shell",
"bytes": "4162"
}
],
"symlink_target": ""
}
|
from typing import Iterator, Tuple, Set, List, Dict, Any, Optional, Type
from uuid import UUID, uuid4
import concurrent.futures
import argparse
import functools
import contextlib
import attr
import logging
import json
import sys
import os
import signal
import psutil
import time
import hashlib
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import PreservedScalarString
import yaml
import bugzoo
import houston
from bugzoo import Client as BugZooClient
from bugzoo import BugZoo as BugZooDaemon
from bugzoo.core import FileLineSet
from houston import System
from houston.mission import Mission
from houston.trace import CommandTrace, MissionTrace
from houston.ardu.copter import ArduCopter
from compare_traces import load_file as load_traces_file
from compare_traces import matches_ground_truth
from build_traces import build_sandbox
from filter_truth import filter_truth_traces, VALID_LIST_OUTPUT
from hash_mutants import mutation_to_uid
logger = logging.getLogger('houston') # type: logging.Logger
logger.setLevel(logging.DEBUG)
DESCRIPTION = "Builds a ground truth dataset."
class FailedToCreateMutantSnapshot(houston.exceptions.HoustonException):
"""
Thrown when this script fails to create a BugZoo snapshot for a
mutant.
"""
@attr.s
class DatabaseEntry(object):
diff = attr.ib(type=str)
fn_inconsistent_traces = attr.ib(type=Tuple[Tuple[str, str], ...])
fn_consistent_traces = attr.ib(type=Tuple[Tuple[str, str], ...])
def to_dict(self) -> Dict[str, Any]:
return {'diff': PreservedScalarString(self.diff),
'uid': mutation_to_uid(self.diff),
'inconsistent': [{'oracle': o,
'trace': t} for o, t in self.fn_inconsistent_traces],
'consistent': [{'oracle': o,
'trace': t} for o, t in self.fn_consistent_traces]
}
@staticmethod
def from_dict(d: Dict[str, Any]) -> 'DatabaseEntry':
inconsistent_traces = [(i['oracle'], i['trace']) for i in d['inconsistent']]
consistent_traces = [(i['oracle'], i['trace']) for i in d['consistent']]
return DatabaseEntry(d['diff'],
inconsistent_traces,
consistent_traces)
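# Serialization sketch (illustrative; the file names and diff below are
# placeholders, not real data):
#
# entry = DatabaseEntry(diff='<unified diff>',
#                       fn_inconsistent_traces=(('oracle.json', 'mutant.json'),),
#                       fn_consistent_traces=())
# entry.to_dict()   # YAML-friendly dict keyed by 'diff', 'uid',
#                   # 'inconsistent' and 'consistent'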
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
return
children = parent.children(recursive=True)
for process in children:
if process.name() != "python":
continue
logger.debug("killing process %d", process.pid)
process.send_signal(sig)
def setup_logging(verbose: bool = False) -> None:
log_to_stdout = logging.StreamHandler()
log_to_stdout.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter('%(processName)s - %(message)s')
log_to_stdout.setFormatter(formatter)
logging.getLogger('houston').addHandler(log_to_stdout)
logging.getLogger('experiment').addHandler(log_to_stdout)
def parse_args():
p = argparse.ArgumentParser(description=DESCRIPTION)
p.add_argument('snapshot', help='the name of the BugZoo snapshot')
p.add_argument('mutants', help='path to a JSON file of mutants.')
p.add_argument('oracle', type=str, help='path to oracle trace directory.')
p.add_argument('output', type=str,
help='the file to which the ground truth dataset should be written.')
p.add_argument('--verbose', action='store_true',
help='increases logging verbosity')
p.add_argument('--threads', type=int, default=1,
help='number of threads to use when building trace files.')
p.add_argument('--coverage', action='store_true', default=False,
help='collect coverage info')
return p.parse_args()
@contextlib.contextmanager
def build_mutant_snapshot(bz: BugZooClient,
snapshot: bugzoo.Bug,
coverage: bool,
diff: str
) -> Iterator[bugzoo.Bug]:
# generate a name for the snapshot and image
uuid = uuid4().hex[:64]
name_image = "houston-mutant:{}".format(uuid)
lines = diff.split('\n')
# create a description of the mutant snapshot
mutant = bugzoo.Bug(name=name_image,
image=name_image,
dataset='houston-mutants',
program=snapshot.program,
source=None,
source_dir=snapshot.source_dir,
languages=snapshot.languages,
tests=snapshot.tests,
compiler=snapshot.compiler,
instructions_coverage=snapshot.instructions_coverage)
try:
# create and store the Docker image
patch = bugzoo.Patch.from_unidiff(diff)
container = None
try:
container = bz.containers.provision(snapshot)
if not bz.containers.patch(container, patch):
logger.error("Failed to patch %s", str(patch))
m = "failed to patch using diff: {}".format(diff)
raise FailedToCreateMutantSnapshot(m)
logger.debug("patched using diff: %s", diff)
if coverage:
build_attempt = bz.containers.instrument(container)
else:
build_attempt = bz.containers.build(container)
if build_attempt and not build_attempt.successful:
logger.error("build failure:\n%s",
build_attempt.response.output)
m = "failed to build mutant: {}".format(diff)
raise FailedToCreateMutantSnapshot(m)
bz.containers.persist(container, name_image)
finally:
if container:
del bz.containers[container.uid]
# register the snapshot
bz.bugs.register(mutant)
yield mutant
# ensure that all resources are freed
finally:
if mutant.name in bz.bugs:
del bz.bugs[mutant.name]
if bz.docker.has_image(name_image):
bz.docker.delete_image(name_image)
def touches_the_lines(patch: bugzoo.core.Patch,
oracle_traces: List[MissionTrace]
) -> bool:
coverage = None
for oracle in oracle_traces:
if not oracle.coverage:
continue
coverage = oracle.coverage
break
if not coverage:
# trace doesn't have coverage info
raise Exception("Trace doesn't have coverage info")
modified_lines = {}
for fp in patch.file_patches:
filename = fp.new_fn
lines = set()
for hunk in fp.hunks:
for l in hunk.lines:
if isinstance(l, (bugzoo.InsertedLine, bugzoo.DeletedLine)):
lines.add(l.number)
modified_lines[filename] = lines
logger.debug("Modified lines %s", modified_lines)
fileline_set = FileLineSet(modified_lines)
if fileline_set.intersection(coverage).files:
# line is covered
return True
return False
def process_mutation(system: Type[System],
client_bugzoo: BugZooClient,
snapshot_orig: bugzoo.Bug,
trace_filenames: List[str],
dir_mutant_traces: str,
coverage: bool,
diff: str
) -> Optional[DatabaseEntry]:
bz = client_bugzoo
sandbox_cls = system.sandbox
patch = bugzoo.Patch.from_unidiff(diff)
inconsistent_results = []
consistent_results = []
count = 3
# build an ephemeral image for the mutant
try:
with build_mutant_snapshot(client_bugzoo, snapshot_orig, coverage, diff) as snapshot:
def obtain_trace(mission: houston.Mission) -> MissionTrace:
jsn_mission = json.dumps(mission.to_dict()) # FIXME hack
with build_sandbox(client_bugzoo, snapshot, jsn_mission, False) as sandbox:
return sandbox.run_and_trace(mission.commands, coverage)
for fn_trace in trace_filenames:
logger.debug("evaluating oracle trace: %s", fn_trace)
mission, oracle_traces = load_traces_file(fn_trace)
try:
if count <= 0 or (coverage and not touches_the_lines(patch, oracle_traces)):
logger.debug("This mission is not valid: %s", mission.commands)
continue
except Exception:
logger.debug("no coverage info for trace: %s", fn_trace)
continue
# write mutant trace to file
h = hashlib.sha256()
h.update(diff.encode())
h.update(fn_trace.encode())
identifier = h.hexdigest()
logger.debug("id %s", identifier)
fn_trace_mut_rel = "{}.json".format(identifier)
fn_trace_mut = os.path.join(dir_mutant_traces, fn_trace_mut_rel)
try:
if os.path.exists(fn_trace_mut):
logger.info("Already evaluated! %s", fn_trace_mut_rel)
_, trace_mutant = load_traces_file(fn_trace_mut)
else:
trace_mutant = obtain_trace(mission)
jsn = {'mission': mission.to_dict(),
'traces': [trace_mutant.to_dict()]}
with open(fn_trace_mut, 'w') as f:
json.dump(jsn, f)
except Exception:
logger.exception("failed to build trace %s for mutant: %s", fn_trace, diff)
continue
count -= 1
try:
if not matches_ground_truth(trace_mutant, oracle_traces):
logger.info("found an acceptable mutant!")
inconsistent_results.append((fn_trace, fn_trace_mut))
else:
logger.debug("mutant is not sufficiently different for given mission.")
consistent_results.append((fn_trace, fn_trace_mut))
except houston.exceptions.HoustonException as e:
logger.exception("failed to check matching of traces %s", e)
continue
except FailedToCreateMutantSnapshot:
logger.error("failed to build snapshot for mutant: %s", diff)
except (houston.exceptions.NoConnectionError, houston.exceptions.ConnectionLostError):
# must precede the generic handler below, or it would be unreachable
logger.error("mutant resulted in crash")
except Exception:
logger.exception("failed to obtain data for mutant: %s", diff)
# return outside of the exception handlers: a return inside a finally
# block would silently swallow exceptions such as KeyboardInterrupt
if inconsistent_results or consistent_results:
return DatabaseEntry(diff, tuple(inconsistent_results), tuple(consistent_results))
return None
def main():
args = parse_args()
setup_logging(verbose=args.verbose)
name_snapshot = args.snapshot
fn_mutants = args.mutants
dir_output = args.output
dir_oracle = args.oracle
fn_output_database = os.path.join(dir_output, 'database.yml')
num_threads = args.threads
system = ArduCopter
assert num_threads >= 1
# ensure that the output directory exists
os.makedirs(args.output, exist_ok=True)
if not os.path.exists(dir_oracle):
logger.error("oracle directory not found: %s", dir_oracle)
sys.exit(1)
# load the mutant diffs
try:
with open(fn_mutants, 'r') as f:
diffs = [e['diff'] for e in yaml.safe_load(f)]
logger.debug("loaded %d diffs from database", len(diffs))
except FileNotFoundError:
# the specific handler must precede the generic one to be reachable
logger.error("mutation database file not found: %s", fn_mutants)
sys.exit(1)
except Exception:
logger.exception("failed to load mutation database: %s", fn_mutants)
sys.exit(1)
# obtain a list of oracle traces
filtered_traces_fn = os.path.join(dir_oracle, VALID_LIST_OUTPUT)
if os.path.exists(filtered_traces_fn):
with open(filtered_traces_fn, 'r') as f:
trace_filenames = YAML().load(f)
else:
trace_filenames = filter_truth_traces(dir_oracle)
with open(filtered_traces_fn, 'w') as f:
YAML().dump(trace_filenames, f)
trace_filenames = [os.path.join(dir_oracle, fn) for fn in trace_filenames]
logger.info("Total number of %d valid truth", len(trace_filenames))
db_entries = [] # type: List[DatabaseEntry]
futures = []
with bugzoo.server.ephemeral() as client_bugzoo:
with concurrent.futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
try:
snapshot = client_bugzoo.bugs[name_snapshot]
process = functools.partial(process_mutation,
system,
client_bugzoo,
snapshot,
trace_filenames,
dir_output,
args.coverage)
for diff in diffs:
future = executor.submit(process, diff)
futures.append(future)
for future in concurrent.futures.as_completed(futures):
entry = future.result()
if entry:
db_entries.append(entry)
except (KeyboardInterrupt, SystemExit):
logger.info("Received keyboard interrupt. Shutting down...")
for fut in futures:
logger.debug("Cancelling: %s", fut)
fut.cancel()
logger.debug("Cancelled: %s", fut.cancelled())
logger.info("Shutting down the process pool")
executor.shutdown(wait=False)
kill_child_processes(os.getpid())
logger.info("Cancelled all jobs and shutdown executor.")
time.sleep(5)
client_bugzoo.containers.clear()
logger.info("Killed all containers")
logger.info("Removing all images")
bug_names = [b for b in client_bugzoo.bugs if 'houston-mutant' in b]
for b in bug_names:
logger.debug("Removing image %s", b)
del client_bugzoo.bugs[b]
if client_bugzoo.docker.has_image(b):
client_bugzoo.docker.delete_image(b)
logger.debug("Removed all images")
# save to disk
logger.info("finished constructing evaluation dataset.")
logger.debug("saving evaluation dataset to disk.")
jsn = {
'oracle-directory': dir_oracle,
'snapshot': name_snapshot,
'entries': [e.to_dict() for e in db_entries]
}
with open(fn_output_database, 'w') as f:
YAML().dump(jsn, f)
logger.info("saved evaluation dataset to disk")
if __name__ == '__main__':
main()
|
{
"content_hash": "131bedc0f578d0cd5731dee9d4bea37e",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 96,
"avg_line_length": 38.44584382871537,
"alnum_prop": 0.5752473301447946,
"repo_name": "squaresLab/Houston",
"id": "4df3d5240f47018838e4d621ebecac21deb16510",
"size": "15263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/ground_truth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2495"
},
{
"name": "Python",
"bytes": "323485"
},
{
"name": "R",
"bytes": "5384"
},
{
"name": "Shell",
"bytes": "8780"
}
],
"symlink_target": ""
}
|
import unittest
import uuid
import pytest
import azure.cosmos.documents as documents
import azure.cosmos.cosmos_client as cosmos_client
from azure.cosmos import query_iterable
import azure.cosmos.base as base
import test.test_config as test_config
# IMPORTANT NOTES:
# Most test cases in this file create collections in your Azure Cosmos
# account. Collections are billing entities; by running these test cases,
# you may incur monetary costs on your account.
# To run the tests, replace the two member fields (masterKey and host)
# below with the values associated with your Azure Cosmos account.
@pytest.mark.usefixtures("teardown")
class RuPerMinTests(unittest.TestCase):
"""RuPerMinTests Tests.
"""
host = test_config._test_config.host
masterKey = test_config._test_config.masterKey
connectionPolicy = test_config._test_config.connectionPolicy
client = cosmos_client.CosmosClient(host, {'masterKey': masterKey}, connectionPolicy)
created_db = test_config._test_config.create_database_if_not_exist(client)
@classmethod
def setUpClass(cls):
# creates the database, collection, and insert all the documents
# we will gain some speed up in running the tests by creating the
# database, collection and inserting all the docs only once
if (cls.masterKey == '[YOUR_KEY_HERE]' or cls.host == '[YOUR_ENDPOINT_HERE]'):
raise Exception("You must specify your Azure Cosmos account values for "
"'masterKey' and 'host' at the top of this class to run the "
"tests.")
def _query_offers(self, collection_self_link):
offers = list(self.client.ReadOffers())
for o in offers:
if o['resource'] == collection_self_link:
return o
return None
def test_create_collection_with_ru_pm(self):
# create an ru pm collection
collection_definition = {
'id' : "test_create_collection_with_ru_pm collection" + str(uuid.uuid4())
}
options = {
'offerEnableRUPerMinuteThroughput': True,
'offerVersion': "V2",
'offerThroughput': 400
}
created_collection = self.client.CreateContainer(self.created_db['_self'], collection_definition, options)
offer = self._query_offers(created_collection['_self'])
self.assertIsNotNone(offer)
self.assertEqual(offer['offerType'], "Invalid")
self.assertIsNotNone(offer['content'])
self.client.DeleteContainer(created_collection['_self'])
def test_create_collection_without_ru_pm(self):
# create a non ru pm collection
collection_definition = {
'id' : "test_create_collection_without_ru_pm collection" + str(uuid.uuid4())
}
options = {
'offerEnableRUPerMinuteThroughput': False,
'offerVersion': "V2",
'offerThroughput': 400
}
created_collection = self.client.CreateContainer(self.created_db['_self'], collection_definition, options)
offer = self._query_offers(created_collection['_self'])
self.assertIsNotNone(offer)
self.assertEqual(offer['offerType'], "Invalid")
self.assertIsNotNone(offer['content'])
self.client.DeleteContainer(created_collection['_self'])
def test_create_collection_disable_ru_pm_on_request(self):
# create a non ru pm collection
collection_definition = {
'id' : "test_create_collection_disable_ru_pm_on_request collection" + str(uuid.uuid4())
}
options = {
'offerVersion': "V2",
'offerThroughput': 400
}
created_collection = self.client.CreateContainer(self.created_db['_self'], collection_definition, options)
offer = self._query_offers(created_collection['_self'])
self.assertIsNotNone(offer)
self.assertEqual(offer['offerType'], "Invalid")
self.assertIsNotNone(offer['content'])
self.assertEqual(offer['content']['offerIsRUPerMinuteThroughputEnabled'], False)
request_options = {
'disableRUPerMinuteUsage': True
}
doc = {
'id' : 'test_doc' + str(uuid.uuid4())
}
self.client.CreateItem(created_collection['_self'], doc, request_options)
self.client.DeleteContainer(created_collection['_self'])
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
{
"content_hash": "80d018f1c49aef588e44eef79d13d58e",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 114,
"avg_line_length": 37.18032786885246,
"alnum_prop": 0.6399911816578483,
"repo_name": "Azure/azure-documentdb-python",
"id": "0fbe659ec40e1b462a5be9342cdf1f0cd13599e7",
"size": "5658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/ru_per_min_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "663705"
}
],
"symlink_target": ""
}
|
"""Automx related tasks."""
import os
import pwd
import shutil
import stat
from .. import python
from .. import system
from .. import utils
from . import base
class Automx(base.Installer):
"""Automx installation."""
appname = "automx"
config_files = ["automx.conf"]
no_daemon = True
packages = {
"deb": ["memcached", "unzip"],
"rpm": ["memcached", "unzip"]
}
with_user = True
def __init__(self, *args, **kwargs):
"""Get configuration."""
super(Automx, self).__init__(*args, **kwargs)
self.venv_path = self.config.get("automx", "venv_path")
self.instance_path = self.config.get("automx", "instance_path")
def get_template_context(self):
"""Additional variables."""
context = super(Automx, self).get_template_context()
sql_dsn = "{}://{}:{}@{}:{}/{}".format(
"postgresql" if self.dbengine == "postgres" else self.dbengine,
self.config.get("modoboa", "dbuser"),
self.config.get("modoboa", "dbpassword"),
self.dbhost,
self.dbport,
self.config.get("modoboa", "dbname"))
if self.db_driver == "pgsql":
sql_query = (
"SELECT first_name || ' ' || last_name AS display_name, email"
", SPLIT_PART(email, '@', 2) AS domain "
"FROM core_user WHERE email='%s' AND is_active")
else:
sql_query = (
"SELECT concat(first_name, ' ', last_name) AS display_name, "
"email, SUBSTRING_INDEX(email, '@', -1) AS domain "
"FROM core_user WHERE email='%s' AND is_active=1"
)
context.update({"sql_dsn": sql_dsn, "sql_query": sql_query})
return context
def _setup_venv(self):
"""Prepare a python virtualenv."""
python.setup_virtualenv(
self.venv_path, sudo_user=self.user, python_version=3)
packages = [
"future", "lxml", "ipaddress", "sqlalchemy", "python-memcached",
"python-dateutil", "configparser"
]
if self.dbengine == "postgres":
packages.append("psycopg2-binary")
else:
packages.append("mysqlclient")
python.install_packages(packages, self.venv_path, sudo_user=self.user)
target = "{}/master.zip".format(self.home_dir)
if os.path.exists(target):
os.unlink(target)
utils.exec_cmd(
"wget https://github.com/sys4/automx/archive/master.zip",
sudo_user=self.user, cwd=self.home_dir)
self.repo_dir = "{}/automx-master".format(self.home_dir)
if os.path.exists(self.repo_dir):
shutil.rmtree(self.repo_dir)
utils.exec_cmd(
"unzip master.zip", sudo_user=self.user, cwd=self.home_dir)
utils.exec_cmd(
"{} setup.py install".format(
python.get_path("python", self.venv_path)),
cwd=self.repo_dir)
def _deploy_instance(self):
"""Copy files to instance dir."""
if not os.path.exists(self.instance_path):
pw = pwd.getpwnam(self.user)
mode = (
stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
utils.mkdir(self.instance_path, mode, pw[2], pw[3])
path = "{}/src/automx_wsgi.py".format(self.repo_dir)
utils.exec_cmd("cp {} {}".format(path, self.instance_path),
sudo_user=self.user, cwd=self.home_dir)
def post_run(self):
"""Additional tasks."""
self._setup_venv()
self._deploy_instance()
system.enable_and_start_service("memcached")
|
{
"content_hash": "8caf563c3a1655e232e7ba2646c5163e",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 36.372549019607845,
"alnum_prop": 0.5493261455525607,
"repo_name": "modoboa/modoboa-installer",
"id": "7b9ed56f142efc0ccbfb4b4791a75c53f8586047",
"size": "3710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modoboa_installer/scripts/automx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "968"
},
{
"name": "Python",
"bytes": "133233"
},
{
"name": "Shell",
"bytes": "351"
},
{
"name": "Smarty",
"bytes": "105636"
}
],
"symlink_target": ""
}
|
from rhizo.main import c
#
# Create the git base command with default options
#
def git_base_command():
git_flow_dir = c.config.get('git_flow_dir', '/home/pi/flow')
return [ 'git',
'--git-dir=%s/.git' % (git_flow_dir),
'--work-tree=%s' % (git_flow_dir) ]
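# Minimal usage sketch (assumes the standard library subprocess module;
# 'status' is an illustrative subcommand, not part of this module):
#
#   import subprocess
#   output = subprocess.check_output(git_base_command() + ['status'])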
|
{
"content_hash": "780d6736a8e63bea0181dcc87994392d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 28,
"alnum_prop": 0.538961038961039,
"repo_name": "manylabs/flow",
"id": "110df20daedd0aa7715937a9cf4ab01f2a2c64c1",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flow/git_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92403"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
}
|
"""
Test the SBValue API linked_list_iter, which treats the SBValue as a linked list
and supports iteration until the end of the list is reached.
"""
from __future__ import print_function
import os
import time
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ValueAsLinkedListTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# We'll use the test method name as the exe_name.
self.exe_name = self.testMethodName
# Find the line number to break at.
self.line = line_number('main.cpp', '// Break at this line')
# Py3 asserts due to a bug in SWIG. A fix for this was upstreamed into
# SWIG 3.0.8.
@skipIf(py_version=['>=', (3, 0)], swig_version=['<', (3, 0, 8)])
@add_test_categories(['pyapi'])
def test(self):
"""Exercise SBValue API linked_list_iter."""
d = {'EXE': self.exe_name}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
exe = self.getBuildArtifact(self.exe_name)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Get Frame #0.
self.assertTrue(process.GetState() == lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint condition")
frame0 = thread.GetFrameAtIndex(0)
# Get variable 'task_head'.
task_head = frame0.FindVariable('task_head')
self.assertTrue(task_head, VALID_VARIABLE)
self.DebugSBValue(task_head)
        # By design (see main.cpp), the visited IDs are: [1, 2, 4, 5].
visitedIDs = [1, 2, 4, 5]
list = []
cvf = lldbutil.ChildVisitingFormatter(indent_child=2)
for t in task_head.linked_list_iter('next'):
self.assertTrue(t, VALID_VARIABLE)
# Make sure that 'next' corresponds to an SBValue with pointer
# type.
self.assertTrue(t.TypeIsPointerType())
if self.TraceOn():
print(cvf.format(t))
list.append(int(t.GetChildMemberWithName("id").GetValue()))
        # Sanity check that we visited all the items (no more, no less).
if self.TraceOn():
print("visited IDs:", list)
self.assertTrue(visitedIDs == list)
# Let's exercise the linked_list_iter() API again, this time supplying
# our end of list test function.
def eol(val):
"""Test function to determine end of list."""
# End of list is reached if either the value object is invalid
# or it corresponds to a null pointer.
if not val or int(val.GetValue(), 16) == 0:
return True
# Also check the "id" for correct semantics. If id <= 0, the item
# is corrupted, let's return True to signify end of list.
if int(val.GetChildMemberWithName("id").GetValue(), 0) <= 0:
return True
# Otherwise, return False.
return False
list = []
for t in task_head.linked_list_iter('next', eol):
self.assertTrue(t, VALID_VARIABLE)
# Make sure that 'next' corresponds to an SBValue with pointer
# type.
self.assertTrue(t.TypeIsPointerType())
if self.TraceOn():
print(cvf.format(t))
list.append(int(t.GetChildMemberWithName("id").GetValue()))
        # Sanity check that we visited all the items (no more, no less).
if self.TraceOn():
print("visited IDs:", list)
self.assertTrue(visitedIDs == list)
# Get variable 'empty_task_head'.
empty_task_head = frame0.FindVariable('empty_task_head')
self.assertTrue(empty_task_head, VALID_VARIABLE)
self.DebugSBValue(empty_task_head)
list = []
# There is no iterable item from empty_task_head.linked_list_iter().
for t in empty_task_head.linked_list_iter('next', eol):
if self.TraceOn():
print(cvf.format(t))
list.append(int(t.GetChildMemberWithName("id").GetValue()))
self.assertTrue(len(list) == 0)
# Get variable 'task_evil'.
task_evil = frame0.FindVariable('task_evil')
self.assertTrue(task_evil, VALID_VARIABLE)
self.DebugSBValue(task_evil)
list = []
        # There are 3 iterable items from task_evil.linked_list_iter(). :-)
for t in task_evil.linked_list_iter('next'):
if self.TraceOn():
print(cvf.format(t))
list.append(int(t.GetChildMemberWithName("id").GetValue()))
self.assertTrue(len(list) == 3)
|
{
"content_hash": "860000b95dc40c6d5c69a48bc4a6fd7c",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 37.423611111111114,
"alnum_prop": 0.6034514752273149,
"repo_name": "youtube/cobalt",
"id": "1b009521d253ad63b59067b92ecab9490a468bb0",
"size": "5389",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/python_api/value/linked_list/TestValueAPILinkedList.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import formless
from zope.interface import implements
class IBar(formless.TypedInterface):
bar = formless.String()
class Bar:
implements(IBar)
def __init__(self, bar):
self.bar = bar
def __str__(self):
return "A Bar: %s" % self.bar
class IFrob(formless.TypedInterface):
integer = formless.Integer()
class Frob:
implements(IFrob)
def __init__(self, integer):
self.integer = integer
def frobazz(self, other):
return Frob(self.integer ** other.integer)
def __str__(self):
return "A frob of value %s" % self.integer
class IObjectTest(formless.TypedInterface):
def someMethod(one=formless.Object(interface=IBar), two=formless.Integer(description="an integer please")):
"""Some Method.
This method takes an IBar instance.
"""
return None
someMethod = formless.autocallable(someMethod)
def frobber(frobber=formless.Object(interface=IFrob), frobee=formless.Object(IFrob)):
"""Frobber.
Takes two frobs and raises one to the power of the other.
"""
return IFrob
frobber = formless.autocallable(frobber)
someList = formless.List()
class ObjectTester:
implements(IObjectTest)
def __init__(self):
self.someList = [
Bar("boop"), Bar("bap"),
Frob(5), Frob(9), Frob(23), Frob(1234)
]
def someMethod(self, one, two):
print "ONE TWO", `one`, `two`
def frobber(self, frobber, frobee):
return frobber.frobazz(frobee)
class CompoundChecker(formless.Compound):
def coerce(self, data):
one, two = data
if (one, two) != (6, 9):
raise formless.InputError("What do you get when you multiply six by nine?")
class IAnotherTest(formless.TypedInterface):
def aBarMethod(abar=formless.Object(interface=IBar)):
"""A Bar Method
This method takes a bar, but there are no bar instances on this page.
You'll have to use the shelf.
"""
return str
aBarMethod = formless.autocallable(aBarMethod)
def aFrobMethod(aFrob=formless.Object(interface=IFrob)):
"""A Frob Method
This method takes a frob, but there are no frob instances on this page.
You'll have to use the shelf.
"""
return str
aFrobMethod = formless.autocallable(aFrobMethod)
def whatIsMyClass(anObj=formless.Object()):
"""What is my class?
Pass an object and get back the class in your hand.
"""
return formless.Object()
whatIsMyClass = formless.autocallable(whatIsMyClass)
def setBreakpoint(breakpoint=formless.String()):
"""Set a breakpoint
Set a breakpoint at the given filename and line number. String passed is equivalent
to doing b(reak) ([file:]lineno | function) in pdb.
"""
return None
setBreakpoint = formless.autocallable(setBreakpoint)
breakpoints = formless.List()
def compoundTest(
aCompound = formless.Compound(
[formless.String(label="firstname"), formless.String(label="lastname")],
label="Full Name"),
anInt = formless.Integer()):
"""Compound Test
A test of a widget/controller which renders multiple fields, triggers multiple
validators, but gathers the result into one method argument. There can
be an additional validation step which validates that the compound data
as a whole is valid.
"""
return str
compoundTest = formless.autocallable(compoundTest)
def compoundChecker(
theAnswer = CompoundChecker(
[formless.Integer(label="six"), formless.Integer(label="nine")],
label="The Answer",
description="What is the meaning of life, the universe, and everything?")
):
"""The Answer
Please type the integer six in the first box, and nine in the second.
"""
return formless.Object(label="The Answer", interface=formless.Integer)
compoundChecker = formless.autocallable(compoundChecker)
class AnotherTest:
implements(IAnotherTest)
def aBarMethod(self, abar):
return "You passed me %s" % abar
def aFrobMethod(self, aFrob):
return "You passed me %s" % aFrob
def whatIsMyClass(self, anObj):
if hasattr(anObj, '__class__'):
return anObj.__class__
return type(anObj)
def _getDebugger(self):
import sys, pdb
debugInstance = sys.modules.get('debugInstance')
if debugInstance is None:
sys.modules['debugInstance'] = debugInstance = pdb.Pdb()
debugInstance.reset()
return debugInstance
def setBreakpoint(self, breakpoint):
import sys
debugInstance = self._getDebugger()
debugInstance.do_break(debugInstance.precmd(breakpoint))
debugInstance.quitting = True
sys.settrace(debugInstance.trace_dispatch)
debugInstance.quitting = False
def _currentBreakpoints(self):
debugInstance = self._getDebugger()
class BreakpointRemover(list):
def remove(self, removal):
debugInstance.breaks[removal.fn].remove(removal.ln)
if not debugInstance.breaks[removal.fn]:
del debugInstance.breaks[removal.fn]
list.remove(self, removal)
class Dummy(formless.TypedInterface): pass
class BP:
implements(Dummy)
def __init__(self, fn, ln):
self.fn=fn
self.ln=ln
def __str__(self):
return "Breakpoint in file %s at line %s" % (self.fn, self.ln)
breakpoints = BreakpointRemover()
for fn in debugInstance.breaks.keys():
for lineno in debugInstance.breaks[fn]:
breakpoints.append(BP(fn, lineno))
return breakpoints
breakpoints = property(_currentBreakpoints)
def compoundTest(self, aCompound, anInt):
return "COMPOUND! %s %s" % (aCompound, anInt)
def compoundChecker(self, theAnswer):
return 42
|
{
"content_hash": "9156248f0ebb54a4a79a83762a559231",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 111,
"avg_line_length": 30.387254901960784,
"alnum_prop": 0.6186481690595257,
"repo_name": "perkinslr/pypyjs",
"id": "a7a8e956a62621796fec64af4137e680c185daeb",
"size": "6256",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "addedLibraries/nevow/test/test_passobj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "63586"
},
{
"name": "CSS",
"bytes": "7648"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "HTML",
"bytes": "7097"
},
{
"name": "JavaScript",
"bytes": "488078"
},
{
"name": "Makefile",
"bytes": "5877"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "26517313"
},
{
"name": "Shell",
"bytes": "1406"
}
],
"symlink_target": ""
}
|
"""
Enomaly ECP driver
"""
import time
import base64
import os
import socket
import binascii
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.utils.py3 import u
# JSON is included in the standard library starting with Python 2.6. For 2.5
# and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.base import Response, ConnectionUserAndKey
from libcloud.compute.base import NodeDriver, NodeSize, NodeLocation
from libcloud.compute.base import NodeImage, Node
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
from libcloud.compute.base import is_private_subnet
#Defaults
API_HOST = ''
API_PORT = (80, 443)
class ECPResponse(Response):
def success(self):
if self.status == httplib.OK or self.status == httplib.CREATED:
try:
j_body = json.loads(self.body)
except ValueError:
self.error = "JSON response cannot be decoded."
return False
if j_body['errno'] == 0:
return True
else:
self.error = "ECP error: %s" % j_body['message']
return False
elif self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError()
else:
self.error = "HTTP Error Code: %s" % self.status
return False
def parse_error(self):
return self.error
#Interpret the json responses - no error checking required
def parse_body(self):
return json.loads(self.body)
def getheaders(self):
return self.headers
class ECPConnection(ConnectionUserAndKey):
"""
Connection class for the Enomaly ECP driver
"""
responseCls = ECPResponse
host = API_HOST
port = API_PORT
def add_default_headers(self, headers):
#Authentication
username = self.user_id
password = self.key
base64string = base64.encodestring(
b('%s:%s' % (username, password)))[:-1]
authheader = "Basic %s" % base64string
headers['Authorization'] = authheader
return headers
def _encode_multipart_formdata(self, fields):
"""
Based on Wade Leftwich's function:
http://code.activestate.com/recipes/146306/
"""
#use a random boundary that does not appear in the fields
boundary = ''
while boundary in ''.join(fields):
boundary = u(binascii.hexlify(os.urandom(16)))
L = []
for i in fields:
L.append('--' + boundary)
L.append('Content-Disposition: form-data; name="%s"' % i)
L.append('')
L.append(fields[i])
L.append('--' + boundary + '--')
L.append('')
body = '\r\n'.join(L)
content_type = 'multipart/form-data; boundary=%s' % boundary
header = {'Content-Type': content_type}
return header, body
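# Illustrative sketch of the multipart helper above (constructor arguments,
# boundary and body contents are examples only):
#
#   conn = ECPConnection('user', 'key')
#   header, body = conn._encode_multipart_formdata({'action': 'stop'})
#   # header -> {'Content-Type': 'multipart/form-data; boundary=<random hex>'}
#   # body   -> one part with 'Content-Disposition: form-data; name="action"'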
class ECPNodeDriver(NodeDriver):
"""
Enomaly ECP node driver
"""
name = "Enomaly Elastic Computing Platform"
website = 'http://www.enomaly.com/'
type = Provider.ECP
connectionCls = ECPConnection
def list_nodes(self):
"""
Returns a list of all running Nodes
@rtype: C{list} of L{Node}
"""
#Make the call
res = self.connection.request('/rest/hosting/vm/list').parse_body()
#Put together a list of node objects
nodes = []
for vm in res['vms']:
node = self._to_node(vm)
            if node is not None:
nodes.append(node)
#And return it
return nodes
def _to_node(self, vm):
"""
Turns a (json) dictionary into a Node object.
This returns only running VMs.
"""
#Check state
if not vm['state'] == "running":
return None
#IPs
iplist = [interface['ip'] for interface in vm['interfaces'] if
interface['ip'] != '127.0.0.1']
public_ips = []
private_ips = []
for ip in iplist:
try:
socket.inet_aton(ip)
except socket.error:
# not a valid ip
continue
if is_private_subnet(ip):
private_ips.append(ip)
else:
public_ips.append(ip)
#Create the node object
n = Node(
id=vm['uuid'],
name=vm['name'],
state=NodeState.RUNNING,
public_ips=public_ips,
private_ips=private_ips,
driver=self,
)
return n
def reboot_node(self, node):
"""
Shuts down a VM and then starts it again.
@inherits: L{NodeDriver.reboot_node}
"""
#Turn the VM off
#Black magic to make the POST requests work
d = self.connection._encode_multipart_formdata({'action': 'stop'})
self.connection.request(
'/rest/hosting/vm/%s' % node.id,
method='POST',
headers=d[0],
data=d[1]
).parse_body()
node.state = NodeState.REBOOTING
#Wait for it to turn off and then continue (to turn it on again)
while node.state == NodeState.REBOOTING:
#Check if it's off.
response = self.connection.request(
'/rest/hosting/vm/%s' % node.id
).parse_body()
if response['vm']['state'] == 'off':
node.state = NodeState.TERMINATED
else:
time.sleep(5)
#Turn the VM back on.
#Black magic to make the POST requests work
d = self.connection._encode_multipart_formdata({'action': 'start'})
self.connection.request(
'/rest/hosting/vm/%s' % node.id,
method='POST',
headers=d[0],
data=d[1]
).parse_body()
node.state = NodeState.RUNNING
return True
def destroy_node(self, node):
"""
Shuts down and deletes a VM.
@inherits: L{NodeDriver.destroy_node}
"""
#Shut down first
#Black magic to make the POST requests work
d = self.connection._encode_multipart_formdata({'action': 'stop'})
self.connection.request(
'/rest/hosting/vm/%s' % node.id,
method='POST',
headers=d[0],
data=d[1]
).parse_body()
        #Ensure there was no application-level error
node.state = NodeState.PENDING
#Wait for the VM to turn off before continuing
while node.state == NodeState.PENDING:
#Check if it's off.
response = self.connection.request(
'/rest/hosting/vm/%s' % node.id
).parse_body()
if response['vm']['state'] == 'off':
node.state = NodeState.TERMINATED
else:
time.sleep(5)
#Delete the VM
#Black magic to make the POST requests work
d = self.connection._encode_multipart_formdata({'action': 'delete'})
self.connection.request(
'/rest/hosting/vm/%s' % (node.id),
method='POST',
headers=d[0],
data=d[1]
).parse_body()
return True
def list_images(self, location=None):
"""
        Returns a list of all package templates, aka appliances, aka images.
@inherits: L{NodeDriver.list_images}
"""
#Make the call
response = self.connection.request(
'/rest/hosting/ptemplate/list').parse_body()
#Turn the response into an array of NodeImage objects
images = []
for ptemplate in response['packages']:
images.append(NodeImage(
id=ptemplate['uuid'],
name='%s: %s' % (ptemplate['name'], ptemplate['description']),
driver=self,)
)
return images
def list_sizes(self, location=None):
"""
Returns a list of all hardware templates
@inherits: L{NodeDriver.list_sizes}
"""
#Make the call
response = self.connection.request(
'/rest/hosting/htemplate/list').parse_body()
#Turn the response into an array of NodeSize objects
sizes = []
for htemplate in response['templates']:
sizes.append(NodeSize(
id=htemplate['uuid'],
name=htemplate['name'],
ram=htemplate['memory'],
disk=0, # Disk is independent of hardware template.
bandwidth=0, # There is no way to keep track of bandwidth.
price=0, # The billing system is external.
driver=self,)
)
return sizes
def list_locations(self):
"""
This feature does not exist in ECP. Returns hard coded dummy location.
@rtype: C{list} of L{NodeLocation}
"""
return [NodeLocation(id=1,
name="Cloud",
country='',
driver=self),
]
def create_node(self, **kwargs):
"""
Creates a virtual machine.
@keyword name: String with a name for this new node (required)
@type name: C{str}
@keyword size: The size of resources allocated to this node .
(required)
@type size: L{NodeSize}
@keyword image: OS Image to boot on node. (required)
@type image: L{NodeImage}
@rtype: L{Node}
"""
#Find out what network to put the VM on.
res = self.connection.request(
'/rest/hosting/network/list').parse_body()
        #Use the first / default network because there is no way to specify
        #which one
network = res['networks'][0]['uuid']
#Prepare to make the VM
data = {
'name': str(kwargs['name']),
'package': str(kwargs['image'].id),
'hardware': str(kwargs['size'].id),
'network_uuid': str(network),
'disk': ''
}
#Black magic to make the POST requests work
d = self.connection._encode_multipart_formdata(data)
response = self.connection.request(
'/rest/hosting/vm/',
method='PUT',
headers=d[0],
data=d[1]
).parse_body()
#Create a node object and return it.
n = Node(
id=response['machine_id'],
name=data['name'],
state=NodeState.PENDING,
public_ips=[],
private_ips=[],
driver=self,
)
return n
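# Minimal usage sketch (credentials and returned objects are illustrative;
# in libcloud the driver class is normally obtained via
# get_driver(Provider.ECP) rather than instantiated directly):
#
#   driver = ECPNodeDriver('user', 'secret')
#   size = driver.list_sizes()[0]
#   image = driver.list_images()[0]
#   node = driver.create_node(name='test-vm', size=size, image=image)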
|
{
"content_hash": "bf8a65da35c4d7d7526549a69e761598",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 78,
"avg_line_length": 29.242587601078167,
"alnum_prop": 0.5377454143239008,
"repo_name": "ConPaaS-team/conpaas",
"id": "dff49c07d326921ede1dbac87965fa99423a78be",
"size": "11631",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "conpaas-director/cpsdirector/iaas/libcloud/compute/drivers/ecp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "79"
},
{
"name": "Batchfile",
"bytes": "2136"
},
{
"name": "C",
"bytes": "12346"
},
{
"name": "CSS",
"bytes": "47680"
},
{
"name": "HTML",
"bytes": "5494"
},
{
"name": "Java",
"bytes": "404303"
},
{
"name": "JavaScript",
"bytes": "164519"
},
{
"name": "M4",
"bytes": "553"
},
{
"name": "Makefile",
"bytes": "78772"
},
{
"name": "Nginx",
"bytes": "1980"
},
{
"name": "PHP",
"bytes": "1900634"
},
{
"name": "Python",
"bytes": "2842443"
},
{
"name": "Shell",
"bytes": "232043"
},
{
"name": "Smarty",
"bytes": "15450"
}
],
"symlink_target": ""
}
|
"""
Neighborhood Component Analysis
"""
# Authors: William de Vazelhes <wdevazelhes@gmail.com>
# John Chiotellis <ioannis.chiotellis@in.tum.de>
# License: BSD 3 clause
from __future__ import print_function
from warnings import warn
import numpy as np
import sys
import time
import numbers
from scipy.optimize import minimize
from ..utils.extmath import softmax
from ..metrics import pairwise_distances
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import LabelEncoder
from ..decomposition import PCA
from ..utils.multiclass import check_classification_targets
from ..utils.random import check_random_state
from ..utils.validation import check_is_fitted, check_array, check_scalar
from ..utils.validation import _deprecate_positional_args
from ..exceptions import ConvergenceWarning
class NeighborhoodComponentsAnalysis(TransformerMixin, BaseEstimator):
"""Neighborhood Components Analysis
Neighborhood Component Analysis (NCA) is a machine learning algorithm for
metric learning. It learns a linear transformation in a supervised fashion
to improve the classification accuracy of a stochastic nearest neighbors
rule in the transformed space.
Read more in the :ref:`User Guide <nca>`.
Parameters
----------
n_components : int, default=None
Preferred dimensionality of the projected space.
If None it will be set to ``n_features``.
init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \
(n_features_a, n_features_b), default='auto'
Initialization of the linear transformation. Possible options are
'auto', 'pca', 'lda', 'identity', 'random', and a numpy array of shape
(n_features_a, n_features_b).
'auto'
Depending on ``n_components``, the most reasonable initialization
will be chosen. If ``n_components <= n_classes`` we use 'lda', as
it uses labels information. If not, but
``n_components < min(n_features, n_samples)``, we use 'pca', as
it projects data in meaningful directions (those of higher
variance). Otherwise, we just use 'identity'.
'pca'
``n_components`` principal components of the inputs passed
to :meth:`fit` will be used to initialize the transformation.
(See :class:`~sklearn.decomposition.PCA`)
'lda'
``min(n_components, n_classes)`` most discriminative
components of the inputs passed to :meth:`fit` will be used to
initialize the transformation. (If ``n_components > n_classes``,
the rest of the components will be zero.) (See
:class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)
'identity'
If ``n_components`` is strictly smaller than the
dimensionality of the inputs passed to :meth:`fit`, the identity
matrix will be truncated to the first ``n_components`` rows.
'random'
The initial transformation will be a random array of shape
`(n_components, n_features)`. Each value is sampled from the
standard normal distribution.
numpy array
n_features_b must match the dimensionality of the inputs passed to
:meth:`fit` and n_features_a must be less than or equal to that.
If ``n_components`` is not None, n_features_a must match it.
warm_start : bool, default=False
If True and :meth:`fit` has been called before, the solution of the
previous call to :meth:`fit` is used as the initial linear
transformation (``n_components`` and ``init`` will be ignored).
max_iter : int, default=50
Maximum number of iterations in the optimization.
tol : float, default=1e-5
Convergence tolerance for the optimization.
callback : callable, default=None
If not None, this function is called after every iteration of the
optimizer, taking as arguments the current solution (flattened
transformation matrix) and the number of iterations. This might be
useful in case one wants to examine or store the transformation
found after each iteration.
verbose : int, default=0
If 0, no progress messages will be printed.
If 1, progress messages will be printed to stdout.
If > 1, progress messages will be printed and the ``disp``
parameter of :func:`scipy.optimize.minimize` will be set to
``verbose - 2``.
random_state : int or numpy.RandomState, default=None
A pseudo random number generator object or a seed for it if int. If
``init='random'``, ``random_state`` is used to initialize the random
transformation. If ``init='pca'``, ``random_state`` is passed as an
argument to PCA when initializing the transformation. Pass an int
for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear transformation learned during fitting.
n_iter_ : int
Counts the number of iterations performed by the optimizer.
random_state_ : numpy.RandomState
Pseudo random number generator object used during initialization.
Examples
--------
>>> from sklearn.neighbors import NeighborhoodComponentsAnalysis
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... stratify=y, test_size=0.7, random_state=42)
>>> nca = NeighborhoodComponentsAnalysis(random_state=42)
>>> nca.fit(X_train, y_train)
NeighborhoodComponentsAnalysis(...)
>>> knn = KNeighborsClassifier(n_neighbors=3)
>>> knn.fit(X_train, y_train)
KNeighborsClassifier(...)
>>> print(knn.score(X_test, y_test))
0.933333...
>>> knn.fit(nca.transform(X_train), y_train)
KNeighborsClassifier(...)
>>> print(knn.score(nca.transform(X_test), y_test))
0.961904...
References
----------
.. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov.
"Neighbourhood Components Analysis". Advances in Neural Information
Processing Systems. 17, 513-520, 2005.
http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf
.. [2] Wikipedia entry on Neighborhood Components Analysis
https://en.wikipedia.org/wiki/Neighbourhood_components_analysis
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, init='auto', warm_start=False,
max_iter=50, tol=1e-5, callback=None, verbose=0,
random_state=None):
self.n_components = n_components
self.init = init
self.warm_start = warm_start
self.max_iter = max_iter
self.tol = tol
self.callback = callback
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
self : object
returns a trained NeighborhoodComponentsAnalysis model.
"""
# Verify inputs X and y and NCA parameters, and transform a copy if
# needed
X, y, init = self._validate_params(X, y)
# Initialize the random generator
self.random_state_ = check_random_state(self.random_state)
# Measure the total training time
t_train = time.time()
# Compute a mask that stays fixed during optimization:
same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
# (n_samples, n_samples)
# Initialize the transformation
transformation = self._initialize(X, y, init)
# Create a dictionary of parameters to be passed to the optimizer
disp = self.verbose - 2 if self.verbose > 1 else -1
optimizer_params = {'method': 'L-BFGS-B',
'fun': self._loss_grad_lbfgs,
'args': (X, same_class_mask, -1.0),
'jac': True,
'x0': transformation,
'tol': self.tol,
'options': dict(maxiter=self.max_iter, disp=disp),
'callback': self._callback
}
# Call the optimizer
self.n_iter_ = 0
opt_result = minimize(**optimizer_params)
# Reshape the solution found by the optimizer
self.components_ = opt_result.x.reshape(-1, X.shape[1])
# Stop timer
t_train = time.time() - t_train
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not opt_result.success:
warn('[{}] NCA did not converge: {}'.format(
cls_name, opt_result.message),
ConvergenceWarning)
print('[{}] Training took {:8.2f}s.'.format(cls_name, t_train))
return self
def transform(self, X):
"""Applies the learned transformation to the given data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data samples.
Returns
-------
        X_embedded : ndarray of shape (n_samples, n_components)
The data samples transformed.
Raises
------
NotFittedError
If :meth:`fit` has not been called before.
"""
check_is_fitted(self)
X = check_array(X)
return np.dot(X, self.components_.T)
def _validate_params(self, X, y):
"""Validate parameters as soon as :meth:`fit` is called.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The validated training samples.
y : ndarray of shape (n_samples,)
The validated training labels, encoded to be integers in
the range(0, n_classes).
init : str or ndarray of shape (n_features_a, n_features_b)
The validated initialization of the linear transformation.
Raises
        ------
TypeError
If a parameter is not an instance of the desired type.
ValueError
If a parameter's value violates its legal value range or if the
combination of two or more given parameters is incompatible.
"""
# Validate the inputs X and y, and converts y to numerical classes.
X, y = self._validate_data(X, y, ensure_min_samples=2)
check_classification_targets(y)
y = LabelEncoder().fit_transform(y)
# Check the preferred dimensionality of the projected space
if self.n_components is not None:
check_scalar(
self.n_components, 'n_components', numbers.Integral, min_val=1)
if self.n_components > X.shape[1]:
raise ValueError('The preferred dimensionality of the '
'projected space `n_components` ({}) cannot '
'be greater than the given data '
'dimensionality ({})!'
.format(self.n_components, X.shape[1]))
# If warm_start is enabled, check that the inputs are consistent
check_scalar(self.warm_start, 'warm_start', bool)
if self.warm_start and hasattr(self, 'components_'):
if self.components_.shape[1] != X.shape[1]:
raise ValueError('The new inputs dimensionality ({}) does not '
'match the input dimensionality of the '
'previously learned transformation ({}).'
.format(X.shape[1],
self.components_.shape[1]))
check_scalar(self.max_iter, 'max_iter', numbers.Integral, min_val=1)
check_scalar(self.tol, 'tol', numbers.Real, min_val=0.)
check_scalar(self.verbose, 'verbose', numbers.Integral, min_val=0)
if self.callback is not None:
if not callable(self.callback):
raise ValueError('`callback` is not callable.')
# Check how the linear transformation should be initialized
init = self.init
if isinstance(init, np.ndarray):
init = check_array(init)
# Assert that init.shape[1] = X.shape[1]
if init.shape[1] != X.shape[1]:
raise ValueError(
'The input dimensionality ({}) of the given '
'linear transformation `init` must match the '
'dimensionality of the given inputs `X` ({}).'
.format(init.shape[1], X.shape[1]))
# Assert that init.shape[0] <= init.shape[1]
if init.shape[0] > init.shape[1]:
raise ValueError(
'The output dimensionality ({}) of the given '
'linear transformation `init` cannot be '
'greater than its input dimensionality ({}).'
.format(init.shape[0], init.shape[1]))
if self.n_components is not None:
# Assert that self.n_components = init.shape[0]
if self.n_components != init.shape[0]:
raise ValueError('The preferred dimensionality of the '
'projected space `n_components` ({}) does'
' not match the output dimensionality of '
'the given linear transformation '
'`init` ({})!'
.format(self.n_components,
init.shape[0]))
elif init in ['auto', 'pca', 'lda', 'identity', 'random']:
pass
else:
raise ValueError(
"`init` must be 'auto', 'pca', 'lda', 'identity', 'random' "
"or a numpy array of shape (n_components, n_features).")
return X, y, init
def _initialize(self, X, y, init):
"""Initialize the transformation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The training labels.
init : str or ndarray of shape (n_features_a, n_features_b)
The validated initialization of the linear transformation.
Returns
-------
transformation : ndarray of shape (n_components, n_features)
The initialized linear transformation.
"""
transformation = init
if self.warm_start and hasattr(self, 'components_'):
transformation = self.components_
elif isinstance(init, np.ndarray):
pass
else:
n_samples, n_features = X.shape
n_components = self.n_components or n_features
if init == 'auto':
n_classes = len(np.unique(y))
if n_components <= min(n_features, n_classes - 1):
init = 'lda'
elif n_components < min(n_features, n_samples):
init = 'pca'
else:
init = 'identity'
if init == 'identity':
transformation = np.eye(n_components, X.shape[1])
elif init == 'random':
transformation = self.random_state_.randn(n_components,
X.shape[1])
elif init in {'pca', 'lda'}:
init_time = time.time()
if init == 'pca':
pca = PCA(n_components=n_components,
random_state=self.random_state_)
if self.verbose:
print('Finding principal components... ', end='')
sys.stdout.flush()
pca.fit(X)
transformation = pca.components_
elif init == 'lda':
from ..discriminant_analysis import (
LinearDiscriminantAnalysis)
lda = LinearDiscriminantAnalysis(n_components=n_components)
if self.verbose:
print('Finding most discriminative components... ',
end='')
sys.stdout.flush()
lda.fit(X, y)
transformation = lda.scalings_.T[:n_components]
if self.verbose:
print('done in {:5.2f}s'.format(time.time() - init_time))
return transformation
def _callback(self, transformation):
"""Called after each iteration of the optimizer.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The solution computed by the optimizer in this iteration.
"""
if self.callback is not None:
self.callback(transformation, self.n_iter_)
self.n_iter_ += 1
def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
"""Compute the loss and the loss gradient w.r.t. ``transformation``.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The raveled linear transformation on which to compute loss and
evaluate gradient.
X : ndarray of shape (n_samples, n_features)
The training samples.
same_class_mask : ndarray of shape (n_samples, n_samples)
A mask where ``mask[i, j] == 1`` if ``X[i]`` and ``X[j]`` belong
to the same class, and ``0`` otherwise.
Returns
-------
loss : float
The loss computed for the given transformation.
gradient : ndarray of shape (n_components * n_features,)
The new (flattened) gradient of the loss.
"""
if self.n_iter_ == 0:
self.n_iter_ += 1
if self.verbose:
header_fields = ['Iteration', 'Objective Value', 'Time(s)']
header_fmt = '{:>10} {:>20} {:>10}'
header = header_fmt.format(*header_fields)
cls_name = self.__class__.__name__
print('[{}]'.format(cls_name))
print('[{}] {}\n[{}] {}'.format(cls_name, header,
cls_name, '-' * len(header)))
t_funcall = time.time()
transformation = transformation.reshape(-1, X.shape[1])
X_embedded = np.dot(X, transformation.T) # (n_samples, n_components)
# Compute softmax distances
p_ij = pairwise_distances(X_embedded, squared=True)
np.fill_diagonal(p_ij, np.inf)
p_ij = softmax(-p_ij) # (n_samples, n_samples)
# Compute loss
masked_p_ij = p_ij * same_class_mask
p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1)
loss = np.sum(p)
# Compute gradient of loss w.r.t. `transform`
weighted_p_ij = masked_p_ij - p_ij * p
weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
# time complexity of the gradient: O(n_components x n_samples x (
# n_samples + n_features))
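        # For reference (notation follows Goldberger et al., 2005): the code
        # above maximizes sum_i p_i, where
        #   p_ij = exp(-||L x_i - L x_j||^2) / sum_{k != i} exp(-||L x_i - L x_k||^2)
        #   p_i  = sum_{j in C_i} p_ij,  C_i = indices sharing the class of x_i
        # The sign argument (set to -1.0 in fit) turns this into a
        # minimization suitable for L-BFGS.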
if self.verbose:
t_funcall = time.time() - t_funcall
values_fmt = '[{}] {:>10} {:>20.6e} {:>10.2f}'
print(values_fmt.format(self.__class__.__name__, self.n_iter_,
loss, t_funcall))
sys.stdout.flush()
return sign * loss, sign * gradient.ravel()
def _more_tags(self):
return {'requires_y': True}
|
{
"content_hash": "b7da05502c7551c0623538e850615623",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 79,
"avg_line_length": 39.31558935361217,
"alnum_prop": 0.5636363636363636,
"repo_name": "ryfeus/lambda-packs",
"id": "8920b2d99ed02817aaf3505bf7e2ac0027962db8",
"size": "20696",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Sklearn_arm/source/sklearn/neighbors/_nca.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
"""Module for testing the show campus command.
Most of the location show commands are tested in their add/del
counterparts. However, we have chosen (so far) to not implement
those commands for campus.
"""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestShowCampus(TestBrokerCommand):
def testshowcampusall(self):
command = "show campus --all"
out = self.commandtest(command.split(" "))
# Just a sampling.
self.matchoutput(out, "Campus: ny", command)
self.matchoutput(out, "Fullname: New York", command)
self.matchoutput(out, "Campus: vi", command)
self.matchoutput(out, "Fullname: Virginia", command)
def testshowcampusvi(self):
command = "show campus --campus vi"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Campus: vi", command)
self.matchoutput(out, "Fullname: Virginia", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestShowCampus)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "f41c3ae0cfa944846078b107af6c467c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 71,
"avg_line_length": 30.394736842105264,
"alnum_prop": 0.6744588744588744,
"repo_name": "stdweird/aquilon",
"id": "4cb9daedbf5321cab6ae36c12e7296ee375181e3",
"size": "1878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/broker/test_show_campus.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
from argparse import Namespace
from pathlib import Path
from uuid import uuid4
import pytest
from virtualenv.discovery.builtin import Builtin, get_interpreter
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import fs_supports_symlink
@pytest.mark.skipif(not fs_supports_symlink(), reason="symlink not supported")
@pytest.mark.parametrize("case", ["mixed", "lower", "upper"])
def test_discovery_via_path(monkeypatch, case, tmp_path, caplog, session_app_data):
caplog.set_level(logging.DEBUG)
current = PythonInfo.current_system(session_app_data)
core = f"somethingVeryCryptic{'.'.join(str(i) for i in current.version_info[0:3])}"
name = "somethingVeryCryptic"
if case == "lower":
name = name.lower()
elif case == "upper":
name = name.upper()
exe_name = f"{name}{current.version_info.major}{'.exe' if sys.platform == 'win32' else ''}"
target = tmp_path / current.install_path("scripts")
target.mkdir(parents=True)
executable = target / exe_name
os.symlink(sys.executable, str(executable))
pyvenv_cfg = Path(sys.executable).parents[1] / "pyvenv.cfg"
if pyvenv_cfg.exists():
(target / pyvenv_cfg.name).write_bytes(pyvenv_cfg.read_bytes())
new_path = os.pathsep.join([str(target)] + os.environ.get("PATH", "").split(os.pathsep))
monkeypatch.setenv("PATH", new_path)
interpreter = get_interpreter(core, [])
assert interpreter is not None
def test_discovery_via_path_not_found(tmp_path, monkeypatch):
monkeypatch.setenv("PATH", str(tmp_path))
interpreter = get_interpreter(uuid4().hex, [])
assert interpreter is None
def test_relative_path(session_app_data, monkeypatch):
sys_executable = Path(PythonInfo.current_system(app_data=session_app_data).system_executable)
cwd = sys_executable.parents[1]
monkeypatch.chdir(str(cwd))
relative = str(sys_executable.relative_to(cwd))
result = get_interpreter(relative, [], session_app_data)
assert result is not None
def test_discovery_fallback_fail(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(
Namespace(app_data=session_app_data, try_first_with=[], python=["magic-one", "magic-two"], env=os.environ)
)
result = builtin.run()
assert result is None
assert "accepted" not in caplog.text
def test_discovery_fallback_ok(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(
Namespace(app_data=session_app_data, try_first_with=[], python=["magic-one", sys.executable], env=os.environ)
)
result = builtin.run()
assert result is not None, caplog.text
assert result.executable == sys.executable, caplog.text
assert "accepted" in caplog.text
|
{
"content_hash": "f918d84be9f01481d24e18b78794ad2d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 117,
"avg_line_length": 35.87179487179487,
"alnum_prop": 0.7015725518227305,
"repo_name": "pypa/virtualenv",
"id": "458bee689fc0cd3c61e83601235c572661ef60b6",
"size": "2798",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/unit/discovery/test_discovery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1512"
},
{
"name": "C",
"bytes": "1135"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "459846"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
import argparse
import urllib2
import pylid
from pylid.langs import bg, cs, da, de, el, en,\
es, et, fi, fr, hu, it, lv, nl, pl, pt, ro, sk, sl
from bs4 import BeautifulSoup as BS
__author__ = 'David R. Mortensen'
__email__ = 'davidmortensen@gmail.com'
def identify_lang(text):
"""Given text, return a tuple consisting of the language of the text
and the distance in n-dimensional space between the known model and
the the guessed model.
"""
lid = pylid.PyLID(3)
lid.add_ngrams_in_text(text)
return lid.closest_language_euclidean([
bg,
cs,
da,
de,
el,
en,
es,
et,
fi,
fr,
hu,
it,
lv,
nl,
pl,
pt,
ro,
sk,
sl,
])
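# Minimal usage sketch (return value per the docstring above; the exact
# distance depends on the trained n-gram models):
#
#   lang, distance = identify_lang(u'The quick brown fox jumps over ...')
#   # e.g. ('en', 0.0123)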
def fetch_text(url):
try:
html = urllib2.urlopen(url)
except urllib2.URLError:
        print('Incorrect URL provided.', file=sys.stderr)
exit(1)
soup = BS(html)
return soup.get_text()
def parse_arguments():
parser = argparse.ArgumentParser(
description='Identify the language of the page at the specified URL.')
parser.add_argument('url', help='URL of webpage to be classified.')
return parser.parse_args()
def main():
args = parse_arguments()
text = fetch_text(args.url)
lang = identify_lang(text)
print(lang)
if __name__ == '__main__':
main()
|
{
"content_hash": "929fff4813bc8041981b79f4e252f50f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 20.760563380281692,
"alnum_prop": 0.5800542740841248,
"repo_name": "dmort27/pylid",
"id": "b8d3631a3ab508da65f9d73bcf97850c3e408cfc",
"size": "1521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/classify_webpage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6976996"
}
],
"symlink_target": ""
}
|
'''Download, merge and summarize known logs for Certificate Transparency (CT).
Print output to stdout, warnings and errors to stderr.
The source of information is:
https://www.gstatic.com/ct/log_list/v2/all_logs_list.json
from page https://www.certificate-transparency.org/known-logs
'''
import argparse
import datetime
import json
import logging
from utlz import first_paragraph, red
from ctutlz.ctlog import download_log_list
from ctutlz.ctlog import set_operator_names, print_schema
from ctutlz.ctlog import URL_ALL_LOGS, Logs
from ctutlz.utils.logger import VERBOSE, init_logger, setup_logging, logger
from ctutlz._version import __version__
def create_parser():
parser = argparse.ArgumentParser(description=first_paragraph(__doc__))
parser.epilog = __doc__.split('\n', 1)[-1]
parser.add_argument('-v', '--version',
action='version',
default=False,
version=__version__,
help='print version number')
me1 = parser.add_mutually_exclusive_group()
me1.add_argument('--short',
dest='loglevel',
action='store_const',
const=logging.INFO,
default=VERBOSE, # default loglevel if nothing set
help='show short results')
me1.add_argument('--debug',
dest='loglevel',
action='store_const',
const=logging.DEBUG,
help='show more for diagnostic purposes')
me2 = parser.add_mutually_exclusive_group()
me2.add_argument('--json',
action='store_true',
dest='print_json',
help='print merged log lists as json')
me2.add_argument('--schema',
action='store_true',
dest='print_schema',
help='print json schema')
return parser
def warn_inconsistency(url, val_a, val_b):
    # suppress duplicate warnings (memoized on a function attribute; admittedly hacky)
key = url + ''.join(sorted('%s%s' % (val_a, val_b)))
if not hasattr(warn_inconsistency, 'seen'):
warn_inconsistency.seen = {}
if not warn_inconsistency.seen.get(key, False):
warn_inconsistency.seen[key] = True
else:
return
logger.warning(red('inconsistent data for log %s: %s != %s' % (url, val_a, val_b)))
def data_structure_from_log(log):
log_data = dict(log._asdict())
log_data['id_b64'] = log.id_b64
log_data['pubkey'] = log.pubkey
log_data['scts_accepted_by_chrome'] = \
log.scts_accepted_by_chrome
return log_data
def list_from_lists(log_lists):
log_list = []
for item_dict in log_lists:
for log in item_dict['logs']:
log_data = data_structure_from_log(log)
log_list.append(log_data)
return log_list
def show_log(log, order=3):
logger.verbose('#' * order + ' %s\n' % log.url)
logdict = log._asdict()
for key, value in logdict.items():
if key == 'id_b64_non_calculated' and value == log.id_b64:
value = None # don't log this value
if key == 'operated_by':
value = ', '.join(value)
            # avoid markdown syntax interpretation and improve readability
key = key.replace('_', ' ')
if value is not None:
logger.verbose('* __%s__: `%s`' % (key, value))
logger.verbose('* __scts accepted by chrome__: '
'%s' % log.scts_accepted_by_chrome)
if log.key is not None:
logger.verbose('* __id b64__: `%s`' % log.log_id)
logger.verbose('* __pubkey__:\n```\n%s\n```' % log.pubkey)
logger.verbose('')
def show_logs(logs, heading, order=2):
if len(logs) <= 0:
return
    logger.info(('#' * order + ' ' + heading + '\n') if heading else '')
s_or_not = 's'
if len(logs) == 1:
s_or_not = ''
    # show the number of logs
logger.info('%i log%s\n' % (len(logs), s_or_not))
# list log urls
for log in logs:
if logger.level < logging.INFO:
anchor = log.url.replace('/', '')
logger.verbose('* [%s](#%s)' % (log.url, anchor))
else:
logger.info('* %s' % log.url)
logger.info('')
for log in logs:
show_log(log)
logger.info('End of list')
def ctloglist(print_json=None):
'''Gather ct-log lists and print the merged log list.
Args:
print_json(boolean): If True, print merged log list as json data.
Else print as markdown.
'''
if not print_json:
today = datetime.date.today()
now = datetime.datetime.now()
logger.info('# Known Certificate Transparency (CT) Logs\n')
logger.verbose('Created with [ctloglist]'
'(https://github.com/theno/ctutlz#ctloglist)\n')
logger.verbose('* [all_logs_list.json]('
'https://www.gstatic.com/ct/log_list/v2/all_logs_list.json)'
'\n')
logger.info('Version (Date): %s\n' % today)
logger.verbose('Datetime: %s\n' % now)
logger.info('') # formatting: insert empty line
# all_logs_list.json
all_dict = download_log_list(URL_ALL_LOGS)
orig_all_dict = dict(all_dict)
set_operator_names(all_dict)
all_logs = Logs([all_dict])
if print_json:
json_str = json.dumps(orig_all_dict, indent=4, sort_keys=True)
print(json_str)
else:
show_logs(all_logs, '')
def main():
init_logger()
parser = create_parser()
args = parser.parse_args()
setup_logging(args.loglevel)
logger.debug(args)
if args.print_schema:
print_schema()
else:
ctloglist(args.print_json)
if __name__ == '__main__':
main()
|
{
"content_hash": "8432841c8f1833aae35999944c570fcf",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 87,
"avg_line_length": 29.943298969072163,
"alnum_prop": 0.5630917541745567,
"repo_name": "theno/ctutlz",
"id": "ed6afd6881c471e14e8fc5cbcd70921a6f121ab8",
"size": "5809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctutlz/scripts/ctloglist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "700"
},
{
"name": "HTML",
"bytes": "146779"
},
{
"name": "Python",
"bytes": "109106"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ManageSnippetApp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
{
"content_hash": "d9437b068d726abdbe59c3df41ac1c78",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.625,
"repo_name": "mitalimn/PythonDjangoApp",
"id": "bc046cf120f1b620102f4055806b7010dbd27b4a",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ManageSnippetApp/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1319"
},
{
"name": "Python",
"bytes": "8170"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ["DJANGO_SETTINGS_MODULE"] = "lino_book.projects.diamond2.settings"
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "e8a883dfb14ce428c30fb6261e7143ef",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 81,
"avg_line_length": 29.625,
"alnum_prop": 0.7130801687763713,
"repo_name": "khchine5/book",
"id": "214eddbb26e199f60b7e0eeaaa8ce9ffa90d299d",
"size": "259",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lino_book/projects/diamond2/manage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "Python",
"bytes": "486198"
},
{
"name": "Shell",
"bytes": "702"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_user_only_external_receipts'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=254),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', blank=True, verbose_name='groups', to='auth.Group', related_query_name='user', related_name='user_set'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, blank=True, verbose_name='last login'),
),
]
|
{
"content_hash": "87ec50145db9271b50f536b6011646aa",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 256,
"avg_line_length": 33.82142857142857,
"alnum_prop": 0.6061246040126715,
"repo_name": "mayapurmedia/tovp",
"id": "6f49bca28a4a6541d35ab405ba2be95a22e753c9",
"size": "971",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tovp/users/migrations/0004_auto_20151205_0549.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190169"
},
{
"name": "HTML",
"bytes": "281143"
},
{
"name": "JavaScript",
"bytes": "2888"
},
{
"name": "Python",
"bytes": "504316"
}
],
"symlink_target": ""
}
|
"""host_registry package.
Host registry is used to store all host tests (by id) which can be called from the test
framework.
"""
from .host_registry import HostRegistry
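# Minimal usage sketch. The method names below follow the upstream htrun
# registry (register_host_test / get_host_test) and are an assumption here,
# not guaranteed by this package's docstring:
#
#     registry = HostRegistry()
#     registry.register_host_test('default', DefaultHostTest())
#     host_test = registry.get_host_test('default')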
|
{
"content_hash": "594c058171c8f12b4f3dad8e6fb389ef",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 87,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.7602339181286549,
"repo_name": "ARMmbed/greentea",
"id": "4abdd7d7b82f4e4d851f7a6083909a55d4a1cac1",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/htrun/host_tests_registry/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "226269"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
class BidirectionalRNN:
def __init__(self, name, rnn_size, data_type=tf.float32):
self.data_type = data_type
self.name = name
with tf.variable_scope(self.name):
self.forward_rnn = rnn.MultiRNNCell([rnn.GRUCell(rnn_size[i]) for i in range(len(rnn_size))])
self.backward_rnn = rnn.MultiRNNCell([rnn.GRUCell(rnn_size[i]) for i in range(len(rnn_size))])
def run(self, data, reuse=False, time_major=False, pooling=False):
time_axis = 0 if time_major else 1
with tf.variable_scope(self.name):
with tf.variable_scope("ForwardRNN", reuse=reuse) as scope:
forward_output, state = tf.nn.dynamic_rnn(self.forward_rnn, data, dtype=self.data_type, time_major=time_major, scope=scope)
if pooling == 'mean':
forward_output = tf.reduce_mean(forward_output, time_axis)
else:
forward_output = forward_output[-1, :, :] if time_major else forward_output[:, -1, :]
with tf.variable_scope("BackwardRNN", reuse=reuse) as scope:
data = tf.reverse(data, axis=[time_axis])
backward_output, state = tf.nn.dynamic_rnn(self.backward_rnn, data, dtype=self.data_type, time_major=time_major, scope=scope)
if pooling == 'mean':
backward_output = tf.reduce_mean(backward_output, time_axis)
else:
backward_output = backward_output[-1, :, :] if time_major else backward_output[:, -1, :]
tf.summary.histogram('forward_rnn_output', forward_output)
tf.summary.histogram('backward_rnn_output', backward_output)
return (forward_output + backward_output) / 2
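# Minimal usage sketch (TF1 graph mode; batch/step/feature sizes are
# illustrative). `run` returns the element-wise mean of the forward and
# backward encodings, so the output width equals the last rnn_size entry:
#
#     inputs = tf.placeholder(tf.float32, [8, 20, 32])   # [batch, time, feat]
#     encoder = BidirectionalRNN('encoder', rnn_size=[64, 64])
#     encoded = encoder.run(inputs, pooling='mean')      # shape [8, 64]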
|
{
"content_hash": "3140fcaca2b668f9cff192d4cb366afe",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 141,
"avg_line_length": 50.31578947368421,
"alnum_prop": 0.6161087866108786,
"repo_name": "ruiann/SignatureVerification",
"id": "22e8a2903c2f9ec7e5d5bcd8bd2f64c078a0735c",
"size": "1912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BidirectionalRNN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25325"
}
],
"symlink_target": ""
}
|
import time
from functools import wraps
def print_timing(func):
@wraps(func)
def wrapper(*arg,**kwargs):
t1 = time.time()
res = func(*arg,**kwargs)
t2 = time.time()
        print('%s took %0.5g s' % (func.__name__, t2 - t1))
return res
return wrapper
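# Quick self-check added for illustration (slow_add is a made-up example,
# not part of the library):
if __name__ == '__main__':
    @print_timing
    def slow_add(a, b):
        time.sleep(0.1)
        return a + b
    slow_add(1, 2)  # prints e.g. "slow_add took 0.10012 s"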
|
{
"content_hash": "7c31d365286ee8ca0d308fae01a36853",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 22.846153846153847,
"alnum_prop": 0.5656565656565656,
"repo_name": "ufoym/agpy",
"id": "36b77fb9e9ffc6097cfbef390a5c0749018d3db2",
"size": "297",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "agpy/timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11150"
},
{
"name": "Common Lisp",
"bytes": "6644"
},
{
"name": "Python",
"bytes": "881794"
},
{
"name": "Shell",
"bytes": "11548"
},
{
"name": "TeX",
"bytes": "1462"
},
{
"name": "VimL",
"bytes": "1189"
}
],
"symlink_target": ""
}
|
"""Nova common internal object model"""
import contextlib
import datetime
import functools
import traceback
import netaddr
import oslo_messaging as messaging
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import exception as ovoo_exc
import six
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import utils
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
# FIXME(danms): This is just until we use o.vo's class properties
# and object base.
return '_obj_' + name
class NovaObjectRegistry(ovoo_base.VersionedObjectRegistry):
notification_classes = []
def registration_hook(self, cls, index):
# NOTE(danms): This is called when an object is registered,
# and is responsible for maintaining nova.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
@classmethod
def register_notification(cls, notification_cls):
"""Register a class as notification.
Use only to register concrete notification or payload classes,
do not register base classes intended for inheritance only.
"""
cls.register_if(False)(notification_cls)
cls.notification_classes.append(notification_cls)
return notification_cls
@classmethod
def register_notification_objects(cls):
"""Register previously decorated notification as normal ovos.
This is not intended for production use but only for testing and
document generation purposes.
"""
for notification_cls in cls.notification_classes:
cls.register(notification_cls)
remotable_classmethod = ovoo_base.remotable_classmethod
remotable = ovoo_base.remotable
class NovaObject(ovoo_base.VersionedObject):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
OBJ_SERIAL_NAMESPACE = 'nova_object'
OBJ_PROJECT_NAMESPACE = 'nova'
# NOTE(ndipanov): This is nova-specific
@staticmethod
def should_migrate_data():
"""A check that can be used to inhibit online migration behavior
This is usually used to check if all services that will be accessing
the db directly are ready for the new format.
"""
raise NotImplementedError()
# @staticmethod
# def session(Session=None):
# if not Session:
# return Session(bind=engine)
# return Session()
#
# NOTE(danms): This has some minor change between the nova and o.vo
# version, so avoid inheriting it for the moment so we can make that
# transition separately for clarity.
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
.. note::
- This is NOT "revert to previous values"
- Specifying fields on recursive resets will only be honored at the
top level. Everything below the top will reset all.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
# NOTE(danms): This is nova-specific
@contextlib.contextmanager
def obj_alternate_context(self, context):
original_context = self._context
self._context = context
try:
yield
finally:
self._context = original_context
# NOTE(danms): This is nova-specific
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
class NovaObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
def __iter__(self):
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name
def keys(self):
return list(self)
class NovaTimestampObject(object):
"""Mixin class for db backed objects with timestamp fields.
Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
'deleted_at': obj_fields.DateTimeField(nullable=True),
'deleted': obj_fields.BooleanField(default=False),
}
class ObjectListBase(ovoo_base.ObjectListBase):
# NOTE(danms): These are for transition to using the oslo
# base object and can be removed when we move to it.
@classmethod
def _obj_primitive_key(cls, field):
return 'nova_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except ovoo_exc.IncompatibleObjectVersion:
objver = objprim['nova_object.version']
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim['nova_object.version'] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
objname = objprim['nova_object.name']
version_manifest = ovoo_base.obj_tree_get_versions(objname)
if objname in version_manifest:
objinst = self.conductor.object_backport_versions(
context, objprim, version_manifest)
else:
raise
return objinst
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_make_dict_of_lists(context, list_cls, obj_list, item_key):
"""Construct a dictionary of object lists, keyed by item_key.
:param:context: Request context
:param:list_cls: The ObjectListBase class
:param:obj_list: The list of objects to place in the dictionary
:param:item_key: The object attribute name to use as a dictionary key
"""
obj_lists = {}
for obj in obj_list:
key = getattr(obj, item_key)
if key not in obj_lists:
obj_lists[key] = list_cls()
obj_lists[key].objects = []
obj_lists[key].objects.append(obj)
for key in obj_lists:
obj_lists[key]._context = context
obj_lists[key].obj_reset_changes()
return obj_lists
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def serialize_args(fn):
"""Decorator that will do the arguments serialization before remoting."""
def wrapper(obj, *args, **kwargs):
args = [utils.strtime(arg) if isinstance(arg, datetime.datetime)
else arg for arg in args]
for k, v in six.iteritems(kwargs):
if k == 'exc_val' and v:
kwargs[k] = six.text_type(v)
elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
kwargs[k] = ''.join(traceback.format_tb(v))
elif isinstance(v, datetime.datetime):
kwargs[k] = utils.strtime(v)
if hasattr(fn, '__call__'):
return fn(obj, *args, **kwargs)
# NOTE(danms): We wrap a descriptor, so use that protocol
return fn.__get__(None, obj)(*args, **kwargs)
# NOTE(danms): Make this discoverable
wrapper.remotable = getattr(fn, 'remotable', False)
wrapper.original_fn = fn
return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
else classmethod(wrapper))
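# Illustrative behavior sketch (a plain function, not a real remotable
# method): under @serialize_args, a datetime positional argument arrives as
# the string form produced by utils.strtime, a truthy 'exc_val' keyword is
# coerced to text, and a traceback object passed as 'exc_tb' is rendered
# with traceback.format_tb before the wrapped call runs.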
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
:param:obj1: The first object in the comparison
:param:obj2: The second object in the comparison
:param:ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
def _strip(prim, keys):
if isinstance(prim, dict):
for k in keys:
prim.pop(k, None)
for v in prim.values():
_strip(v, keys)
if isinstance(prim, list):
for v in prim:
_strip(v, keys)
return prim
if ignore is not None:
keys = ['nova_object.changes'] + ignore
else:
keys = ['nova_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
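if __name__ == '__main__':
    # Minimal self-check sketch (stand-in objects, not real nova models):
    # the two primitives compare equal once 'nova_object.changes' and the
    # ignored 'updated_at' key are stripped.
    class _FakePrim(object):
        def __init__(self, prim):
            self._prim = prim
        def obj_to_primitive(self):
            return dict(self._prim)
    _a = _FakePrim({'nova_object.changes': ['x'], 'x': 1, 'updated_at': 't1'})
    _b = _FakePrim({'x': 1, 'updated_at': 't2'})
    assert obj_equal_prims(_a, _b, ignore=['updated_at'])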
|
{
"content_hash": "4e82acd9601680c5514ac808a4d9f79a",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 79,
"avg_line_length": 36.932714617169374,
"alnum_prop": 0.6228797587636637,
"repo_name": "xuweiliang/Codelibrary",
"id": "7ac9510e914dea9b23720cedd925255cd795b156",
"size": "16523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/objects/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
}
|
from urllib.parse import quote
from dulwich.porcelain import open_repo
import os.path
import sys
import csv
import datetime
onaji = None
class LoggerPlugin(object):
def __init__(self, serializer=None):
self._path = os.getenv('REPO_HOME', '.')
self._home = os.path.join(self._path, ".onaji")
self._null = False
if not os.path.exists(self._home):
os.mkdir(self._home)
try:
repo = open_repo(self._path)
branch = repo.refs.read_ref('HEAD').decode('utf-8').rsplit('/', 1)[-1]
            self._commit = repo.head()
self._file = open(os.path.join(self._home, branch + '.' + self._commit.decode('utf-8') + ".csv"), 'w')
self._writer = csv.writer(self._file)
self._testname = None
if serializer:
self._serializer = serializer
else:
self._serializer = lambda x: quote(repr(x))
        except Exception:
sys.stderr.write("No repository found or error creating output file. Going into null logging mode.\n")
self._null = True
def pytest_runtest_setup(self, item):
self._testname = item.name
def pytest_unconfigure(self, config):
if not self._null:
self._file.close()
def log(self, key, *values):
"""
Log an item
Args:
key:
values:
"""
if not self._null:
for v in values:
self._writer.writerow([self._testname, quote(key), self._serializer(v)])
def pytest_configure(config):
global onaji
onaji = LoggerPlugin()
config.pluginmanager.register(onaji)
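# Usage sketch from inside a test module (assumes the plugin is active and
# pytest_configure has run; the key and value are illustrative):
#
#     from onaji import logger
#     logger.onaji.log('accuracy', 0.93)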
|
{
"content_hash": "8866a1f364a41322b866a09b3d0d0ae4",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 114,
"avg_line_length": 29.322033898305083,
"alnum_prop": 0.5416184971098266,
"repo_name": "jheard-tw/onaji",
"id": "2a7a3d27834e190ec1dac0003a2b3690f8dc80e9",
"size": "1730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onaji/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5234"
}
],
"symlink_target": ""
}
|
import time
import logging
import weakref
from botocore import xform_name
from botocore.exceptions import BotoCoreError, HTTPClientError, ConnectionError
from botocore.model import OperationNotFoundError
from botocore.utils import CachedProperty
logger = logging.getLogger(__name__)
class EndpointDiscoveryException(BotoCoreError):
pass
class EndpointDiscoveryRequired(EndpointDiscoveryException):
""" Endpoint Discovery is disabled but is required for this operation. """
fmt = 'Endpoint Discovery is not enabled but this operation requires it.'
class EndpointDiscoveryRefreshFailed(EndpointDiscoveryException):
""" Endpoint Discovery failed to the refresh the known endpoints. """
fmt = 'Endpoint Discovery failed to refresh the required endpoints.'
def block_endpoint_discovery_required_operations(model, **kwargs):
endpoint_discovery = model.endpoint_discovery
if endpoint_discovery and endpoint_discovery.get('required'):
raise EndpointDiscoveryRequired()
class EndpointDiscoveryModel(object):
def __init__(self, service_model):
self._service_model = service_model
@CachedProperty
def discovery_operation_name(self):
discovery_operation = self._service_model.endpoint_discovery_operation
return xform_name(discovery_operation.name)
@CachedProperty
def discovery_operation_keys(self):
discovery_operation = self._service_model.endpoint_discovery_operation
keys = []
if discovery_operation.input_shape:
keys = list(discovery_operation.input_shape.members.keys())
return keys
def discovery_required_for(self, operation_name):
try:
operation_model = self._service_model.operation_model(operation_name)
return operation_model.endpoint_discovery.get('required', False)
except OperationNotFoundError:
return False
def discovery_operation_kwargs(self, **kwargs):
input_keys = self.discovery_operation_keys
# Operation and Identifiers are only sent if there are Identifiers
if not kwargs.get('Identifiers'):
kwargs.pop('Operation', None)
kwargs.pop('Identifiers', None)
return dict((k, v) for k, v in kwargs.items() if k in input_keys)
def gather_identifiers(self, operation, params):
return self._gather_ids(operation.input_shape, params)
def _gather_ids(self, shape, params, ids=None):
# Traverse the input shape and corresponding parameters, gathering
# any input fields labeled as an endpoint discovery id
if ids is None:
ids = {}
for member_name, member_shape in shape.members.items():
if member_shape.metadata.get('endpointdiscoveryid'):
ids[member_name] = params[member_name]
elif member_shape.type_name == 'structure' and member_name in params:
self._gather_ids(member_shape, params[member_name], ids)
return ids
class EndpointDiscoveryManager(object):
def __init__(self, client, cache=None, current_time=None, always_discover=True):
if cache is None:
cache = {}
self._cache = cache
self._failed_attempts = {}
if current_time is None:
current_time = time.time
self._time = current_time
self._always_discover = always_discover
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
self._model = EndpointDiscoveryModel(client.meta.service_model)
def _parse_endpoints(self, response):
endpoints = response['Endpoints']
current_time = self._time()
for endpoint in endpoints:
cache_time = endpoint.get('CachePeriodInMinutes')
endpoint['Expiration'] = current_time + cache_time * 60
return endpoints
def _cache_item(self, value):
if isinstance(value, dict):
return tuple(sorted(value.items()))
else:
return value
def _create_cache_key(self, **kwargs):
kwargs = self._model.discovery_operation_kwargs(**kwargs)
return tuple(self._cache_item(v) for k, v in sorted(kwargs.items()))
def gather_identifiers(self, operation, params):
return self._model.gather_identifiers(operation, params)
def delete_endpoints(self, **kwargs):
cache_key = self._create_cache_key(**kwargs)
if cache_key in self._cache:
del self._cache[cache_key]
def _describe_endpoints(self, **kwargs):
# This is effectively a proxy to whatever name/kwargs the service
# supports for endpoint discovery.
kwargs = self._model.discovery_operation_kwargs(**kwargs)
operation_name = self._model.discovery_operation_name
discovery_operation = getattr(self._client, operation_name)
logger.debug('Discovering endpoints with kwargs: %s', kwargs)
return discovery_operation(**kwargs)
def _get_current_endpoints(self, key):
if key not in self._cache:
return None
now = self._time()
return [e for e in self._cache[key] if now < e['Expiration']]
def _refresh_current_endpoints(self, **kwargs):
cache_key = self._create_cache_key(**kwargs)
try:
response = self._describe_endpoints(**kwargs)
endpoints = self._parse_endpoints(response)
self._cache[cache_key] = endpoints
self._failed_attempts.pop(cache_key, None)
return endpoints
except (ConnectionError, HTTPClientError):
self._failed_attempts[cache_key] = self._time() + 60
return None
def _recently_failed(self, cache_key):
if cache_key in self._failed_attempts:
now = self._time()
if now < self._failed_attempts[cache_key]:
return True
del self._failed_attempts[cache_key]
return False
def _select_endpoint(self, endpoints):
return endpoints[0]['Address']
def describe_endpoint(self, **kwargs):
operation = kwargs['Operation']
discovery_required = self._model.discovery_required_for(operation)
if not self._always_discover and not discovery_required:
# Discovery set to only run on required operations
logger.debug(
'Optional discovery disabled. Skipping discovery for Operation: %s'
% operation
)
return None
# Get the endpoint for the provided operation and identifiers
cache_key = self._create_cache_key(**kwargs)
endpoints = self._get_current_endpoints(cache_key)
if endpoints:
return self._select_endpoint(endpoints)
# All known endpoints are stale
recently_failed = self._recently_failed(cache_key)
if not recently_failed:
# We haven't failed to discover recently, go ahead and refresh
endpoints = self._refresh_current_endpoints(**kwargs)
if endpoints:
return self._select_endpoint(endpoints)
# Discovery has failed recently, do our best to get an endpoint
logger.debug('Endpoint Discovery has failed for: %s', kwargs)
stale_entries = self._cache.get(cache_key, None)
if stale_entries:
# We have stale entries, use those while discovery is failing
return self._select_endpoint(stale_entries)
if discovery_required:
# It looks strange to be checking recently_failed again but,
# this informs us as to whether or not we tried to refresh earlier
if recently_failed:
# Discovery is required and we haven't already refreshed
endpoints = self._refresh_current_endpoints(**kwargs)
if endpoints:
return self._select_endpoint(endpoints)
# No endpoints even refresh, raise hard error
raise EndpointDiscoveryRefreshFailed()
# Discovery is optional, just use the default endpoint for now
return None
class EndpointDiscoveryHandler(object):
def __init__(self, manager):
self._manager = manager
def register(self, events, service_id):
events.register(
'before-parameter-build.%s' % service_id, self.gather_identifiers
)
events.register_first(
'request-created.%s' % service_id, self.discover_endpoint
)
events.register('needs-retry.%s' % service_id, self.handle_retries)
def gather_identifiers(self, params, model, context, **kwargs):
endpoint_discovery = model.endpoint_discovery
# Only continue if the operation supports endpoint discovery
if endpoint_discovery is None:
return
ids = self._manager.gather_identifiers(model, params)
context['discovery'] = {'identifiers': ids}
def discover_endpoint(self, request, operation_name, **kwargs):
ids = request.context.get('discovery', {}).get('identifiers')
if ids is None:
return
endpoint = self._manager.describe_endpoint(
Operation=operation_name, Identifiers=ids
)
if endpoint is None:
logger.debug('Failed to discover and inject endpoint')
return
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
logger.debug('Injecting discovered endpoint: %s', endpoint)
request.url = endpoint
def handle_retries(self, request_dict, response, operation, **kwargs):
if response is None:
return None
_, response = response
status = response.get('ResponseMetadata', {}).get('HTTPStatusCode')
error_code = response.get('Error', {}).get('Code')
if status != 421 and error_code != 'InvalidEndpointException':
return None
context = request_dict.get('context', {})
ids = context.get('discovery', {}).get('identifiers')
if ids is None:
return None
# Delete the cached endpoints, forcing a refresh on retry
# TODO: Improve eviction behavior to only evict the bad endpoint if
# there are multiple. This will almost certainly require a lock.
self._manager.delete_endpoints(
Operation=operation.name, Identifiers=ids
)
return 0
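# Hypothetical wiring sketch; the event names mirror those used in
# EndpointDiscoveryHandler.register above, while `client` and `service_id`
# are placeholders for a real botocore client and its service id:
#
#     manager = EndpointDiscoveryManager(client)
#     handler = EndpointDiscoveryHandler(manager)
#     handler.register(client.meta.events, service_id)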
|
{
"content_hash": "14b16534521c91cb8c393ff8c8d83e5a",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 84,
"avg_line_length": 39.961832061068705,
"alnum_prop": 0.6410697230181471,
"repo_name": "pplu/botocore",
"id": "68ee6bea684d00a0ad8ce3b74780bc9c16120cf4",
"size": "11031",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "botocore/discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "23824"
},
{
"name": "Python",
"bytes": "2691062"
}
],
"symlink_target": ""
}
|
import time
import info.v1.metric as v1
class Config(object):
def __init__(self, endpoint=None, MetricsConfig=None):
self.Endpoint = endpoint
self.MetricsConfig = MetricsConfig
class MetricConfig(object):
def __init__(self, Name = None, MetricType = None,
Units=None, DataType = None,
PollingFrequency=None, Regex=None):
self.Name = Name
self.MetricType = MetricType
self.Units = Units
self.DataType = DataType
self.PollingFrequency = PollingFrequency
self.Regex = Regex
class Prometheus(object):
def __init__(self,Endpoint=None,
PollingFrequency=None,
MetricsConfig=None):
self.Endpoint = Endpoint
self.PollingFrequency = PollingFrequency
self.MetricsConfig = MetricsConfig
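if __name__ == '__main__':
    # Construction sketch with illustrative values, using only the
    # constructors defined above.
    cpu_metric = MetricConfig(Name='cpu_usage', MetricType='gauge',
                              Units='percent', DataType='float',
                              PollingFrequency=10)
    config = Config(endpoint='http://localhost:9090',
                    MetricsConfig=[cpu_metric])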
|
{
"content_hash": "8b3fbc5b14c56cd6ebeecccb04ca261f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 58,
"avg_line_length": 29.275862068965516,
"alnum_prop": 0.6266195524146054,
"repo_name": "knightXun/BabyCare",
"id": "197b40ce435117e21ca596a986fab3ac55f43019",
"size": "849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collector/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125863"
}
],
"symlink_target": ""
}
|
"""
Simulates the overhead of a system with 10000 actors that do nothing,
each processing 1000 messages and then shutting down.
Run with command:
python -m cProfile -s time profile.py
"""
AMOUNT_PROCESSORS = 10000
AMOUNT_MESSAGES = 1000
import unittest
from mock import create_autospec, Mock
from jobber.constants import (ACTOR_PROCESSOR_COMPLETED, ACTOR_SCHEDULER_RUNNING,
ACTOR_SCHEDULER_STOPPED, ACTOR_SCHEDULER_STOPPING)
from jobber.core.scheduler.shortest_job_next_scheduler import SJNScheduler
from jobber.core.actor.processor import ActorProcessor
from jobber.core.scheduler.actor_heap import ShortestJobNextHeap
from jobber.core.actor.actor import Actor
from jobber.core.messages.poison_pill import PoisonPill
from jobber.core.exceptions.no_messages_exception import NoMessagesException
class MockMessage(object):
pass
def stresstest():
scheduler = SJNScheduler()
mock_actor = create_autospec(Actor())
processors = [ActorProcessor(mock_actor) for _ in range(AMOUNT_PROCESSORS)]
for processor in processors:
for _ in range(AMOUNT_MESSAGES):
processor._receive_message(MockMessage())
for processor in processors:
scheduler.schedule(processor)
scheduler._state = ACTOR_SCHEDULER_RUNNING
scheduler.shutdown()
scheduler._state == ACTOR_SCHEDULER_STOPPED
scheduler.start()
if __name__=='__main__':
stresstest()
|
{
"content_hash": "069aaf1152c04093ad5ecbb4c15e48ec",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 81,
"avg_line_length": 29.872340425531913,
"alnum_prop": 0.7571225071225072,
"repo_name": "thomasquintana/jobber",
"id": "3397dbde6723ab45da1a0fb93b115bf2b45f4943",
"size": "2236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "87216"
}
],
"symlink_target": ""
}
|
import requests
import json
from calculate_credit_score import *
def get_account_number(customer):
account_url="http://api.reimaginebanking.com/customers/{}/accounts?key=dcd6272d8dab8b826b5c1376ac90af1b".format(customer["_id"])
response=requests.get(account_url)
response=json.loads(response.text)
response=response[0]
return response["_id"]
def get_account_balance(customer):
account_url="http://api.reimaginebanking.com/customers/{}/accounts?key=dcd6272d8dab8b826b5c1376ac90af1b".format(customer["_id"])
response=requests.get(account_url)
response=json.loads(response.text)
response=response[0]
return response["balance"]
class Borrower(object):
def __init__(self, name, borrow_amount, borrow_interest_rate, customer_id):
self.name=name
self.borrow_amount=borrow_amount
self.borrow_interest_rate=borrow_interest_rate
customer_url="http://api.reimaginebanking.com/customers/{}?key=dcd6272d8dab8b826b5c1376ac90af1b".format(customer_id)
customer=requests.get(customer_url)
customer=json.loads(customer.text)
        self.customer=customer # Capital One customer record (dict) from the GET /customers call
if name=="Sasha":
self.credit_score=224
if name=="Elle":
self.credit_score=160
if name=="Rihanna":
self.credit_score=289
if name=="Adam":
self.credit_score=getScore()[0]
if name=="Billy":
self.credit_score=getScore()[1]
if name=="Carlisle":
self.credit_score=getScore()[2]
self.account_id=get_account_number(customer)
self.account_balance=get_account_balance(customer)
# Look up an existing Capital One customer by first name (e.g. Adam) and wrap it in a Borrower
def make_borrower(borrow_amount, borrow_interest_rate, name):
customers_url="http://api.reimaginebanking.com/customers?key=dcd6272d8dab8b826b5c1376ac90af1b"
customers=requests.get(customers_url)
customers=json.loads(customers.text)
customer_id=None
for customer in customers:
if customer["first_name"]==name:
customer_id=customer["_id"]
# if customer_id==None:
#do something
Customer=Borrower(name, borrow_amount, borrow_interest_rate, customer_id)
return Customer
def main():
adam_borrow_amount=200
adam_borrow_interest_rate=2
Adam=make_borrower(adam_borrow_amount, adam_borrow_interest_rate,"Adam")
billy_borrow_amount=100
billy_interest_rate=2
Billy=make_borrower(billy_borrow_amount, billy_interest_rate, "Billy")
carlisle_borrow_amount=600
carlisle_interest_rate=3
Carlisle=make_borrower(carlisle_borrow_amount, carlisle_interest_rate, "Carlisle")
sasha_borrow_amount=250
sasha_interest_rate=1
Sasha=make_borrower(sasha_borrow_amount, sasha_interest_rate, "Sasha")
rihanna_borrow_amount=400
rihanna_interest_rate=4
Rihanna=make_borrower(rihanna_borrow_amount, rihanna_interest_rate, "Rihanna")
elle_borrow_amount=800
elle_interest_rate=1
Elle=make_borrower(elle_borrow_amount, elle_interest_rate, "Elle")
    borrowers=[Adam, Billy, Carlisle, Sasha, Rihanna, Elle] #all of type Borrower
return borrowers
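if __name__ == "__main__":
    # Demonstration only: build the six hard-coded borrowers and print
    # each one's account balance.
    for borrower in main():
        print(borrower.name, borrower.account_balance)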
|
{
"content_hash": "2e4ae8d73e9504f44b8f2d77244dfb24",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 132,
"avg_line_length": 42.43421052631579,
"alnum_prop": 0.7032558139534884,
"repo_name": "Shashank-Ojha/MakeBank",
"id": "57555059d127d423de8c94fb61c6a95741449a1e",
"size": "3225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "is_borrower.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1630595"
},
{
"name": "HTML",
"bytes": "74032"
},
{
"name": "JavaScript",
"bytes": "1509355"
},
{
"name": "Python",
"bytes": "20583"
}
],
"symlink_target": ""
}
|
import datetime
from argparse import ArgumentParser
from typing import Any, List
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from django.utils.timezone import now as timezone_now
from zerver.models import Message, Realm, Recipient, Stream, \
Subscription, UserActivity, UserMessage, UserProfile, get_realm
MOBILE_CLIENT_LIST = ["Android", "ios"]
HUMAN_CLIENT_LIST = MOBILE_CLIENT_LIST + ["website"]
human_messages = Message.objects.filter(sending_client__name__in=HUMAN_CLIENT_LIST)
class Command(BaseCommand):
help = "Generate statistics on realm activity."
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('realms', metavar='<realm>', type=str, nargs='*',
help="realm to generate statistics for")
def active_users(self, realm: Realm) -> List[UserProfile]:
# Has been active (on the website, for now) in the last 7 days.
activity_cutoff = timezone_now() - datetime.timedelta(days=7)
return [activity.user_profile for activity in (
UserActivity.objects.filter(user_profile__realm=realm,
user_profile__is_active=True,
last_visit__gt=activity_cutoff,
query="/json/users/me/pointer",
client__name="website"))]
def messages_sent_by(self, user: UserProfile, days_ago: int) -> int:
sent_time_cutoff = timezone_now() - datetime.timedelta(days=days_ago)
return human_messages.filter(sender=user, pub_date__gt=sent_time_cutoff).count()
def total_messages(self, realm: Realm, days_ago: int) -> int:
sent_time_cutoff = timezone_now() - datetime.timedelta(days=days_ago)
return Message.objects.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).count()
def human_messages(self, realm: Realm, days_ago: int) -> int:
sent_time_cutoff = timezone_now() - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).count()
def api_messages(self, realm: Realm, days_ago: int) -> int:
return (self.total_messages(realm, days_ago) - self.human_messages(realm, days_ago))
def stream_messages(self, realm: Realm, days_ago: int) -> int:
sent_time_cutoff = timezone_now() - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff,
recipient__type=Recipient.STREAM).count()
def private_messages(self, realm: Realm, days_ago: int) -> int:
sent_time_cutoff = timezone_now() - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).exclude(
recipient__type=Recipient.STREAM).exclude(recipient__type=Recipient.HUDDLE).count()
def group_private_messages(self, realm: Realm, days_ago: int) -> int:
sent_time_cutoff = timezone_now() - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).exclude(
recipient__type=Recipient.STREAM).exclude(recipient__type=Recipient.PERSONAL).count()
def report_percentage(self, numerator: float, denominator: float, text: str) -> None:
if not denominator:
fraction = 0.0
else:
fraction = numerator / float(denominator)
print("%.2f%% of" % (fraction * 100,), text)
def handle(self, *args: Any, **options: Any) -> None:
if options['realms']:
try:
realms = [get_realm(string_id) for string_id in options['realms']]
except Realm.DoesNotExist as e:
raise CommandError(e)
else:
realms = Realm.objects.all()
for realm in realms:
print(realm.string_id)
user_profiles = UserProfile.objects.filter(realm=realm, is_active=True)
active_users = self.active_users(realm)
num_active = len(active_users)
print("%d active users (%d total)" % (num_active, len(user_profiles)))
streams = Stream.objects.filter(realm=realm).extra(
tables=['zerver_subscription', 'zerver_recipient'],
where=['zerver_subscription.recipient_id = zerver_recipient.id',
'zerver_recipient.type = 2',
'zerver_recipient.type_id = zerver_stream.id',
'zerver_subscription.active = true']).annotate(count=Count("name"))
print("%d streams" % (streams.count(),))
for days_ago in (1, 7, 30):
print("In last %d days, users sent:" % (days_ago,))
sender_quantities = [self.messages_sent_by(user, days_ago) for user in user_profiles]
for quantity in sorted(sender_quantities, reverse=True):
print(quantity, end=' ')
print("")
print("%d stream messages" % (self.stream_messages(realm, days_ago),))
print("%d one-on-one private messages" % (self.private_messages(realm, days_ago),))
print("%d messages sent via the API" % (self.api_messages(realm, days_ago),))
print("%d group private messages" % (self.group_private_messages(realm, days_ago),))
num_notifications_enabled = len([x for x in active_users if x.enable_desktop_notifications])
self.report_percentage(num_notifications_enabled, num_active,
"active users have desktop notifications enabled")
num_enter_sends = len([x for x in active_users if x.enter_sends])
self.report_percentage(num_enter_sends, num_active,
"active users have enter-sends")
all_message_count = human_messages.filter(sender__realm=realm).count()
multi_paragraph_message_count = human_messages.filter(
sender__realm=realm, content__contains="\n\n").count()
self.report_percentage(multi_paragraph_message_count, all_message_count,
"all messages are multi-paragraph")
# Starred messages
starrers = UserMessage.objects.filter(user_profile__in=user_profiles,
flags=UserMessage.flags.starred).values(
"user_profile").annotate(count=Count("user_profile"))
print("%d users have starred %d messages" % (
len(starrers), sum([elt["count"] for elt in starrers])))
active_user_subs = Subscription.objects.filter(
user_profile__in=user_profiles, active=True)
# Streams not in home view
non_home_view = active_user_subs.filter(is_muted=True).values(
"user_profile").annotate(count=Count("user_profile"))
print("%d users have %d streams not in home view" % (
len(non_home_view), sum([elt["count"] for elt in non_home_view])))
# Code block markup
markup_messages = human_messages.filter(
sender__realm=realm, content__contains="~~~").values(
"sender").annotate(count=Count("sender"))
print("%d users have used code block markup on %s messages" % (
len(markup_messages), sum([elt["count"] for elt in markup_messages])))
# Notifications for stream messages
notifications = active_user_subs.filter(desktop_notifications=True).values(
"user_profile").annotate(count=Count("user_profile"))
print("%d users receive desktop notifications for %d streams" % (
len(notifications), sum([elt["count"] for elt in notifications])))
print("")
|
{
"content_hash": "676c9baa4b35dbbb7e48b71ad0538909",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 104,
"avg_line_length": 52.70860927152318,
"alnum_prop": 0.6043472798090213,
"repo_name": "tommyip/zulip",
"id": "365f8dd6de9e22b5be2b6e8fb2ee08bf69a3abfb",
"size": "7959",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analytics/management/commands/realm_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400301"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "718599"
},
{
"name": "JavaScript",
"bytes": "3092201"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71123"
},
{
"name": "Python",
"bytes": "6889539"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
}
|
import sys
import multiprocessing as mp
from threading import Thread
from collections import namedtuple
import pickle
import warnings
import numpy as np
import joblib
import sklearn.cross_validation as skl_cross_validation
from Orange.util import OrangeWarning
from Orange.data import Table, Domain, ContinuousVariable, DiscreteVariable
__all__ = ["Results", "CrossValidation", "LeaveOneOut", "TestOnTrainingData",
"ShuffleSplit", "TestOnTestData", "sample"]
_MpResults = namedtuple('_MpResults', ('fold_i', 'learner_i', 'model',
'failed', 'n_values', 'values', 'probs'))
def _identity(x):
return x
def _mp_worker(fold_i, train_data, test_data, learner_i, learner,
store_models, mp_queue):
predicted, probs, model, failed = None, None, None, False
try:
if len(train_data) == 0 or len(test_data) == 0:
raise RuntimeError('Test fold is empty')
model = learner(train_data)
if train_data.domain.has_discrete_class:
predicted, probs = model(test_data, model.ValueProbs)
elif train_data.domain.has_continuous_class:
predicted = model(test_data, model.Value)
# Different models can fail at any time raising any exception
except Exception as ex: # pylint: disable=broad-except
failed = ex
mp_queue.put("dummy text; use for printing when debugging")
return _MpResults(fold_i, learner_i, store_models and model,
failed, len(test_data), predicted, probs)
class Results:
"""
Class for storing predictions in model testing.
Attributes:
data (Optional[Table]): Data used for testing. When data is stored,
this is typically not a copy but a reference.
models (Optional[List[Model]]): A list of induced models.
row_indices (np.ndarray): Indices of rows in `data` that were used in
testing, stored as a numpy vector of length `nrows`.
Values of `actual[i]`, `predicted[i]` and `probabilities[i]` refer
to the target value of instance `data[row_indices[i]]`.
nrows (int): The number of test instances (including duplicates).
actual (np.ndarray): Actual values of target variable;
a numpy vector of length `nrows` and of the same type as `data`
(or `np.float32` if the type of data cannot be determined).
predicted (np.ndarray): Predicted values of target variable;
a numpy array of shape (number-of-methods, `nrows`) and
of the same type as `data` (or `np.float32` if the type of data
cannot be determined).
probabilities (Optional[np.ndarray]): Predicted probabilities
(for discrete target variables);
a numpy array of shape (number-of-methods, `nrows`, number-of-classes)
of type `np.float32`.
folds (List[Slice or List[int]]): A list of indices (or slice objects)
corresponding to rows of each fold.
"""
score_by_folds = True
# noinspection PyBroadException
# noinspection PyNoneFunctionAssignment
def __init__(self, data=None, nmethods=0, *, learners=None, train_data=None,
nrows=None, nclasses=None,
store_data=False, store_models=False,
domain=None, actual=None, row_indices=None,
predicted=None, probabilities=None,
preprocessor=None, callback=None, n_jobs=1):
"""
Construct an instance with default values: `None` for :obj:`data` and
:obj:`models`.
If the number of rows and/or the number of classes is not given, it is
inferred from :obj:`data`, if provided. The data type for
:obj:`actual` and :obj:`predicted` is determined from the data; if the
        latter cannot be found, `np.float32` is used.
        Attributes :obj:`actual` and :obj:`row_indices` are constructed as empty
(uninitialized) arrays of the appropriate size, if the number of rows
is known. Attribute :obj:`predicted` is constructed if the number of
rows and of methods is given; :obj:`probabilities` also requires
knowing the number of classes.
:param data: Data or domain
:type data: Orange.data.Table or Orange.data.Domain
:param nmethods: The number of methods that will be tested
:type nmethods: int
:param nrows: The number of test instances (including duplicates)
:type nrows: int
:param nclasses: The number of class values
:type nclasses: int
:param store_data: A flag that tells whether to store the data;
this argument can be given only as keyword argument
:type store_data: bool
:param store_models: A flag that tells whether to store the models;
this argument can be given only as keyword argument
:type store_models: bool
:param preprocessor: Preprocessor for training data
:type preprocessor: Orange.preprocess.Preprocess
:param callback: Function for reporting back the progress as a value
between 0 and 1
:type callback: callable
:param n_jobs: The number of processes to parallelize the evaluation
on. -1 to parallelize on all but one CPUs. 1 for no
parallelization.
:type n_jobs: int
"""
self.store_data = store_data
self.store_models = store_models
self.dtype = np.float32
self.n_jobs = max(1, joblib.cpu_count() - 1 if n_jobs < 0 else n_jobs)
self.models = None
self.folds = None
self.indices = None
self.row_indices = row_indices
self.preprocessor = preprocessor or _identity
self._callback = callback or _identity
self.learners = learners
if learners:
nmethods = len(learners)
if nmethods is not None:
self.failed = [False] * nmethods
if data:
self.data = data if self.store_data else None
self.domain = data.domain
self.dtype = getattr(data.Y, 'dtype', self.dtype)
if learners:
train_data = train_data or data
self.fit(train_data, data)
return
def set_or_raise(value, exp_values, msg):
for exp_value in exp_values:
if exp_value is False:
continue
if value is None:
value = exp_value
elif value != exp_value:
raise ValueError(msg)
return value
domain = self.domain = set_or_raise(
domain, [data is not None and data.domain],
"mismatching domain")
self.nrows = nrows = set_or_raise(
nrows, [data is not None and len(data),
actual is not None and len(actual),
row_indices is not None and len(row_indices),
predicted is not None and predicted.shape[1],
probabilities is not None and probabilities.shape[1]],
"mismatching number of rows")
nclasses = set_or_raise(
nclasses, [domain and (len(domain.class_var.values)
if domain.has_discrete_class
else None),
probabilities is not None and probabilities.shape[2]],
"mismatching number of class values")
        if nclasses is None and probabilities is not None:
            raise ValueError("regression results cannot have 'probabilities'")
nmethods = set_or_raise(
nmethods, [predicted is not None and predicted.shape[0],
probabilities is not None and probabilities.shape[0]],
"mismatching number of methods")
if actual is not None:
self.actual = actual
elif nrows is not None:
self.actual = np.empty(nrows, dtype=self.dtype)
if predicted is not None:
self.predicted = predicted
elif nmethods is not None and nrows is not None:
self.predicted = np.empty((nmethods, nrows), dtype=self.dtype)
if probabilities is not None:
self.probabilities = probabilities
elif nmethods is not None and nrows is not None and \
nclasses is not None:
self.probabilities = \
np.empty((nmethods, nrows, nclasses), dtype=np.float32)
def _prepare_arrays(self, data):
"""Initialize some mandatory arrays for results"""
nmethods = len(self.learners)
self.nrows = len(self.row_indices)
if self.store_models:
self.models = np.tile(None, (len(self.indices), nmethods))
# Initialize `predicted` and `probabilities` (only for discrete classes)
self.predicted = np.empty((nmethods, self.nrows), dtype=self.dtype)
if data.domain.has_discrete_class:
nclasses = len(data.domain.class_var.values)
self.probabilities = np.empty((nmethods, self.nrows, nclasses),
dtype=np.float32)
def get_fold(self, fold):
results = Results()
results.data = self.data
if self.folds is None:
raise ValueError("This 'Results' instance does not have folds.")
if self.models is not None:
results.models = self.models[fold]
results.row_indices = self.row_indices[self.folds[fold]]
results.actual = self.actual[self.folds[fold]]
results.predicted = self.predicted[:, self.folds[fold]]
results.domain = self.domain
if self.probabilities is not None:
results.probabilities = self.probabilities[:, self.folds[fold]]
return results
def get_augmented_data(self, model_names, include_attrs=True, include_predictions=True, include_probabilities=True):
"""
Return the data, augmented with predictions, probabilities (if the task is classification) and folds info.
Predictions, probabilities and folds are inserted as meta attributes.
Args:
model_names (list): A list of strings containing learners' names.
include_attrs (bool): Flag that tells whether to include original attributes.
include_predictions (bool): Flag that tells whether to include predictions.
include_probabilities (bool): Flag that tells whether to include probabilities.
Returns:
Orange.data.Table: Data augmented with predictions, (probabilities) and (fold).
"""
assert self.predicted.shape[0] == len(model_names)
data = self.data[self.row_indices]
class_var = data.domain.class_var
classification = class_var and class_var.is_discrete
new_meta_attr = []
new_meta_vals = np.empty((len(data), 0))
if classification:
# predictions
if include_predictions:
new_meta_attr.extend(DiscreteVariable(name=name, values=class_var.values)
for name in model_names)
new_meta_vals = np.hstack((new_meta_vals, self.predicted.T))
# probabilities
if include_probabilities:
for name in model_names:
new_meta_attr.extend(ContinuousVariable(name="%s (%s)" % (name, value))
for value in class_var.values)
for i in self.probabilities:
new_meta_vals = np.hstack((new_meta_vals, i))
elif include_predictions:
# regression
new_meta_attr.extend(ContinuousVariable(name=name)
for name in model_names)
new_meta_vals = np.hstack((new_meta_vals, self.predicted.T))
# add fold info
if self.folds is not None:
new_meta_attr.append(DiscreteVariable(name="Fold", values=[i+1 for i, s in enumerate(self.folds)]))
fold = np.empty((len(data), 1))
for i, s in enumerate(self.folds):
fold[s, 0] = i
new_meta_vals = np.hstack((new_meta_vals, fold))
# append new columns to meta attributes
new_meta_attr = list(data.domain.metas) + new_meta_attr
new_meta_vals = np.hstack((data.metas, new_meta_vals))
X = data.X if include_attrs else np.empty((len(data), 0))
attrs = data.domain.attributes if include_attrs else []
domain = Domain(attrs, data.domain.class_vars, metas=new_meta_attr)
predictions = Table.from_numpy(domain, X, data.Y, metas=new_meta_vals)
predictions.name = data.name
return predictions
_MIN_NJOBS_X_SIZE = 20e3
def fit(self, train_data, test_data=None):
"""Fits `self.learners` using folds sampled from the provided data.
Args:
train_data (Table): table to sample train folds
            test_data (Optional[Table]): table to sample test folds;
                if None then `train_data` will be used
"""
test_data = test_data or train_data
self.setup_indices(train_data, test_data)
self.prepare_arrays(test_data)
self._prepare_arrays(test_data)
n_callbacks = len(self.learners) * len(self.indices)
n_jobs = max(1, min(self.n_jobs, n_callbacks))
def _is_picklable(obj):
try:
return bool(pickle.dumps(obj))
except (AttributeError, TypeError, pickle.PicklingError):
return False
if n_jobs > 1 and not all(_is_picklable(learner) for learner in self.learners):
n_jobs = 1
warnings.warn("Not all arguments (learners) are picklable. "
"Setting n_jobs=1", OrangeWarning)
if n_jobs > 1 and mp.current_process().daemon:
n_jobs = 1
warnings.warn("Worker subprocesses cannot spawn new worker "
"subprocesses (e.g. parameter tuning with internal "
"cross-validation). Setting n_jobs=1", OrangeWarning)
# Workaround for NumPy locking on Macintosh and Ubuntu 14.04 LTS
# May be removed once offending libs and OSes are nowhere to be found.
# https://pythonhosted.org/joblib/parallel.html#bad-interaction-of-multiprocessing-and-third-party-libraries
mp_ctx = mp.get_context(
'forkserver' if sys.platform.startswith(('darwin', 'linux')) and n_jobs > 1 else None)
if (n_jobs > 1 and mp_ctx.get_start_method() != 'fork' and
train_data.X.size < self._MIN_NJOBS_X_SIZE):
n_jobs = 1
warnings.warn("Working with small-enough data; single-threaded "
"sequential excecution will (probably) be faster. "
"Setting n_jobs=1", OrangeWarning)
try:
            # Use context-adapted Queue or just the regular Queue if no
            # multiprocessing (otherwise it misbehaves, at least on Windows)
mp_queue = mp_ctx.Manager().Queue() if n_jobs > 1 else mp.Queue()
except (EOFError, RuntimeError):
mp_queue = mp.Queue()
n_jobs = 1
warnings.warn('''
Can't run multiprocessing code without a __main__ guard.
Multiprocessing strategies 'forkserver' (used by Orange's evaluation
methods by default on Mac OS X) and 'spawn' (default on Windows)
require the main code entry point be guarded with:
if __name__ == '__main__':
import multiprocessing as mp
    mp.freeze_support() # Needed only on Windows
... # Rest of your code
... # See: https://docs.python.org/3/library/__main__.html
Otherwise, as the module is re-imported in another process, infinite
recursion ensues.
Guard your executed code with above Python idiom, or pass n_jobs=1
to evaluation methods, i.e. {}(..., n_jobs=1). Setting n_jobs to 1.
'''.format(self.__class__.__name__), OrangeWarning)
data_splits = (
(fold_i, self.preprocessor(train_data[train_i]), test_data[test_i])
for fold_i, (train_i, test_i) in enumerate(self.indices))
args_iter = (
(fold_i, train_data, test_data, learner_i, learner,
self.store_models, mp_queue)
# NOTE: If this nested for loop doesn't work, try
# itertools.product
for (fold_i, train_data, test_data) in data_splits
for (learner_i, learner) in enumerate(self.learners))
def _callback_percent(n_steps, queue):
"""Block until one of the subprocesses completes, before
signalling callback with percent"""
for percent in np.linspace(.0, .99, n_steps + 1)[1:]:
queue.get()
try:
self._callback(percent)
except Exception:
# Callback may error for whatever reason (e.g. PEBKAC)
# In that case, rather gracefully continue computation
# instead of failing
pass
results = []
with joblib.Parallel(n_jobs=n_jobs, backend=mp_ctx) as parallel:
tasks = (joblib.delayed(_mp_worker)(*args) for args in args_iter)
# Start the tasks from another thread ...
thread = Thread(target=lambda: results.append(parallel(tasks)))
thread.start()
# ... so that we can update the GUI (callback) from the main thread
_callback_percent(n_callbacks, mp_queue)
thread.join()
results = sorted(results[0])
ptr, prev_fold_i, prev_n_values = 0, 0, 0
for res in results:
if res.fold_i != prev_fold_i:
ptr += prev_n_values
prev_fold_i = res.fold_i
result_slice = slice(ptr, ptr + res.n_values)
prev_n_values = res.n_values
if res.failed:
self.failed[res.learner_i] = res.failed
continue
if self.store_models:
self.models[res.fold_i][res.learner_i] = res.model
self.predicted[res.learner_i][result_slice] = res.values
if train_data.domain.has_discrete_class:
self.probabilities[res.learner_i][result_slice, :] = res.probs
self._callback(1)
return self
def prepare_arrays(self, test_data):
"""Initialize arrays that will be used by `fit` method.
"""
self.folds = []
row_indices = []
ptr = 0
for train, test in self.indices:
self.folds.append(slice(ptr, ptr + len(test)))
row_indices.append(test)
ptr += len(test)
self.row_indices = np.concatenate(row_indices, axis=0)
self.actual = test_data[self.row_indices].Y.ravel()
def setup_indices(self, train_data, test_data):
"""Initializes `self.indices` with iterable objects with slices
(or indices) for each fold.
Args:
train_data (Table): train table
test_data (Table): test table
"""
raise NotImplementedError()
def split_by_model(self):
"""Split evaluation results by models
"""
data = self.data
nmethods = len(self.predicted)
for i in range(nmethods):
res = Results()
res.data = data
res.domain = self.domain
res.row_indices = self.row_indices
res.actual = self.actual
res.folds = self.folds
res.score_by_folds = self.score_by_folds
res.predicted = self.predicted[(i,), :]
if getattr(self, "probabilities", None) is not None:
res.probabilities = self.probabilities[(i,), :, :]
if self.models is not None:
res.models = self.models[:, i]
res.failed = [self.failed[i]]
yield res
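    # Illustrative usage of split_by_model (a sketch; assumes `results` is a
    # filled-in Results instance):
    #     for single in results.split_by_model():
    #         print(single.predicted.shape)  # (1, n_instances)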
class CrossValidation(Results):
"""
K-fold cross validation.
    If the constructor is given the data and a list of learning algorithms, it
    runs cross validation; the constructed instance is a `Results` object
    containing the predicted values and probabilities.
    .. attribute:: k
        The number of folds.
    .. attribute:: random_state
        Seed for the pseudo-random number generator used to shuffle the data.
    """
def __init__(self, data, learners, k=10, stratified=True, random_state=0, store_data=False,
store_models=False, preprocessor=None, callback=None, warnings=None,
n_jobs=1):
self.k = k
self.stratified = stratified
self.random_state = random_state
if warnings is None:
self.warnings = []
else:
self.warnings = warnings
super().__init__(data, learners=learners, store_data=store_data,
store_models=store_models, preprocessor=preprocessor,
callback=callback, n_jobs=n_jobs)
def setup_indices(self, train_data, test_data):
self.indices = None
if self.stratified and test_data.domain.has_discrete_class:
try:
self.indices = skl_cross_validation.StratifiedKFold(
test_data.Y, self.k, shuffle=True, random_state=self.random_state
)
except ValueError:
self.warnings.append("Using non-stratified sampling.")
self.indices = None
if self.indices is None:
self.indices = skl_cross_validation.KFold(
len(test_data), self.k, shuffle=True, random_state=self.random_state
)
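# Illustrative usage (a sketch; assumes Orange's Table and a learner such as
# Orange.classification.LogisticRegressionLearner are importable):
#     from Orange.data import Table
#     from Orange.classification import LogisticRegressionLearner
#     res = CrossValidation(Table('iris'), [LogisticRegressionLearner()], k=5)
#     # res.predicted has shape (n_learners, n_instances)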
class LeaveOneOut(Results):
"""Leave-one-out testing"""
score_by_folds = False
def __init__(self, data, learners, store_data=False, store_models=False,
preprocessor=None, callback=None, n_jobs=1):
super().__init__(data, learners=learners, store_data=store_data,
store_models=store_models, preprocessor=preprocessor,
callback=callback, n_jobs=n_jobs)
def setup_indices(self, train_data, test_data):
self.indices = skl_cross_validation.LeaveOneOut(len(test_data))
def prepare_arrays(self, test_data):
        # sped-up version of super().prepare_arrays(test_data)
self.row_indices = np.arange(len(test_data))
self.folds = self.row_indices
self.actual = test_data.Y.flatten()
class ShuffleSplit(Results):
def __init__(self, data, learners, n_resamples=10, train_size=None,
test_size=0.1, stratified=True, random_state=0, store_data=False,
store_models=False, preprocessor=None, callback=None, n_jobs=1):
self.n_resamples = n_resamples
self.train_size = train_size
self.test_size = test_size
self.stratified = stratified
self.random_state = random_state
super().__init__(data, learners=learners, store_data=store_data,
store_models=store_models, preprocessor=preprocessor,
callback=callback, n_jobs=n_jobs)
def setup_indices(self, train_data, test_data):
if self.stratified and test_data.domain.has_discrete_class:
self.indices = skl_cross_validation.StratifiedShuffleSplit(
test_data.Y, n_iter=self.n_resamples, train_size=self.train_size,
test_size=self.test_size, random_state=self.random_state
)
else:
self.indices = skl_cross_validation.ShuffleSplit(
len(test_data), n_iter=self.n_resamples, train_size=self.train_size,
test_size=self.test_size, random_state=self.random_state
)
class TestOnTestData(Results):
"""
Test on a separate test data set.
"""
def __init__(self, train_data, test_data, learners, store_data=False,
store_models=False, preprocessor=None, callback=None, n_jobs=1):
super().__init__(test_data, train_data=train_data, learners=learners,
store_data=store_data,
store_models=store_models, preprocessor=preprocessor,
callback=callback, n_jobs=n_jobs)
def setup_indices(self, train_data, test_data):
self.indices = ((Ellipsis, Ellipsis),)
def prepare_arrays(self, test_data):
self.row_indices = np.arange(len(test_data))
self.folds = (Ellipsis, )
self.actual = test_data.Y.ravel()
class TestOnTrainingData(TestOnTestData):
"""
    Trains and tests on the same data
"""
def __init__(self, data, learners, store_data=False, store_models=False,
preprocessor=None, callback=None, n_jobs=1):
if preprocessor is not None:
data = preprocessor(data)
super().__init__(train_data=data, test_data=data, learners=learners,
store_data=store_data, store_models=store_models,
preprocessor=None, callback=callback, n_jobs=n_jobs)
self.preprocessor = preprocessor
def sample(table, n=0.7, stratified=False, replace=False,
random_state=None):
"""
    Samples data instances from a data table. Returns the sample and the
    remaining data instances from the input table (those not in the
    sample). Uses several sampling functions from
    `scikit-learn <http://scikit-learn.org>`_.
    Parameters
    ----------
    table : data table
        A data table from which to sample.
    n : float, int (default = 0.7)
        If float, should be between 0.0 and 1.0 and represents
        the proportion of data instances in the resulting sample. If
        int, n is the number of data instances in the resulting sample.
    stratified : bool, optional (default = False)
        If true, sampling will try to consider class values and
        match the distribution of class values in the sample and the
        remainder.
    replace : bool, optional (default = False)
        Sample with replacement.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
"""
    if isinstance(n, float):
n = int(n * len(table))
if replace:
if random_state is None:
rgen = np.random
else:
rgen = np.random.mtrand.RandomState(random_state)
sample = rgen.randint(0, len(table), n)
o = np.ones(len(table))
o[sample] = 0
others = np.nonzero(o)[0]
return table[sample], table[others]
n = len(table) - n
if stratified and table.domain.has_discrete_class:
test_size = max(len(table.domain.class_var.values), n)
ind = skl_cross_validation.StratifiedShuffleSplit(
table.Y.ravel(), n_iter=1,
test_size=test_size, train_size=len(table) - test_size,
random_state=random_state)
else:
ind = skl_cross_validation.ShuffleSplit(
len(table), n_iter=1,
test_size=n, random_state=random_state)
ind = next(iter(ind))
return table[ind[0]], table[ind[1]]
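# Illustrative usage (a sketch; assumes an Orange Table can be loaded by name):
#     from Orange.data import Table
#     iris = Table('iris')
#     train, rest = sample(iris, n=0.7, stratified=True, random_state=42)
#     assert len(train) + len(rest) == len(iris)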
|
{
"content_hash": "6b22c1c4ae61f600a17c72cd56640c7c",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 120,
"avg_line_length": 40.28550295857988,
"alnum_prop": 0.5933977160063159,
"repo_name": "cheral/orange3",
"id": "fe9c3406ec31734e5a43e9322fd6d5f3567112c1",
"size": "27233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Orange/evaluation/testing.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyvidia',
version='1.0.1',
description='Nvidia driver version detector for Linux',
long_description=long_description,
url='https://github.com/ntpeters/pyvidia',
author='Nate Peterson',
author_email='ntpeters@mtu.edu',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Hardware :: Hardware Drivers',
'Topic :: Utilities',
],
keywords='nvidia linux driver',
py_modules=['pyvidia'],
packages=find_packages(),
install_requires=['beautifulsoup4', 'lxml', 'six'],
package_data={
'pyvidia': [],
},
entry_points={
'console_scripts': [
'pyvidia=pyvidia:__main',
],
},
)
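# Illustrative usage after installation (an assumption: run on a Linux host;
# 'pyvidia' is the console script declared in entry_points above):
#     pip install .
#     pyvidia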
|
{
"content_hash": "714440b4cf86ef7cb9dc62bbe9d522e1",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 60,
"avg_line_length": 26.127659574468087,
"alnum_prop": 0.5993485342019544,
"repo_name": "ntpeters/pyvidia",
"id": "c6cbffc8a057198431bb80ed50ffdff6c5369f4d",
"size": "1229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18305"
}
],
"symlink_target": ""
}
|
import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
import h2o_summ
DO_MEDIAN = True
def write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
expectedRange = (expectedMax - expectedMin) + 1
for i in range(rowCount):
rowData = []
ri = expectedMin + (i % expectedRange)
for j in range(colCount):
# ri = r1.randint(expectedMin, expectedMax)
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
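# Illustrative output (a sketch): with rowCount=5, colCount=2, expectedMin=0
# and expectedMax=9 (so expectedRange=10), the generated CSV begins
#     0,0
#     1,1
#     2,2
# i.e. every column of row i holds expectedMin + (i % expectedRange).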
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1, base_port=54327)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_summary2_percentile2(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(500000, 2, 'cD', 300, 0, 9), # expectedMin/Max must cause 10 values
(500000, 2, 'cE', 300, 1, 10), # expectedMin/Max must cause 10 values
(500000, 2, 'cF', 300, 2, 11), # expectedMin/Max must cause 10 values
]
timeoutSecs = 10
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
x = 0
for (rowCount, colCount, hex_key, timeoutSecs, expectedMin, expectedMax) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
x += 1
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
legalValues = {}
for x in range(expectedMin, expectedMax):
legalValues[x] = x
write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEEDPERFILE)
h2o.beta_features = False
csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=10, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
h2o.beta_features = True
summaryResult = h2o_cmd.runSummary(key=hex_key, cols=0, max_ncols=1)
if h2o.verbose:
print "summaryResult:", h2o.dump_json(summaryResult)
summaries = summaryResult['summaries']
scipyCol = 0
for column in summaries:
colname = column['colname']
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
stattype= stats['type']
mean = stats['mean']
sd = stats['sd']
zeros = stats['zeros']
mins = stats['mins']
maxs = stats['maxs']
pct = stats['pct']
pctile = stats['pctile']
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
hcnt = column['hcnt']
for b in hcnt:
e = .1 * rowCount
self.assertAlmostEqual(b, .1 * rowCount, delta=.01*rowCount,
msg="Bins not right. b: %s e: %s" % (b, e))
print "pctile:", pctile
print "maxs:", maxs
self.assertEqual(maxs[0], expectedMax)
print "mins:", mins
self.assertEqual(mins[0], expectedMin)
for v in pctile:
self.assertTrue(v >= expectedMin,
"Percentile value %s should all be >= the min dataset value %s" % (v, expectedMin))
self.assertTrue(v <= expectedMax,
"Percentile value %s should all be <= the max dataset value %s" % (v, expectedMax))
eV1 = [1.0, 1.0, 1.0, 3.0, 4.0, 5.0, 7.0, 8.0, 9.0, 10.0, 10.0]
if expectedMin==1:
eV = eV1
elif expectedMin==0:
eV = [e-1 for e in eV1]
elif expectedMin==2:
eV = [e+1 for e in eV1]
else:
raise Exception("Test doesn't have the expected percentileValues for expectedMin: %s" % expectedMin)
trial += 1
# if colname!='' and expected[scipyCol]:
if colname!='':
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
h2o_summ.quantile_comparisons(
csvPathnameFull,
skipHeader=True,
col=scipyCol,
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
h2oSummary2=pctile[5 if DO_MEDIAN else 10],
# h2oQuantilesApprox=qresult_single,
# h2oQuantilesExact=qresult,
)
scipyCol += 1
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "58b4c59cbe16d18ed98a186fa10b3fa8",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 124,
"avg_line_length": 37.13548387096774,
"alnum_prop": 0.5187630298818624,
"repo_name": "woobe/h2o",
"id": "35b9fd6e46701d995de6d6563447837a7b3a4bb7",
"size": "5756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/testdir_single_jvm/test_summary2_percentile2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
'''
The main source for testlib. Ties together the default test runners and
loaders.
Discovers and runs all tests from a given root directory.
'''
from __future__ import print_function
import sys
import os
base_dir = os.path.dirname(os.path.abspath(__file__))
ext_path = os.path.join(base_dir, os.pardir, 'ext')
sys.path.insert(0, base_dir)
sys.path.insert(0, ext_path)
import testlib.main as testlib
import testlib.config as config
import testlib.helper as helper
config.basedir = helper.absdirpath(__file__)
testlib()
|
{
"content_hash": "ddaf3a46401a6d9d53f9adcc9d5f23cd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 22.869565217391305,
"alnum_prop": 0.747148288973384,
"repo_name": "TUD-OS/gem5-dtu",
"id": "c8dc9b9613faf7a3aeb049a096aa26f611177515",
"size": "549",
"binary": false,
"copies": "2",
"ref": "refs/heads/dtu-mmu",
"path": "tests/main.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class File(models.Model):
file_name = models.CharField(max_length=500)
upload = models.FileField(upload_to='uploads/')
|
{
"content_hash": "8ce4ce8bd9d9192c3cc351cf54622693",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 23.25,
"alnum_prop": 0.7258064516129032,
"repo_name": "torchmed/biocloud",
"id": "b55f6448fd47c08aba4729d94f6f08b12a9e2b5b",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "389801"
},
{
"name": "HTML",
"bytes": "1950141"
},
{
"name": "JavaScript",
"bytes": "2826784"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "8710"
}
],
"symlink_target": ""
}
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MULTI_TENANT = True
AUTH_USER_MODEL = 'account.EWSUser'
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
# import Celery config
import djcelery
djcelery.setup_loader()
BROKER_URL = 'amqp://guest:guest@localhost:5672/'
ALLOWED_HOSTS = ['*',]
from datetime import timedelta
from celery.schedules import crontab
CELERYBEAT_SCHEDULE = {
'map_stats':{
'task': 'Map.tasks.update_system_stats',
'schedule': timedelta(hours=1),
'args': ()
},
'map_sov':{
'task': 'Map.tasks.update_system_sov',
'schedule': crontab(minute=0, hour=10),
'args': ()
},
'jump_cache':{
'task': 'Map.tasks.fill_jumps_cache',
'schedule': timedelta(minutes=10),
'args': ()
},
'downtime_sites': {
'task': 'Map.tasks.downtime_site_update',
'schedule': crontab(minute=5, hour=11),
'args': ()
},
'alliance_update':{
'task': 'core.tasks.update_all_alliances',
'schedule': crontab(minute=30, hour=10, day_of_week="tue"),
'args': ()
},
'stale_locations':{
'task': 'Map.tasks.clear_stale_records',
'schedule': timedelta(minutes=5),
'args': ()
},
'cache_reddit':{
'task': 'core.tasks.cache_eve_reddit',
'schedule': timedelta(minutes=45),
'args': ()
},
'cache_feeds':{
'task': 'core.tasks.update_feeds',
'schedule': timedelta(minutes=30),
'args': ()
},
'char_data':{
'task': 'API.tasks.update_char_data',
'schedule': timedelta(hours=1),
'args': ()
},
}
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'djangotest', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
}
}
CACHES = {
'default': {
'BACKEND': 'infinite_memcached.cache.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'TIMEOUT': 0
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Change the default login behavior
LOGIN_URL = '/account/login'
LOGIN_REDIRECT_URL = '/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^28avlv8e$sky_08pu926q^+b5&4&5&+ob7ma%v(tn$bg#=&k4'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'eveigb.middleware.IGBMiddleware',
)
ROOT_URLCONF = 'evewspace.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# Uncomment the next line to enable the admin:
#'django.contrib.admin',
'core',
'Map',
'POS',
'Alerts',
'Jabber',
'SiteTracker',
'API',
'Slack',
'account',
'eveigb',
'search',
'djcelery',
'south',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# Require a registration code to register
ACCOUNT_REQUIRE_REG_CODE = True
import django.conf.global_settings as DEFAULT_SETTINGS
TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS + ('core.context_processors.site', 'eveigb.context_processors.igb',)
# ejabberd auth gateway log settings
import logging
TUNNEL_EJABBERD_AUTH_GATEWAY_LOG = '/tmp/ejabberd.log'
TUNNEL_EJABBERD_AUTH_GATEWAY_LOG_LEVEL = logging.DEBUG
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Dirty hack to provide configuration overriding semantics. Use local_settings to override or add upon the default.
try:
LOCAL_SETTINGS
except NameError:
try:
from local_settings import *
except ImportError:
pass
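# Illustrative local_settings.py (an assumption -- any module-level name
# defined there replaces the default above, thanks to the star import):
#     DEBUG = False
#     BROKER_URL = 'amqp://user:password@broker.example.com:5672/'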
|
{
"content_hash": "abefc068a19c5f1ef8000b2d013da992",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 143,
"avg_line_length": 34.207317073170735,
"alnum_prop": 0.6033273915626857,
"repo_name": "Zumochi/eve-wspace",
"id": "5a6558196145c07bfc3be3098d3055080e1e4a07",
"size": "8416",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "evewspace/evewspace/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45009"
},
{
"name": "HTML",
"bytes": "152549"
},
{
"name": "JavaScript",
"bytes": "88761"
},
{
"name": "Nginx",
"bytes": "109"
},
{
"name": "Puppet",
"bytes": "6781"
},
{
"name": "Python",
"bytes": "1100699"
},
{
"name": "Shell",
"bytes": "2632"
}
],
"symlink_target": ""
}
|
from virtool.fake.wrapper import FakerWrapper
from virtool.otus.fake import create_fake_otus
async def test_create_fake_otus(mongo, config, fake2, snapshot, tmp_path):
app = {"db": mongo, "data_path": tmp_path, "fake": FakerWrapper(), "config": config}
user = await fake2.users.create()
await create_fake_otus(app, "reference_1", user.id)
assert await mongo.otus.find().to_list(None) == snapshot
assert await mongo.sequences.find().to_list(None) == snapshot
|
{
"content_hash": "78d655e44311662d55f0edba7042458c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 37.15384615384615,
"alnum_prop": 0.7060041407867494,
"repo_name": "igboyes/virtool",
"id": "a77fc434551daa1e453c31f4577af5720feb7a22",
"size": "483",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/otus/test_fake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "961"
},
{
"name": "HTML",
"bytes": "44858"
},
{
"name": "Python",
"bytes": "1316464"
}
],
"symlink_target": ""
}
|
import re
#PROJECT
from odds import Odds
class Outcome:
def __init__(
self,
name,
odds
):
self.name = re.sub(r'[^\w -]', '', name).lower().capitalize()
self.odds = odds
def win_amount(
self,
amount
):
return amount * self.odds
def __eq__(
self,
other_outcome
):
return self.name == other_outcome.name
def __ne__(
self,
other_outcome
):
return self.name != other_outcome.name
def __hash__(self):
return hash(self.name)
def __str__(self):
return '{name} ({odds}:{to_one})'.format(
name=self.name,
odds=self.odds,
to_one=Odds.TO_ONE
)
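# Illustrative usage (a sketch; assumes Odds.TO_ONE is defined in odds.py):
#     red = Outcome('Red', 1)
#     print(red.win_amount(5))         # 5
#     print(red == Outcome('red', 1))  # True -- names are normalized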
|
{
"content_hash": "da8b8d0f1eee74456728697917de31c5",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 69,
"avg_line_length": 17.904761904761905,
"alnum_prop": 0.4734042553191489,
"repo_name": "ddenhartog/itmaybeahack-roulette",
"id": "a401788173071ffa200fae1762c89a5ad4e7000a",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "outcome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10022"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
__version__ = "1.0.0"
|
{
"content_hash": "bb36f28fd5ce07d694f89bb3db1bc8d6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 24.466666666666665,
"alnum_prop": 0.6920980926430518,
"repo_name": "EUDAT-B2ACCESS/b2access-deprovisioning",
"id": "830023d7f021c4e30a5fb10f1c9a54e4affba468",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "b2accessdeprovisioning/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12218"
}
],
"symlink_target": ""
}
|
"""Generates and prints out imports and constants for new TensorFlow python api.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import importlib
import os
import sys
from tensorflow.python.tools.api.generator import doc_srcs
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
API_ATTRS = tf_export.API_ATTRS
API_ATTRS_V1 = tf_export.API_ATTRS_V1
_API_VERSIONS = [1, 2]
_COMPAT_MODULE_TEMPLATE = 'compat.v%d'
_COMPAT_MODULE_PREFIX = 'compat.v'
_DEFAULT_PACKAGE = 'tensorflow.python'
_GENFILES_DIR_SUFFIX = 'genfiles/'
_SYMBOLS_TO_SKIP_EXPLICITLY = {
# Overrides __getattr__, so that unwrapping tf_decorator
# would have side effects.
'tensorflow.python.platform.flags.FLAGS'
}
_ADDITIONAL_IMPORTS = """
from tensorflow.python.util.deprecation import deprecated_alias as _deprecated_alias
"""
_GENERATED_FILE_HEADER = """# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
\"\"\"%s
\"\"\"
from __future__ import print_function as _print_function
"""
_GENERATED_FILE_FOOTER = '\n\ndel _print_function\n'
class SymbolExposedTwiceError(Exception):
"""Raised when different symbols are exported with the same name."""
pass
def format_import(source_module_name, source_name, dest_name):
"""Formats import statement.
Args:
source_module_name: (string) Source module to import from.
source_name: (string) Source symbol name to import.
dest_name: (string) Destination alias name.
Returns:
An import statement string.
"""
if source_module_name:
if source_name == dest_name:
return 'from %s import %s' % (source_module_name, source_name)
else:
return 'from %s import %s as %s' % (
source_module_name, source_name, dest_name)
else:
if source_name == dest_name:
return 'import %s' % source_name
else:
return 'import %s as %s' % (source_name, dest_name)
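# For example (illustrative inputs and outputs):
#   format_import('tensorflow.python', 'Session', 'Session')
#       -> 'from tensorflow.python import Session'
#   format_import('', 'numpy', 'np')
#       -> 'import numpy as np'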
class _ModuleInitCodeBuilder(object):
"""Builds a map from module name to imports included in that module."""
def __init__(self, output_package):
self._output_package = output_package
self._module_imports = collections.defaultdict(
lambda: collections.defaultdict(set))
self._deprecated_module_imports = collections.defaultdict(
lambda: collections.defaultdict(set))
self._dest_import_to_id = collections.defaultdict(int)
# Names that start with underscore in the root module.
self._underscore_names_in_root = []
def _check_already_imported(self, symbol_id, api_name):
if (api_name in self._dest_import_to_id and
symbol_id != self._dest_import_to_id[api_name] and
symbol_id != -1):
raise SymbolExposedTwiceError(
'Trying to export multiple symbols with same name: %s.' %
api_name)
self._dest_import_to_id[api_name] = symbol_id
def add_import(
self, symbol_id, dest_module_name, source_module_name, source_name,
dest_name):
"""Adds this import to module_imports.
Args:
symbol_id: (number) Unique identifier of the symbol to import.
dest_module_name: (string) Module name to add import to.
source_module_name: (string) Module to import from.
source_name: (string) Name of the symbol to import.
dest_name: (string) Import the symbol using this name.
Raises:
SymbolExposedTwiceError: Raised when an import with the same
dest_name has already been added to dest_module_name.
"""
import_str = format_import(source_module_name, source_name, dest_name)
# Check if we are trying to expose two different symbols with same name.
full_api_name = dest_name
if dest_module_name:
full_api_name = dest_module_name + '.' + full_api_name
self._check_already_imported(symbol_id, full_api_name)
if not dest_module_name and dest_name.startswith('_'):
self._underscore_names_in_root.append(dest_name)
# The same symbol can be available in multiple modules.
# We store all possible ways of importing this symbol and later pick just
# one.
self._module_imports[dest_module_name][full_api_name].add(import_str)
def add_deprecated_endpoint(
self, symbol_id, dest_module_name, source_module_name, source_name,
dest_name, canonical_endpoint):
"""Adds deprecated alias to deprecated_module_imports.
Args:
symbol_id: (number) Unique identifier of the symbol to import.
dest_module_name: (string) Module name to add import to.
source_module_name: (string) Module to import from.
source_name: (string) Name of the symbol to import.
dest_name: (string) Import the deprecated symbol using this name.
canonical_endpoint: (string) Preferred endpoint that should be used
instead of the deprecated one.
Raises:
SymbolExposedTwiceError: Raised when an import with the same
dest_name has already been added to dest_module_name.
"""
deprecated_endpoint = (
dest_module_name + '.' + dest_name if dest_module_name else dest_name)
self._check_already_imported(symbol_id, deprecated_endpoint)
# First import the symbol, but make it hidden.
hidden_symbol = '_' + source_name
import_str = format_import(source_module_name, source_name, hidden_symbol)
# Call deprecated_alias passing the hidden symbol we just imported.
deprecated_alias = (
'%s = '
'_deprecated_alias(\'tf.%s\', \'tf.%s\', %s)' % (
dest_name, deprecated_endpoint, canonical_endpoint,
hidden_symbol))
self._deprecated_module_imports[dest_module_name][deprecated_endpoint].add(
'%s\n%s' % (import_str, deprecated_alias))
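  # Illustrative generated code for a deprecated endpoint (a sketch; actual
  # module paths and endpoint names depend on the traversed symbols):
  #     from tensorflow.python.ops.math_ops import div as _div
  #     div = _deprecated_alias('tf.div', 'tf.math.divide', _div)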
def _import_submodules(self):
"""Add imports for all destination modules in self._module_imports."""
# Import all required modules in their parent modules.
# For e.g. if we import 'foo.bar.Value'. Then, we also
# import 'bar' in 'foo'.
imported_modules = set(self._module_imports.keys())
imported_modules = imported_modules.union(
set(self._deprecated_module_imports.keys()))
for module in imported_modules:
if not module:
continue
module_split = module.split('.')
parent_module = '' # we import submodules in their parent_module
for submodule_index in range(len(module_split)):
if submodule_index > 0:
submodule = module_split[submodule_index-1]
parent_module += '.' + submodule if parent_module else submodule
import_from = self._output_package
if submodule_index > 0:
import_from += '.' + '.'.join(module_split[:submodule_index])
self.add_import(
-1, parent_module, import_from,
module_split[submodule_index], module_split[submodule_index])
def build(self):
"""Get a map from destination module to __init__.py code for that module.
Returns:
A dictionary where
key: (string) destination module (for e.g. tf or tf.consts).
value: (string) text that should be in __init__.py files for
corresponding modules.
"""
self._import_submodules()
module_text_map = {}
for dest_module, dest_name_to_imports in self._module_imports.items():
# Sort all possible imports for a symbol and pick the first one.
imports_list = [
sorted(imports)[0]
for _, imports in dest_name_to_imports.items()]
module_text_map[dest_module] = '\n'.join(sorted(imports_list))
for dest_module, dest_name_to_imports in (
self._deprecated_module_imports.items()):
# Sort all possible imports for a symbol and pick the first one.
imports_list = [
sorted(imports)[0]
for _, imports in dest_name_to_imports.items()]
if dest_module in module_text_map:
module_text_map[dest_module] += _ADDITIONAL_IMPORTS
else:
module_text_map[dest_module] = _ADDITIONAL_IMPORTS
module_text_map[dest_module] += '\n'.join(sorted(imports_list))
# Expose exported symbols with underscores in root module
# since we import from it using * import.
underscore_names_str = ', '.join(
'\'%s\'' % name for name in self._underscore_names_in_root)
# We will always generate a root __init__.py file to let us handle *
# imports consistently. Be sure to have a root __init__.py file listed in
# the script outputs.
module_text_map[''] = module_text_map.get('', '') + '''
_names_with_underscore = [%s]
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend([_s for _s in _names_with_underscore])
''' % underscore_names_str
return module_text_map
def _get_name_and_module(full_name):
"""Split full_name into module and short name.
Args:
full_name: Full name of symbol that includes module.
Returns:
Full module name and short symbol name.
"""
name_segments = full_name.split('.')
return '.'.join(name_segments[:-1]), name_segments[-1]
def _join_modules(module1, module2):
"""Concatenate 2 module components.
Args:
module1: First module to join.
module2: Second module to join.
Returns:
Given two modules aaa.bbb and ccc.ddd, returns a joined
module aaa.bbb.ccc.ddd.
"""
if not module1:
return module2
if not module2:
return module1
return '%s.%s' % (module1, module2)
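# For example (illustrative):
#   _get_name_and_module('linalg.matmul') -> ('linalg', 'matmul')
#   _join_modules('compat.v1', 'linalg') -> 'compat.v1.linalg'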
def add_imports_for_symbol(
module_code_builder,
symbol,
source_module_name,
source_name,
api_name,
api_version,
output_module_prefix=''):
"""Add imports for the given symbol to `module_code_builder`.
Args:
module_code_builder: `_ModuleInitCodeBuilder` instance.
symbol: A symbol.
source_module_name: Module that we can import the symbol from.
source_name: Name we can import the symbol with.
api_name: API name. Currently, must be either `tensorflow` or `estimator`.
api_version: API version.
output_module_prefix: Prefix to prepend to destination module.
"""
names_attr_v2 = API_ATTRS[api_name].names
constants_attr_v2 = API_ATTRS[api_name].constants
if api_version == 1:
names_attr = API_ATTRS_V1[api_name].names
constants_attr = API_ATTRS_V1[api_name].constants
else:
names_attr = names_attr_v2
constants_attr = constants_attr_v2
# If symbol is _tf_api_constants attribute, then add the constants.
if source_name == constants_attr:
for exports, name in symbol:
for export in exports:
dest_module, dest_name = _get_name_and_module(export)
dest_module = _join_modules(output_module_prefix, dest_module)
module_code_builder.add_import(
-1, dest_module, source_module_name, name, dest_name)
# If symbol has _tf_api_names attribute, then add import for it.
if (hasattr(symbol, '__dict__') and names_attr in symbol.__dict__):
# Get a list of all V2 names if we generate V1 API to check for
# deprecations.
exports_v2 = []
canonical_endpoint = None
if api_version == 1 and hasattr(symbol, names_attr_v2):
exports_v2 = getattr(symbol, names_attr_v2)
if exports_v2:
canonical_endpoint = exports_v2[0]
# Generate import statements for symbols.
for export in getattr(symbol, names_attr): # pylint: disable=protected-access
dest_module, dest_name = _get_name_and_module(export)
dest_module = _join_modules(output_module_prefix, dest_module)
# Add deprecated alias if only some of the endpoints are deprecated
# and symbol is not under compat.v*.
# TODO(annarev): handle deprecated class endpoints as well.
if (export not in exports_v2 and canonical_endpoint and
not dest_module.startswith(_COMPAT_MODULE_PREFIX) and
tf_inspect.isfunction(symbol)):
module_code_builder.add_deprecated_endpoint(
id(symbol), dest_module, source_module_name, source_name, dest_name,
canonical_endpoint)
else:
module_code_builder.add_import(
id(symbol), dest_module, source_module_name, source_name, dest_name)
def get_api_init_text(packages,
output_package,
api_name,
api_version,
compat_api_versions=None):
"""Get a map from destination module to __init__.py code for that module.
Args:
packages: Base python packages containing python with target tf_export
decorators.
output_package: Base output python package where generated API will be
added.
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
api_version: API version you want to generate (1 or 2).
compat_api_versions: Additional API versions to generate under compat/
directory.
Returns:
A dictionary where
key: (string) destination module (for e.g. tf or tf.consts).
value: (string) text that should be in __init__.py files for
corresponding modules.
"""
if compat_api_versions is None:
compat_api_versions = []
module_code_builder = _ModuleInitCodeBuilder(output_package)
# Traverse over everything imported above. Specifically,
# we want to traverse over TensorFlow Python modules.
def in_packages(m):
return any(package in m for package in packages)
for module in list(sys.modules.values()):
# Only look at tensorflow modules.
if (not module or not hasattr(module, '__name__') or
module.__name__ is None or not in_packages(module.__name__)):
continue
# Do not generate __init__.py files for contrib modules for now.
if (('.contrib.' in module.__name__ or module.__name__.endswith('.contrib'))
and '.lite' not in module.__name__):
continue
for module_contents_name in dir(module):
if (module.__name__ + '.' + module_contents_name
in _SYMBOLS_TO_SKIP_EXPLICITLY):
continue
attr = getattr(module, module_contents_name)
_, attr = tf_decorator.unwrap(attr)
add_imports_for_symbol(
module_code_builder, attr, module.__name__, module_contents_name,
api_name, api_version)
for compat_api_version in compat_api_versions:
add_imports_for_symbol(
module_code_builder, attr, module.__name__, module_contents_name,
api_name, compat_api_version,
_COMPAT_MODULE_TEMPLATE % compat_api_version)
return module_code_builder.build()
def get_module(dir_path, relative_to_dir):
"""Get module that corresponds to path relative to relative_to_dir.
Args:
dir_path: Path to directory.
relative_to_dir: Get module relative to this directory.
Returns:
Name of module that corresponds to the given directory.
"""
dir_path = dir_path[len(relative_to_dir):]
# Convert path separators to '/' for easier parsing below.
dir_path = dir_path.replace(os.sep, '/')
return dir_path.replace('/', '.').strip('.')
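# For example (illustrative):
#   get_module('/genfiles/tf/compat/v1', '/genfiles/tf') -> 'compat.v1'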
def get_module_docstring(module_name, package, api_name):
"""Get docstring for the given module.
This method looks for docstring in the following order:
1. Checks if module has a docstring specified in doc_srcs.
2. Checks if module has a docstring source module specified
in doc_srcs. If it does, gets docstring from that module.
3. Checks if module with module_name exists under base package.
If it does, gets docstring from that module.
4. Returns a default docstring.
Args:
module_name: module name relative to tensorflow
(excluding 'tensorflow.' prefix) to get a docstring for.
package: Base python package containing python with target tf_export
decorators.
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
Returns:
One-line docstring to describe the module.
"""
# Get the same module doc strings for any version. That is, for module
# 'compat.v1.foo' we can get docstring from module 'foo'.
for version in _API_VERSIONS:
compat_prefix = _COMPAT_MODULE_TEMPLATE % version
if module_name.startswith(compat_prefix):
module_name = module_name[len(compat_prefix):].strip('.')
# Module under base package to get a docstring from.
docstring_module_name = module_name
doc_sources = doc_srcs.get_doc_sources(api_name)
if module_name in doc_sources:
docsrc = doc_sources[module_name]
if docsrc.docstring:
return docsrc.docstring
if docsrc.docstring_module_name:
docstring_module_name = docsrc.docstring_module_name
docstring_module_name = package + '.' + docstring_module_name
if (docstring_module_name in sys.modules and
sys.modules[docstring_module_name].__doc__):
return sys.modules[docstring_module_name].__doc__
return 'Public API for tf.%s namespace.' % module_name
def create_api_files(output_files, packages, root_init_template, output_dir,
output_package, api_name, api_version,
compat_api_versions, compat_init_templates):
"""Creates __init__.py files for the Python API.
Args:
output_files: List of __init__.py file paths to create.
packages: Base python packages containing python with target tf_export
decorators.
root_init_template: Template for top-level __init__.py file.
"# API IMPORTS PLACEHOLDER" comment in the template file will be replaced
with imports.
output_dir: output API root directory.
output_package: Base output package where generated API will be added.
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
api_version: API version to generate (`v1` or `v2`).
compat_api_versions: Additional API versions to generate in compat/
subdirectory.
compat_init_templates: List of templates for top level compat init files
in the same order as compat_api_versions.
Raises:
ValueError: if output_files list is missing a required file.
"""
module_name_to_file_path = {}
for output_file in output_files:
module_name = get_module(os.path.dirname(output_file), output_dir)
module_name_to_file_path[module_name] = os.path.normpath(output_file)
# Create file for each expected output in genrule.
for module, file_path in module_name_to_file_path.items():
if not os.path.isdir(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
open(file_path, 'a').close()
module_text_map = get_api_init_text(packages, output_package, api_name,
api_version, compat_api_versions)
# Add imports to output files.
missing_output_files = []
# Root modules are "" and "compat.v*".
root_module = ''
compat_module_to_template = {
_COMPAT_MODULE_TEMPLATE % v: t
for v, t in zip(compat_api_versions, compat_init_templates)
}
for module, text in module_text_map.items():
# Make sure genrule output file list is in sync with API exports.
if module not in module_name_to_file_path:
module_file_path = '"%s/__init__.py"' % (
module.replace('.', '/'))
missing_output_files.append(module_file_path)
continue
contents = ''
if module == root_module and root_init_template:
# Read base init file for root module
with open(root_init_template, 'r') as root_init_template_file:
contents = root_init_template_file.read()
contents = contents.replace('# API IMPORTS PLACEHOLDER', text)
elif module in compat_module_to_template:
# Read base init file for compat module
with open(compat_module_to_template[module], 'r') as init_template_file:
contents = init_template_file.read()
contents = contents.replace('# API IMPORTS PLACEHOLDER', text)
else:
contents = (
_GENERATED_FILE_HEADER % get_module_docstring(
module, packages[0], api_name) + text + _GENERATED_FILE_FOOTER)
with open(module_name_to_file_path[module], 'w') as fp:
fp.write(contents)
if missing_output_files:
raise ValueError(
"""Missing outputs for genrule:\n%s. Be sure to add these targets to
tensorflow/python/tools/api/generator/api_init_files_v1.bzl and
tensorflow/python/tools/api/generator/api_init_files.bzl (tensorflow repo), or
tensorflow_estimator/python/estimator/api/api_gen.bzl (estimator repo)"""
% ',\n'.join(sorted(missing_output_files)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'outputs', metavar='O', type=str, nargs='+',
      help='If a single file is passed in, then we assume it contains a '
'semicolon-separated list of Python files that we expect this script to '
'output. If multiple files are passed in, then we assume output files '
'are listed directly as arguments.')
parser.add_argument(
'--packages',
default=_DEFAULT_PACKAGE,
type=str,
help='Base packages that import modules containing the target tf_export '
'decorators.')
parser.add_argument(
'--root_init_template', default='', type=str,
help='Template for top level __init__.py file. '
'"#API IMPORTS PLACEHOLDER" comment will be replaced with imports.')
parser.add_argument(
'--apidir', type=str, required=True,
help='Directory where generated output files are placed. '
'gendir should be a prefix of apidir. Also, apidir '
'should be a prefix of every directory in outputs.')
parser.add_argument(
'--apiname', required=True, type=str,
choices=API_ATTRS.keys(),
help='The API you want to generate.')
parser.add_argument(
'--apiversion', default=2, type=int,
choices=_API_VERSIONS,
help='The API version you want to generate.')
parser.add_argument(
'--compat_apiversions', default=[], type=int, action='append',
help='Additional versions to generate in compat/ subdirectory. '
'If set to 0, then no additional version would be generated.')
parser.add_argument(
'--compat_init_templates', default=[], type=str, action='append',
help='Templates for top-level __init__ files under compat modules. '
'The list of init file templates must be in the same order as '
'list of versions passed with compat_apiversions.')
parser.add_argument(
'--output_package', default='tensorflow', type=str,
help='Root output package.')
args = parser.parse_args()
if len(args.outputs) == 1:
# If we only get a single argument, then it must be a file containing
# list of outputs.
with open(args.outputs[0]) as output_list_file:
outputs = [line.strip() for line in output_list_file.read().split(';')]
else:
outputs = args.outputs
# Populate `sys.modules` with modules containing tf_export().
packages = args.packages.split(',')
for package in packages:
importlib.import_module(package)
create_api_files(outputs, packages, args.root_init_template, args.apidir,
args.output_package, args.apiname, args.apiversion,
args.compat_apiversions, args.compat_init_templates)
if __name__ == '__main__':
main()
|
{
"content_hash": "e6daa668874a7ffbc09452b7086b3007",
"timestamp": "",
"source": "github",
"line_count": 601,
"max_line_length": 84,
"avg_line_length": 38.55740432612313,
"alnum_prop": 0.6690976567557071,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "c11900d397b76dd52ac380501e49b558ee7859bc",
"size": "23861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/tools/api/generator/create_python_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
}
|
from docx.api import Document # noqa
__version__ = '0.8.6'
# register custom Part classes with opc package reader
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.opc.part import PartFactory
from docx.opc.parts.coreprops import CorePropertiesPart
from docx.parts.document import DocumentPart
from docx.parts.image import ImagePart
from docx.parts.numbering import NumberingPart
from docx.parts.settings import SettingsPart
from docx.parts.styles import StylesPart
def part_class_selector(content_type, reltype):
if reltype == RT.IMAGE:
return ImagePart
return None
PartFactory.part_class_selector = part_class_selector
PartFactory.part_type_for[CT.OPC_CORE_PROPERTIES] = CorePropertiesPart
PartFactory.part_type_for[CT.WML_DOCUMENT_MAIN] = DocumentPart
PartFactory.part_type_for[CT.WML_NUMBERING] = NumberingPart
PartFactory.part_type_for[CT.WML_SETTINGS] = SettingsPart
PartFactory.part_type_for[CT.WML_STYLES] = StylesPart
del (
CT, CorePropertiesPart, DocumentPart, NumberingPart, PartFactory,
StylesPart, part_class_selector
)
|
{
"content_hash": "584d14ea96f12e182dfd8dd9092599b5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 31.485714285714284,
"alnum_prop": 0.7912885662431942,
"repo_name": "thebongy/MakeMyOutputs",
"id": "cfa48729d129f511b147ae1470e094150be96489",
"size": "1121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docx/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "449173"
}
],
"symlink_target": ""
}
|
"""The tests for the MQTT component."""
import asyncio
import ssl
import unittest
from unittest import mock
import pytest
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.const import (
ATTR_DOMAIN, ATTR_SERVICE, EVENT_CALL_SERVICE, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component,
fire_mqtt_message, get_test_home_assistant, mock_coro, mock_mqtt_component,
threadsafe_coroutine_factory)
@pytest.fixture
def mock_MQTT():
"""Make sure connection is established."""
with mock.patch('homeassistant.components.mqtt.MQTT') as mock_MQTT:
mock_MQTT.return_value.async_connect.return_value = mock_coro(True)
yield mock_MQTT
@asyncio.coroutine
def async_mock_mqtt_client(hass, config=None):
"""Mock the MQTT paho client."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with mock.patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().unsubscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
return mock_client()
mock_mqtt_client = threadsafe_coroutine_factory(async_mock_mqtt_client)
# pylint: disable=invalid-name
class TestMQTTComponent(unittest.TestCase):
"""Test the MQTT component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
self.calls = []
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@callback
def record_calls(self, *args):
"""Record calls."""
self.calls.append(args)
    def test_client_stops_on_home_assistant_stop(self):
"""Test if client stops on HA stop."""
self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
self.hass.block_till_done()
assert self.hass.data['mqtt'].async_disconnect.called
def test_publish_calls_service(self):
"""Test the publishing of call to services."""
self.hass.bus.listen_once(EVENT_CALL_SERVICE, self.record_calls)
mqtt.publish(self.hass, 'test-topic', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'test-topic' == \
self.calls[0][0].data['service_data'][mqtt.ATTR_TOPIC]
assert 'test-payload' == \
self.calls[0][0].data['service_data'][mqtt.ATTR_PAYLOAD]
def test_service_call_without_topic_does_not_publish(self):
"""Test the service call if topic is missing."""
self.hass.bus.fire(EVENT_CALL_SERVICE, {
ATTR_DOMAIN: mqtt.DOMAIN,
ATTR_SERVICE: mqtt.SERVICE_PUBLISH
})
self.hass.block_till_done()
assert not self.hass.data['mqtt'].async_publish.called
def test_service_call_with_template_payload_renders_template(self):
"""Test the service call with rendered template.
If 'payload_template' is provided and 'payload' is not, then render it.
"""
mqtt.publish_template(self.hass, "test/topic", "{{ 1+1 }}")
self.hass.block_till_done()
assert self.hass.data['mqtt'].async_publish.called
assert self.hass.data['mqtt'].async_publish.call_args[0][1] == "2"
def test_service_call_with_payload_doesnt_render_template(self):
"""Test the service call with unrendered template.
If both 'payload' and 'payload_template' are provided then fail.
"""
payload = "not a template"
payload_template = "a template"
with pytest.raises(vol.Invalid):
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template
}, blocking=True)
assert not self.hass.data['mqtt'].async_publish.called
def test_service_call_with_ascii_qos_retain_flags(self):
"""Test the service call with args that can be misinterpreted.
Empty payload message and ascii formatted qos and retain flags.
"""
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: "",
mqtt.ATTR_QOS: '2',
mqtt.ATTR_RETAIN: 'no'
}, blocking=True)
assert self.hass.data['mqtt'].async_publish.called
assert self.hass.data['mqtt'].async_publish.call_args[0][2] == 2
assert not self.hass.data['mqtt'].async_publish.call_args[0][3]
def test_validate_topic(self):
"""Test topic name/filter validation."""
# Invalid UTF-8, must not contain U+D800 to U+DFFF.
with pytest.raises(vol.Invalid):
mqtt.valid_topic('\ud800')
with pytest.raises(vol.Invalid):
mqtt.valid_topic('\udfff')
# Topic MUST NOT be empty
with pytest.raises(vol.Invalid):
mqtt.valid_topic('')
# Topic MUST NOT be longer than 65535 encoded bytes.
with pytest.raises(vol.Invalid):
mqtt.valid_topic('ü' * 32768)
# UTF-8 MUST NOT include null character
with pytest.raises(vol.Invalid):
mqtt.valid_topic('bad\0one')
# Topics "SHOULD NOT" include these special characters
# (not MUST NOT, RFC2119). The receiver MAY close the connection.
mqtt.valid_topic('\u0001')
mqtt.valid_topic('\u001F')
mqtt.valid_topic('\u009F')
mqtt.valid_topic('\u009F')
mqtt.valid_topic('\uffff')
def test_validate_subscribe_topic(self):
"""Test invalid subscribe topics."""
mqtt.valid_subscribe_topic('#')
mqtt.valid_subscribe_topic('sport/#')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('sport/#/')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('foo/bar#')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('foo/#/bar')
mqtt.valid_subscribe_topic('+')
mqtt.valid_subscribe_topic('+/tennis/#')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('sport+')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('sport+/')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('sport/+1')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('sport/+#')
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic('bad+topic')
mqtt.valid_subscribe_topic('sport/+/player1')
mqtt.valid_subscribe_topic('/finance')
mqtt.valid_subscribe_topic('+/+')
mqtt.valid_subscribe_topic('$SYS/#')
def test_validate_publish_topic(self):
"""Test invalid publish topics."""
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic('pub+')
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic('pub/+')
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic('1#')
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic('bad+topic')
mqtt.valid_publish_topic('//')
# Topic names beginning with $ SHOULD NOT be used, but can
mqtt.valid_publish_topic('$SYS/')
def test_entity_device_info_schema(self):
"""Test MQTT entity device info validation."""
# just identifier
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({
'identifiers': ['abcd']
})
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({
'identifiers': 'abcd'
})
# just connection
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({
'connections': [
['mac', '02:5b:26:a8:dc:12'],
]
})
# full device info
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({
'identifiers': ['helloworld', 'hello'],
'connections': [
["mac", "02:5b:26:a8:dc:12"],
["zigbee", "zigbee_id"],
],
'manufacturer': 'Whatever',
'name': 'Beer',
'model': 'Glass',
'sw_version': '0.1-beta',
})
# full device info with via_hub
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({
'identifiers': ['helloworld', 'hello'],
'connections': [
["mac", "02:5b:26:a8:dc:12"],
["zigbee", "zigbee_id"],
],
'manufacturer': 'Whatever',
'name': 'Beer',
'model': 'Glass',
'sw_version': '0.1-beta',
'via_hub': 'test-hub',
})
# no identifiers
with pytest.raises(vol.Invalid):
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({
'manufacturer': 'Whatever',
'name': 'Beer',
'model': 'Glass',
'sw_version': '0.1-beta',
})
# empty identifiers
with pytest.raises(vol.Invalid):
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({
'identifiers': [],
'connections': [],
'name': 'Beer',
})
# pylint: disable=invalid-name
class TestMQTTCallbacks(unittest.TestCase):
"""Test the MQTT callbacks."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_client(self.hass)
self.calls = []
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@callback
def record_calls(self, *args):
"""Record calls."""
self.calls.append(args)
    def test_client_starts_on_home_assistant_mqtt_setup(self):
"""Test if client is connected after mqtt init on bootstrap."""
assert self.hass.data['mqtt']._mqttc.connect.call_count == 1
def test_receiving_non_utf8_message_gets_logged(self):
"""Test receiving a non utf8 encoded message."""
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
with self.assertLogs(level='WARNING') as test_handle:
fire_mqtt_message(self.hass, 'test-topic', b'\x9a')
self.hass.block_till_done()
assert \
"WARNING:homeassistant.components.mqtt:Can't decode payload " \
"b'\\x9a' on test-topic with encoding utf-8" in \
test_handle.output[0]
def test_all_subscriptions_run_when_decode_fails(self):
"""Test all other subscriptions still run when decode fails for one."""
mqtt.subscribe(self.hass, 'test-topic', self.record_calls,
encoding='ascii')
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', '°C')
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_subscribe_topic(self):
"""Test the subscription of a topic."""
unsub = mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'test-topic' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
unsub()
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
def test_subscribe_topic_not_match(self):
"""Test if subscribed topic is not a match."""
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_subscribe_topic_level_wildcard(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'test-topic/bier/on' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_topic_level_wildcard_no_subtree_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier', 'test-payload')
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_subscribe_topic_level_wildcard_root_topic_no_subtree_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic-123', 'test-payload')
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_subscribe_topic_subtree_wildcard_subtree_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'test-topic/bier/on' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_topic_subtree_wildcard_root_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'test-topic' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_topic_subtree_wildcard_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_subscribe_topic_level_wildcard_and_wildcard_root_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, '+/test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'hi/test-topic', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'hi/test-topic' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_topic_level_wildcard_and_wildcard_subtree_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, '+/test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'hi/test-topic/here-iam', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert 'hi/test-topic/here-iam' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_topic_level_wildcard_and_wildcard_level_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, '+/test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'hi/here-iam/test-topic', 'test-payload')
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_subscribe_topic_level_wildcard_and_wildcard_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, '+/test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'hi/another-test-topic', 'test-payload')
self.hass.block_till_done()
assert 0 == len(self.calls)
def test_subscribe_topic_sys_root(self):
"""Test the subscription of $ root topics."""
mqtt.subscribe(self.hass, '$test-topic/subtree/on', self.record_calls)
fire_mqtt_message(self.hass, '$test-topic/subtree/on', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert '$test-topic/subtree/on' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_topic_sys_root_and_wildcard_topic(self):
"""Test the subscription of $ root and wildcard topics."""
mqtt.subscribe(self.hass, '$test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, '$test-topic/some-topic', 'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert '$test-topic/some-topic' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_topic_sys_root_and_wildcard_subtree_topic(self):
"""Test the subscription of $ root and wildcard subtree topics."""
mqtt.subscribe(self.hass, '$test-topic/subtree/#', self.record_calls)
fire_mqtt_message(self.hass, '$test-topic/subtree/some-topic',
'test-payload')
self.hass.block_till_done()
assert 1 == len(self.calls)
assert '$test-topic/subtree/some-topic' == self.calls[0][0]
assert 'test-payload' == self.calls[0][1]
def test_subscribe_special_characters(self):
"""Test the subscription to topics with special characters."""
topic = '/test-topic/$(.)[^]{-}'
payload = 'p4y.l[]a|> ?'
mqtt.subscribe(self.hass, topic, self.record_calls)
fire_mqtt_message(self.hass, topic, payload)
self.hass.block_till_done()
assert 1 == len(self.calls)
assert topic == self.calls[0][0]
assert payload == self.calls[0][1]
def test_mqtt_failed_connection_results_in_disconnect(self):
"""Test if connection failure leads to disconnect."""
for result_code in range(1, 6):
self.hass.data['mqtt']._mqttc = mock.MagicMock()
self.hass.data['mqtt']._mqtt_on_connect(
None, {'topics': {}}, 0, result_code)
assert self.hass.data['mqtt']._mqttc.disconnect.called
def test_mqtt_disconnect_tries_no_reconnect_on_stop(self):
"""Test the disconnect tries."""
self.hass.data['mqtt']._mqtt_on_disconnect(None, None, 0)
assert not self.hass.data['mqtt']._mqttc.reconnect.called
@mock.patch('homeassistant.components.mqtt.time.sleep')
def test_mqtt_disconnect_tries_reconnect(self, mock_sleep):
"""Test the re-connect tries."""
self.hass.data['mqtt'].subscriptions = [
mqtt.Subscription('test/progress', None, 0),
mqtt.Subscription('test/progress', None, 1),
mqtt.Subscription('test/topic', None, 2),
]
self.hass.data['mqtt']._mqttc.reconnect.side_effect = [1, 1, 1, 0]
self.hass.data['mqtt']._mqtt_on_disconnect(None, None, 1)
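        # three failed reconnect attempts back off 1s, 2s and 4s before the
        # fourth succeeds (see the reconnect side_effect above)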
assert self.hass.data['mqtt']._mqttc.reconnect.called
assert 4 == len(self.hass.data['mqtt']._mqttc.reconnect.mock_calls)
assert [1, 2, 4] == \
[call[1][0] for call in mock_sleep.mock_calls]
def test_retained_message_on_subscribe_received(self):
"""Test every subscriber receives retained message on subscribe."""
def side_effect(*args):
async_fire_mqtt_message(self.hass, 'test/state', 'online')
return 0, 0
self.hass.data['mqtt']._mqttc.subscribe.side_effect = side_effect
calls_a = mock.MagicMock()
mqtt.subscribe(self.hass, 'test/state', calls_a)
self.hass.block_till_done()
assert calls_a.called
calls_b = mock.MagicMock()
mqtt.subscribe(self.hass, 'test/state', calls_b)
self.hass.block_till_done()
assert calls_b.called
def test_not_calling_unsubscribe_with_active_subscribers(self):
"""Test not calling unsubscribe() when other subscribers are active."""
unsub = mqtt.subscribe(self.hass, 'test/state', None)
mqtt.subscribe(self.hass, 'test/state', None)
self.hass.block_till_done()
assert self.hass.data['mqtt']._mqttc.subscribe.called
unsub()
self.hass.block_till_done()
assert not self.hass.data['mqtt']._mqttc.unsubscribe.called
def test_restore_subscriptions_on_reconnect(self):
"""Test subscriptions are restored on reconnect."""
mqtt.subscribe(self.hass, 'test/state', None)
self.hass.block_till_done()
assert self.hass.data['mqtt']._mqttc.subscribe.call_count == 1
self.hass.data['mqtt']._mqtt_on_disconnect(None, None, 0)
self.hass.data['mqtt']._mqtt_on_connect(None, None, None, 0)
self.hass.block_till_done()
assert self.hass.data['mqtt']._mqttc.subscribe.call_count == 2
def test_restore_all_active_subscriptions_on_reconnect(self):
"""Test active subscriptions are restored correctly on reconnect."""
self.hass.data['mqtt']._mqttc.subscribe.side_effect = (
(0, 1), (0, 2), (0, 3), (0, 4)
)
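        # the (result, mid) tuples above mimic paho-mqtt's subscribe() return value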
unsub = mqtt.subscribe(self.hass, 'test/state', None, qos=2)
mqtt.subscribe(self.hass, 'test/state', None)
mqtt.subscribe(self.hass, 'test/state', None, qos=1)
self.hass.block_till_done()
expected = [
mock.call('test/state', 2),
mock.call('test/state', 0),
mock.call('test/state', 1)
]
assert self.hass.data['mqtt']._mqttc.subscribe.mock_calls == \
expected
unsub()
self.hass.block_till_done()
assert self.hass.data['mqtt']._mqttc.unsubscribe.call_count == \
0
self.hass.data['mqtt']._mqtt_on_disconnect(None, None, 0)
self.hass.data['mqtt']._mqtt_on_connect(None, None, None, 0)
self.hass.block_till_done()
expected.append(mock.call('test/state', 1))
assert self.hass.data['mqtt']._mqttc.subscribe.mock_calls == \
expected
@asyncio.coroutine
def test_setup_embedded_starts_with_no_config(hass):
"""Test setting up embedded server with no config."""
client_config = ('localhost', 1883, 'user', 'pass', None, '3.1.1')
with mock.patch('homeassistant.components.mqtt.server.async_start',
return_value=mock_coro(
return_value=(True, client_config))
) as _start:
yield from async_mock_mqtt_client(hass, {})
assert _start.call_count == 1
@asyncio.coroutine
def test_setup_embedded_with_embedded(hass):
    """Test setting up embedded server with embedded config."""
client_config = ('localhost', 1883, 'user', 'pass', None, '3.1.1')
with mock.patch('homeassistant.components.mqtt.server.async_start',
return_value=mock_coro(
return_value=(True, client_config))
) as _start:
_start.return_value = mock_coro(return_value=(True, client_config))
yield from async_mock_mqtt_client(hass, {'embedded': None})
assert _start.call_count == 1
async def test_setup_fails_if_no_connect_broker(hass):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker'
})
with mock.patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect = lambda *args: 1
assert not await mqtt.async_setup_entry(hass, entry)
async def test_setup_uses_certificate_on_certificate_set_to_auto(
hass, mock_MQTT):
"""Test setup uses bundled certs when certificate is set to auto."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker',
'certificate': 'auto'
})
assert await mqtt.async_setup_entry(hass, entry)
assert mock_MQTT.called
import requests.certs
expectedCertificate = requests.certs.where()
assert mock_MQTT.mock_calls[0][2]['certificate'] == expectedCertificate
async def test_setup_does_not_use_certificate_on_mqtts_port(hass, mock_MQTT):
"""Test setup doesn't use bundled certs when ssl set."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker',
'port': 8883
})
assert await mqtt.async_setup_entry(hass, entry)
assert mock_MQTT.called
assert mock_MQTT.mock_calls[0][2]['port'] == 8883
import requests.certs
mqttsCertificateBundle = requests.certs.where()
    assert mock_MQTT.mock_calls[0][2]['certificate'] != mqttsCertificateBundle
async def test_setup_without_tls_config_uses_tlsv1_under_python36(
        hass, mock_MQTT):
    """Test setup defaults to PROTOCOL_TLS on Python 3.6+, TLSv1 otherwise."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker',
})
assert await mqtt.async_setup_entry(hass, entry)
assert mock_MQTT.called
import sys
if sys.hexversion >= 0x03060000:
expectedTlsVersion = ssl.PROTOCOL_TLS # pylint: disable=no-member
else:
expectedTlsVersion = ssl.PROTOCOL_TLSv1
assert mock_MQTT.mock_calls[0][2]['tls_version'] == expectedTlsVersion
async def test_setup_with_tls_config_uses_tls_version1_2(hass, mock_MQTT):
"""Test setup uses specified TLS version."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker',
'tls_version': '1.2'
})
assert await mqtt.async_setup_entry(hass, entry)
assert mock_MQTT.called
assert mock_MQTT.mock_calls[0][2]['tls_version'] == ssl.PROTOCOL_TLSv1_2
async def test_setup_with_tls_config_of_v1_under_python36_only_uses_v1(
hass, mock_MQTT):
"""Test setup uses TLSv1.0 if explicitly chosen."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker',
'tls_version': '1.0'
})
assert await mqtt.async_setup_entry(hass, entry)
assert mock_MQTT.called
assert mock_MQTT.mock_calls[0][2]['tls_version'] == ssl.PROTOCOL_TLSv1
@asyncio.coroutine
def test_birth_message(hass):
"""Test sending birth message."""
mqtt_client = yield from async_mock_mqtt_client(hass, {
mqtt.CONF_BROKER: 'mock-broker',
mqtt.CONF_BIRTH_MESSAGE: {mqtt.ATTR_TOPIC: 'birth',
mqtt.ATTR_PAYLOAD: 'birth'}
})
calls = []
mqtt_client.publish.side_effect = lambda *args: calls.append(args)
hass.data['mqtt']._mqtt_on_connect(None, None, 0, 0)
yield from hass.async_block_till_done()
assert calls[-1] == ('birth', 'birth', 0, False)
@asyncio.coroutine
def test_mqtt_subscribes_topics_on_connect(hass):
"""Test subscription to topic on connect."""
mqtt_client = yield from async_mock_mqtt_client(hass)
hass.data['mqtt'].subscriptions = [
mqtt.Subscription('topic/test', None),
mqtt.Subscription('home/sensor', None, 2),
mqtt.Subscription('still/pending', None),
mqtt.Subscription('still/pending', None, 1),
]
hass.add_job = mock.MagicMock()
hass.data['mqtt']._mqtt_on_connect(None, None, 0, 0)
yield from hass.async_block_till_done()
assert mqtt_client.disconnect.call_count == 0
expected = {
'topic/test': 0,
'home/sensor': 2,
'still/pending': 1
}
calls = {call[1][1]: call[1][2] for call in hass.add_job.mock_calls}
assert calls == expected
async def test_setup_fails_without_config(hass):
"""Test if the MQTT component fails to load with no config."""
assert not await async_setup_component(hass, mqtt.DOMAIN, {})
async def test_message_callback_exception_gets_logged(hass, caplog):
"""Test exception raised by message handler."""
await async_mock_mqtt_component(hass)
@callback
def bad_handler(*args):
"""Record calls."""
raise Exception('This is a bad message callback')
await mqtt.async_subscribe(hass, 'test-topic', bad_handler)
async_fire_mqtt_message(hass, 'test-topic', 'test')
await hass.async_block_till_done()
assert \
"Exception in bad_handler when handling msg on 'test-topic':" \
" 'test'" in caplog.text
async def test_mqtt_ws_subscription(hass, hass_ws_client):
"""Test MQTT websocket subscription."""
await async_mock_mqtt_component(hass)
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'mqtt/subscribe',
'topic': 'test-topic',
})
response = await client.receive_json()
assert response['success']
async_fire_mqtt_message(hass, 'test-topic', 'test1')
async_fire_mqtt_message(hass, 'test-topic', 'test2')
response = await client.receive_json()
assert response['event']['topic'] == 'test-topic'
assert response['event']['payload'] == 'test1'
response = await client.receive_json()
assert response['event']['topic'] == 'test-topic'
assert response['event']['payload'] == 'test2'
# Unsubscribe
await client.send_json({
'id': 8,
'type': 'unsubscribe_events',
'subscription': 5,
})
response = await client.receive_json()
assert response['success']
|
{
"content_hash": "364fb6694a2590565f0ebadb7cb508f2",
"timestamp": "",
"source": "github",
"line_count": 803,
"max_line_length": 79,
"avg_line_length": 36.906600249066,
"alnum_prop": 0.614387906600081,
"repo_name": "nugget/home-assistant",
"id": "81941173d684aa44881b85d3bdad322643fad573",
"size": "29638",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
}
|
import requests
import time
while True:
    try:
        response = requests.get('http://ssmale.ddns.net', timeout=10)
        server_up = response.status_code == requests.codes.ok
    except requests.RequestException:
        # an unreachable host raises an exception rather than returning a status
        server_up = False
    if server_up:
        print('Server Up')
    else:
        print('Server Down')
    time.sleep(10)
|
{
"content_hash": "9e10ad9375c8ffe0531240fe7d6af74d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 22.9,
"alnum_prop": 0.6419213973799127,
"repo_name": "SSmale/StatusPi",
"id": "f2aed8a2383d94012620b207dba1dfd1a2240eca",
"size": "310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Test Scripts/PingTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1737"
}
],
"symlink_target": ""
}
|
import numpy as np
from ._base import _BaseImputer
from ..utils.validation import FLOAT_DTYPES
from ..metrics import pairwise_distances_chunked
from ..metrics.pairwise import _NAN_METRICS
from ..neighbors._base import _get_weights
from ..neighbors._base import _check_weights
from ..utils import is_scalar_nan
from ..utils._mask import _get_mask
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
class KNNImputer(_BaseImputer):
"""Imputation for completing missing values using k-Nearest Neighbors.
Each sample's missing values are imputed using the mean value from
`n_neighbors` nearest neighbors found in the training set. Two samples are
close if the features that neither is missing are close.
Read more in the :ref:`User Guide <knnimpute>`.
.. versionadded:: 0.22
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to np.nan, since `pd.NA` will be converted to np.nan.
n_neighbors : int, default=5
Number of neighboring samples to use for imputation.
weights : {'uniform', 'distance'} or callable, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood are
weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- callable : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
metric : {'nan_euclidean'} or callable, default='nan_euclidean'
Distance metric for searching neighbors. Possible values:
- 'nan_euclidean'
- callable : a user-defined function which conforms to the definition
of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
accepts two arrays, X and Y, and a `missing_values` keyword in
`kwds` and returns a scalar distance value.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible.
add_indicator : bool, default=False
If True, a :class:`MissingIndicator` transform will stack onto the
output of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on the
missing indicator even if there are missing values at transform/test
time.
Attributes
----------
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
References
----------
* Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
no. 6, 2001 Pages 520-525.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import KNNImputer
>>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
>>> imputer = KNNImputer(n_neighbors=2)
>>> imputer.fit_transform(X)
array([[1. , 2. , 4. ],
[3. , 4. , 3. ],
[5.5, 6. , 5. ],
[8. , 8. , 7. ]])
"""
@_deprecate_positional_args
def __init__(self, *, missing_values=np.nan, n_neighbors=5,
weights="uniform", metric="nan_euclidean", copy=True,
add_indicator=False):
super().__init__(
missing_values=missing_values,
add_indicator=add_indicator
)
self.n_neighbors = n_neighbors
self.weights = weights
self.metric = metric
self.copy = copy
def _calc_impute(self, dist_pot_donors, n_neighbors,
fit_X_col, mask_fit_X_col):
"""Helper function to impute a single column.
Parameters
----------
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
Distance matrix between the receivers and potential donors from
training set. There must be at least one non-nan distance between
a receiver and a potential donor.
n_neighbors : int
Number of neighbors to consider.
fit_X_col : ndarray of shape (n_potential_donors,)
Column of potential donors from training set.
mask_fit_X_col : ndarray of shape (n_potential_donors,)
Missing mask for fit_X_col.
Returns
-------
imputed_values: ndarray of shape (n_receivers,)
Imputed values for receiver.
"""
# Get donors
donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1,
axis=1)[:, :n_neighbors]
        # Get weight matrix from distance matrix
donors_dist = dist_pot_donors[
np.arange(donors_idx.shape[0])[:, None], donors_idx]
weight_matrix = _get_weights(donors_dist, self.weights)
# fill nans with zeros
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
# Retrieve donor values and calculate kNN average
donors = fit_X_col.take(donors_idx)
donors_mask = mask_fit_X_col.take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
return np.ma.average(donors, axis=1, weights=weight_matrix).data
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
"""
# Check data integrity and calling arguments
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
if self.metric not in _NAN_METRICS and not callable(self.metric):
raise ValueError(
"The selected metric does not support NaN values")
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got {}".format(self.n_neighbors))
X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
force_all_finite=force_all_finite,
copy=self.copy)
_check_weights(self.weights)
self._fit_X = X
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
super()._fit_indicator(self._mask_fit_X)
return self
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
that is not always missing during `fit`.
"""
check_is_fitted(self)
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
force_all_finite=force_all_finite,
copy=self.copy, reset=False)
mask = _get_mask(X, self.missing_values)
mask_fit_X = self._mask_fit_X
valid_mask = ~np.all(mask_fit_X, axis=0)
X_indicator = super()._transform_indicator(mask)
        if not np.any(mask):
            # no missing values in X; only drop the columns that were
            # all-NaN in the training data
            return X[:, valid_mask]
row_missing_idx = np.flatnonzero(mask.any(axis=1))
non_missing_fix_X = np.logical_not(mask_fit_X)
# Maps from indices from X to indices in dist matrix
dist_idx_map = np.zeros(X.shape[0], dtype=int)
dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
def process_chunk(dist_chunk, start):
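            # reduce_func for pairwise_distances_chunked: imputes, in place,
            # the rows of X covered by this chunk of the distance matrix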
row_missing_chunk = row_missing_idx[start:start + len(dist_chunk)]
# Find and impute missing by column
for col in range(X.shape[1]):
if not valid_mask[col]:
# column was all missing during training
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
# column has no missing values
continue
potential_donors_idx, = np.nonzero(non_missing_fix_X[:, col])
# receivers_idx are indices in X
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
# distances for samples that needed imputation for column
dist_subset = (dist_chunk[dist_idx_map[receivers_idx] - start]
[:, potential_donors_idx])
# receivers with all nan distances impute with mean
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(self._fit_X[:, col],
mask=mask_fit_X[:, col]).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
# all receivers imputed with mean
continue
# receivers with at least one defined distance
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = (dist_chunk[dist_idx_map[receivers_idx]
- start]
[:, potential_donors_idx])
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
value = self._calc_impute(
dist_subset,
n_neighbors,
self._fit_X[potential_donors_idx, col],
mask_fit_X[potential_donors_idx, col])
X[receivers_idx, col] = value
# process in fixed-memory chunks
gen = pairwise_distances_chunked(
X[row_missing_idx, :],
self._fit_X,
metric=self.metric,
missing_values=self.missing_values,
force_all_finite=force_all_finite,
reduce_func=process_chunk)
for chunk in gen:
# process_chunk modifies X in place. No return value.
pass
return super()._concatenate_indicator(X[:, valid_mask], X_indicator)
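# A hedged usage sketch mirroring the doctest above, adding the documented
# ``weights='distance'`` and ``add_indicator`` options; the trailing columns
# of the output are the binary missing-value indicators:
#     >>> from sklearn.impute import KNNImputer
#     >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
#     >>> KNNImputer(n_neighbors=2, weights='distance',
#     ...            add_indicator=True).fit_transform(X)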
|
{
"content_hash": "7ea4da2facbcf9b86f974d910a2e2b14",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 78,
"avg_line_length": 38.861952861952865,
"alnum_prop": 0.5810951308265465,
"repo_name": "glemaitre/scikit-learn",
"id": "c4b407fdd66e7d694043d8f436cd066d8248f248",
"size": "11662",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "sklearn/impute/_knn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41025"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "10011694"
},
{
"name": "Shell",
"bytes": "44168"
}
],
"symlink_target": ""
}
|
"""Tests for the WLED switch platform."""
from unittest.mock import MagicMock
import pytest
from wled import WLEDConnectionError, WLEDError
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.components.wled.const import (
ATTR_DURATION,
ATTR_FADE,
ATTR_TARGET_BRIGHTNESS,
ATTR_UDP_PORT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
ENTITY_CATEGORY_CONFIG,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry
async def test_switch_state(
hass: HomeAssistant, init_integration: MockConfigEntry
) -> None:
"""Test the creation and values of the WLED switches."""
entity_registry = er.async_get(hass)
state = hass.states.get("switch.wled_rgb_light_nightlight")
assert state
assert state.attributes.get(ATTR_DURATION) == 60
assert state.attributes.get(ATTR_ICON) == "mdi:weather-night"
assert state.attributes.get(ATTR_TARGET_BRIGHTNESS) == 0
assert state.attributes.get(ATTR_FADE)
assert state.state == STATE_OFF
entry = entity_registry.async_get("switch.wled_rgb_light_nightlight")
assert entry
assert entry.unique_id == "aabbccddeeff_nightlight"
assert entry.entity_category == ENTITY_CATEGORY_CONFIG
state = hass.states.get("switch.wled_rgb_light_sync_send")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:upload-network-outline"
assert state.attributes.get(ATTR_UDP_PORT) == 21324
assert state.state == STATE_OFF
entry = entity_registry.async_get("switch.wled_rgb_light_sync_send")
assert entry
assert entry.unique_id == "aabbccddeeff_sync_send"
assert entry.entity_category == ENTITY_CATEGORY_CONFIG
state = hass.states.get("switch.wled_rgb_light_sync_receive")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:download-network-outline"
assert state.attributes.get(ATTR_UDP_PORT) == 21324
assert state.state == STATE_ON
entry = entity_registry.async_get("switch.wled_rgb_light_sync_receive")
assert entry
assert entry.unique_id == "aabbccddeeff_sync_receive"
assert entry.entity_category == ENTITY_CATEGORY_CONFIG
async def test_switch_change_state(
hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock
) -> None:
"""Test the change of state of the WLED switches."""
# Nightlight
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_nightlight"},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.nightlight.call_count == 1
mock_wled.nightlight.assert_called_with(on=True)
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_nightlight"},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.nightlight.call_count == 2
mock_wled.nightlight.assert_called_with(on=False)
# Sync send
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_sync_send"},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.sync.call_count == 1
mock_wled.sync.assert_called_with(send=True)
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_sync_send"},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.sync.call_count == 2
mock_wled.sync.assert_called_with(send=False)
# Sync receive
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_sync_receive"},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.sync.call_count == 3
mock_wled.sync.assert_called_with(receive=False)
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_sync_receive"},
blocking=True,
)
await hass.async_block_till_done()
assert mock_wled.sync.call_count == 4
mock_wled.sync.assert_called_with(receive=True)
async def test_switch_error(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test error handling of the WLED switches."""
mock_wled.nightlight.side_effect = WLEDError
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_nightlight"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("switch.wled_rgb_light_nightlight")
assert state
assert state.state == STATE_OFF
assert "Invalid response from API" in caplog.text
async def test_switch_connection_error(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_wled: MagicMock,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test connection error handling of the WLED switches."""
mock_wled.nightlight.side_effect = WLEDConnectionError
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "switch.wled_rgb_light_nightlight"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("switch.wled_rgb_light_nightlight")
assert state
assert state.state == STATE_UNAVAILABLE
assert "Error communicating with API" in caplog.text
|
{
"content_hash": "adfdd0e59282314c7db94256e40f68cf",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 80,
"avg_line_length": 31.543478260869566,
"alnum_prop": 0.6857339765678843,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "7ba86960d2b6260db6e8eaa868013e9d0c95d483",
"size": "5804",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/wled/test_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""
Summary measures for ergodic Markov chains
"""
__author__ = "Sergio J. Rey <srey@asu.edu>"
__all__ = ['steady_state', 'fmpt', 'var_fmpt']
import numpy as np
import numpy.linalg as la
def steady_state(P):
"""
Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : matrix
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
matrix
(k, 1), steady state distribution.
Examples
--------
Taken from Kemeny and Snell. Land of Oz example where the states are
    Rain, Nice and Snow, so there is a 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> p=np.matrix([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
matrix([[ 0.4],
[ 0.2],
[ 0.4]])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive).
"""
v, d = la.eig(np.transpose(P))
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
# normalize eigenvector corresponding to the eigenvalue 1
return d[:, i] / sum(d[:, i])
def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : matrix
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : matrix
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> p=np.matrix([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
matrix([[ 2.5 , 4. , 3.33333333],
[ 2.66666667, 5. , 2.66666667],
[ 3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in Kemeny and Snell (1976).
References
----------
.. [1] Kemeny, John, G. and J. Laurie Snell (1976) Finite Markov Chains.
Springer-Verlag. Berlin.
"""
A = np.zeros_like(P)
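    # Kemeny and Snell: fill every row of A with the steady state vector;
    # the fundamental matrix is Z = inv(I - P + A), and first mean passage
    # times follow as M = (I - Z + E * Zdg) * D with D = diag(1 / diag(A)).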
ss = steady_state(P)
k = ss.shape[0]
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
    # guard against division by zero for states with zero steady-state mass
    A_diag = A_diag + (A_diag==0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return M
def var_fmpt(P):
"""
Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : matrix
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
matrix
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> p=np.matrix([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
matrix([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in Kemeny and Snell (1976).
"""
    A = P ** 1000  # a high matrix power approximates the limiting matrix
    n, k = A.shape
    I = np.identity(k)
    Z = la.inv(I - P + A)  # fundamental matrix of the ergodic chain
    E = np.ones_like(Z)
    D = np.diag(1. / np.diag(A))
    Zdg = np.diag(np.diag(Z))
    M = (I - Z + E * Zdg) * D  # first mean passage times, as in fmpt()
    ZM = Z * M
    ZMdg = np.diag(np.diag(ZM))
    # second moments of first passage times (Kemeny and Snell, p. 83)
    W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
    # variance = second moment minus squared mean
    return W - np.multiply(M, M)
|
{
"content_hash": "c4b93781a8bd972bcaa94b7665b8b290",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 80,
"avg_line_length": 27.70909090909091,
"alnum_prop": 0.5503062117235346,
"repo_name": "spreg-git/pysal",
"id": "56cca9a23eb2f4f5b4d2bf3b70d01b0cf8201556",
"size": "4572",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pysal/spatial_dynamics/ergodic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10152"
},
{
"name": "Makefile",
"bytes": "408"
},
{
"name": "Python",
"bytes": "2549924"
}
],
"symlink_target": ""
}
|
DEBUG = False
TEMPLATE_DEBUG = DEBUG
import os
BASE_PATH = os.path.dirname(__file__)
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_PATH + '/tracked.db'
}
}
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = BASE_PATH + '/media/'
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/media'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '0ym**1k5h^h-pot1m&%c+t2$tqhzsg*ha(#ir=nd4c67@h(!&!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'geo.middleware.GEMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/Users/richbs/Sites/tracked/geo/templates/'
)
TEMPLATE_CONTEXT_PROCESSORS = ('geo.context_processors.gmap_key_processor',
'django.core.context_processors.auth',
'django.core.context_processors.media',)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'geo',
)
# Extra stuff mytracks.net
FLICKR_KEY = 'b11d20ddf6862abefad67bd901d7f15d'
FLICKR_USER = "38584744@N00"
FLICKR_SECRET = ''
GOOGLE_MAPS_KEY = 'AIzaSyDuyEu9FOMi86_Wl4W8GQHqOEZElrKtZ_E'
|
{
"content_hash": "46311a1f74668811c01feda1ebc94da0",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 94,
"avg_line_length": 31.46875,
"alnum_prop": 0.7067196292618338,
"repo_name": "richbs/tracked",
"id": "44d6ce8f1674bee54cc019e8ca4ee49bbefb2fac",
"size": "3021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7916"
},
{
"name": "JavaScript",
"bytes": "19153"
},
{
"name": "Python",
"bytes": "42908"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class ShopifyAccount(ProviderAccount):
pass
class ShopifyProvider(OAuth2Provider):
id = 'shopify'
name = 'Shopify'
account_class = ShopifyAccount
@property
def is_per_user(self):
grant_options = getattr(settings, 'SOCIALACCOUNT_PROVIDERS', {}).get(
'shopify', {}).get('AUTH_PARAMS', {}).get('grant_options[]', '')
return grant_options.lower().strip() == 'per-user'
def get_auth_params(self, request, action):
ret = super(ShopifyProvider, self).get_auth_params(request, action)
shop = request.GET.get('shop', None)
if shop:
ret.update({'shop': shop})
return ret
def get_default_scope(self):
return ['read_orders', 'read_products']
def extract_uid(self, data):
if self.is_per_user:
return str(data['associated_user']['id'])
else:
return str(data['shop']['id'])
def extract_common_fields(self, data):
if self.is_per_user:
return dict(
email=data['associated_user']['email'],
first_name=data['associated_user']['first_name'],
last_name=data['associated_user']['last_name'],
)
else:
# See: https://docs.shopify.com/api/shop
# Without online mode, User is only available with Shopify Plus,
# email is the only common field
return dict(email=data['shop']['email'])
provider_classes = [ShopifyProvider]
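# Illustrative settings sketch (not part of this module): ``is_per_user``
# above returns True only when the Django settings contain exactly this
# nesting, which requests Shopify's online-mode (per-user) tokens:
#     SOCIALACCOUNT_PROVIDERS = {
#         'shopify': {
#             'AUTH_PARAMS': {'grant_options[]': 'per-user'},
#         },
#     }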
|
{
"content_hash": "b29cee2410a7f33728389716d8bb3e13",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 32.23076923076923,
"alnum_prop": 0.6073985680190931,
"repo_name": "okwow123/djangol2",
"id": "bc34908325ff8daed43f79d439a5d1d1e6b7f0c4",
"size": "1676",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/shopify/provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "44659"
},
{
"name": "JavaScript",
"bytes": "3260"
},
{
"name": "Makefile",
"bytes": "694"
},
{
"name": "Python",
"bytes": "636751"
}
],
"symlink_target": ""
}
|
import time
import phenox as px
def main():
duration = 3.0
filename = "testsound.raw"
    print("sound recording ({0} sec) starts after you press ENTER".format(duration))
raw_input()
print("now recording...")
    started = px.set_sound_recordquery(duration)
if not started:
        print("a problem occurred while starting sound recording; exiting")
return
time.sleep(duration)
    print("recording ended: saving the raw sound file")
while True:
sound = px.get_sound(duration, restype='str')
if not sound:
continue
with open(filename, "wb") as f:
f.write(sound)
break
print("raw sound was saved to '{0}'".format(filename))
if __name__ == "__main__":
main()
|
{
"content_hash": "a48e286b10bc32fcfb9b757ef8cae7b1",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 76,
"avg_line_length": 24.966666666666665,
"alnum_prop": 0.5994659546061415,
"repo_name": "atsushisugiyama/phenox_python",
"id": "b0c6aa426002931dbb2c87f4b28c291c627a0d4d",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/get_sound.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25243"
}
],
"symlink_target": ""
}
|
import os
import sys
import imp
BASEDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
os.environ['DJANGO_SETTINGS_MODULE'] = 'project.development'
imp.load_source('django_binary', os.path.join(BASEDIR, 'bin', 'django'))
extensions = ['sphinx.ext.autodoc']
#templates_path = ['_templates']
#html_theme_path = ['_themes']
#html_static_path = ['_static']
source_suffix = '.rst'
master_doc = 'index'
project = u'Foo'
copyright = u'2014'
version = '1.0'
release = '1.0'
language = 'en'
html_title = "Foo"
unused_docs = []
exclude_trees = []
pygments_style = 'colorful'
html_theme = 'nature'
html_theme_options = {}
html_use_modindex = False
html_use_index = True
html_show_sourcelink = False
html_copy_source = False
html_file_suffix = '.html'
html_last_updated_fmt = '%b %d, %Y'
html_add_permalinks = False
#html_use_smartypants = True
html_additional_pages = {
# 'index': 'index.html',
}
|
{
"content_hash": "beb54e9062881b7e2e3d95071a82d973",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 23.973684210526315,
"alnum_prop": 0.6805708013172338,
"repo_name": "haltu/eca-eops",
"id": "26fa333a5d089b29e99584153258b92cc9100154",
"size": "937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10800"
},
{
"name": "HTML",
"bytes": "16540"
},
{
"name": "JavaScript",
"bytes": "51606"
},
{
"name": "Python",
"bytes": "19139"
},
{
"name": "Shell",
"bytes": "298"
}
],
"symlink_target": ""
}
|
class Person(object):
def __init__(self, name):
self.name = name
class Employee(Person):
def __init__(self, name, salary):
super(Employee, self).__init__(name)
self.salary = salary
e1 = Employee('Mighty', 10000000)
# note: in Python 2, super() only works for new-style classes (those that inherit from object)
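# for contrast, calling super() with an old-style (classic) class fails:
#     class OldPerson: pass
#     super(OldPerson)  # TypeError: must be type, not classobj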
|
{
"content_hash": "b918ef556e0ce607d1a78ffde0abd5f3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 20.714285714285715,
"alnum_prop": 0.6862068965517242,
"repo_name": "cohadar/learn-python-the-hard-way",
"id": "e076843787cd474002bb04b7c94906bf324aa3f1",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex42.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30545"
}
],
"symlink_target": ""
}
|
""" Enforce the nocase collation on the email table
Revision ID: 049fed905da7
Revises: 49d77a93118e
Create Date: 2018-04-21 13:23:56.571524
"""
# revision identifiers, used by Alembic.
revision = '049fed905da7'
down_revision = '49d77a93118e'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255), nullable=False)
def downgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255), nullable=False)
|
{
"content_hash": "498a728b6daeaeeb30e6ab71ed32cd48",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 23.52,
"alnum_prop": 0.717687074829932,
"repo_name": "kaiyou/freeposte.io",
"id": "d8af41d3939450c7b44b83c1077a10d69a1c0872",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/admin/migrations/versions/049fed905da7_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "25498"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1756"
},
{
"name": "PHP",
"bytes": "1431"
},
{
"name": "Python",
"bytes": "50818"
},
{
"name": "Shell",
"bytes": "2056"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
# also update in nsq/version.py
version = '0.9.0'
setup(
name='pynsq',
version=version,
description='official Python client library for NSQ',
keywords='python nsq',
author='Matt Reiferson',
author_email='snakes@gmail.com',
url='https://github.com/nsqio/pynsq',
download_url=(
'https://s3.amazonaws.com/bitly-downloads/nsq/pynsq-%s.tar.gz' %
version
),
packages=['nsq'],
install_requires=['tornado'],
include_package_data=True,
zip_safe=False,
tests_require=['pytest>=3.6.3', 'mock', 'python-snappy'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 6 - Mature',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
]
)
|
{
"content_hash": "b18b45647337006ceb21c0f23804bf1e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 72,
"avg_line_length": 29.096153846153847,
"alnum_prop": 0.6133509583608724,
"repo_name": "bitly/pynsq",
"id": "5d1109c82d793e53a9cb9907cc3fd479a48f0f3b",
"size": "1513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115926"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
}
|
import logging
import log
from log import (DispatchingFormatter,
                 TaskFormatter,
                 DefaultFormatter,
                 TaskHandler,
                 TaskLoggerAdapter)
if __name__ == "__main__":
logger = log.getLogger('main')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(DispatchingFormatter([ TaskFormatter(), DefaultFormatter() ]))
logger.addHandler(handler)
handler = TaskHandler()
handler.setFormatter(TaskFormatter())
logger.addHandler(handler)
logger.error('Sickbeard Logger test')
class Task():
def __init__(self):
self.output = []
self.worker = 1
self.seq = 10
def test_logger():
logger = log.getLogger('main.test')
adapter = TaskLoggerAdapter(logger)
t = Task()
adapter.set_task(t)
adapter.info('hi')
adapter.info('test2')
    print(t.output)
test_logger()
|
{
"content_hash": "550aa4196d92b4fdbe7af34e186e6a34",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 87,
"avg_line_length": 25.6,
"alnum_prop": 0.56640625,
"repo_name": "srluge/DelugeSickbeardPlugin",
"id": "986b88fccab377e40aba7610a37cc446d04aaa9a",
"size": "2105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sickbeard/log_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "416662"
},
{
"name": "Python",
"bytes": "116644"
}
],
"symlink_target": ""
}
|
"""
A SearchSource is a TroveSource + information about how to search it
(using findTroves). This allows the user to abstract away information
about whether the trove source will work without an installLabelPath
or not, what flavor to use, etc.
It also makes it easier to stack sources (see TroveSourceStack findTroves
for an example of the pain of stacking trove sources.).
Finally a SearchSource is closely tied to a resolve method. This resolve
method resolves dependencies against the SearchSource. A SearchSource
stack that searches against a trove list first and then against
an installLabelPath will have a resolve method that works the same way
(see resolvemethod.py for implementation).
Currently, there are 3 types of SearchSources.
- NetworkSearchSource(repos, installLabelPath, flavor, db=None)
- searches the network on the given installLabelPath.
- TroveSearchSource(repos, troveList, flavor=None, db=None)
- searches the given trove list.
- SearchSourceStack(*sources)
- searches the sources in order.
For all of these sources, you simply call findTroves(troveSpecs),
without passing in flavor or installLabelPath.
You can also create a searchSourceStack by calling
createSearchSourceStackFromStrings.
"""
import itertools
from conary import trove
from conary import versions
from conary import errors as baseerrors
from conary.repository import changeset
from conary.repository import errors
from conary.repository import resolvemethod
from conary.repository import trovesource
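# A hedged usage sketch of the pattern described in the module docstring;
# a repository client ``repos``, a label list ``labelPath`` and a dep flavor
# ``flavor`` are assumed to be constructed by the caller:
#     source = NetworkSearchSource(repos, labelPath, flavor)
#     results = source.findTroves([('foo:runtime', None, None)])
#     method = source.getResolveMethod()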
class AbstractSearchSource(object):
# used for doing isinstance/issubclass checks.
def getTroveSource(self):
raise NotImplementedError
def _filterSpecsForSource(self, troveSpecs):
return [], dict(zip(troveSpecs, [[x] for x in troveSpecs]))
def findTrove(self, troveSpec, useAffinity=False, **kw):
raise NotImplementedError
def findTroves(self, troveSpecs, useAffinity=False, **kw):
raise NotImplementedError
def getResolveMethod(self):
raise NotImplementedError
class SearchSource(AbstractSearchSource):
def __init__(self, source, flavor, db=None):
source.searchWithFlavor()
self.source = source
self.db = db
self.flavor = flavor
self.installLabelPath = None
# pass through methods that are valid in both the searchSource
# and its underlying trove source.
for method in ('getTroveLeavesByLabel', 'getTroveVersionsByLabel',
'getTroveLeavesByBranch', 'getTroveVersionsByBranch',
'getTroveVersionFlavors', 'getMetadata', 'hasTroves',
'createChangeSet', 'iterFilesInTrove', 'getFileVersion',
'getTrove', 'getTroves'):
if hasattr(source, method):
setattr(self, method, getattr(source, method))
def getTroveSource(self):
"""
Returns the source that this stack is wrapping, if there is one.
"""
return self.source
def findTrove(self, troveSpec, useAffinity=False, **kw):
"""
Finds the trove matching the given (name, versionSpec, flavor)
troveSpec. If useAffinity is True, uses the associated database
for branch/flavor affinity.
"""
res = self.findTroves([troveSpec], useAffinity=useAffinity, **kw)
return res[troveSpec]
def findTroves(self, troveSpecs, useAffinity=False, **kw):
"""
Finds the trove matching the given list of
(name, versionSpec, flavor) troveSpecs. If useAffinity is True,
uses the associated database for label/flavor affinity.
"""
if useAffinity:
kw['affinityDatabase'] = self.db
return self.source.findTroves(self.installLabelPath, troveSpecs,
self.flavor, **kw)
def getResolveMethod(self):
"""
Returns the dep resolution method
"""
m = resolvemethod.BasicResolutionMethod(None, self.db, self.flavor)
m.setTroveSource(self.source)
m.setFlavorPreferences(self.source.getFlavorPreferenceList())
return m
def getFlavorPreferenceList(self):
return self.source.getFlavorPreferenceList()
def getSearchPath(self):
return self.installLabelPath
class NetworkSearchSource(SearchSource):
"""
Search source using an installLabelPath.
"""
def __init__(self, repos, installLabelPath, flavor, db=None,
resolveSearchMethod=resolvemethod.RESOLVE_ALL):
SearchSource.__init__(self, repos, flavor, db)
self.installLabelPath = installLabelPath
self.resolveSearchMethod = resolveSearchMethod
def _filterSpecsForSource(self, troveSpecs):
troveSpecMap = {}
rejected = []
for name, versionStr, flavor in troveSpecs:
labelStrs = self._getLabelsFromStr(versionStr)
if not labelStrs:
rejected.append((name, versionStr, flavor))
else:
for labelStr in labelStrs:
troveSpecMap.setdefault((name, labelStr, flavor), []).append(
(name, versionStr, flavor))
return rejected, troveSpecMap
def _getLabelsFromStr(self, versionStr):
if not versionStr:
return [versionStr]
if not isinstance(versionStr, str):
versionStr = str(versionStr)
firstChar = versionStr[0]
lastChar = versionStr[-1]
if firstChar == '/':
try:
version = versions.VersionFromString(versionStr)
except baseerrors.ParseError, e:
raise errors.TroveNotFound, 'Error parsing version "%s": %s' % (versionStr, str(e))
if isinstance(version, versions.Branch):
label = version.label()
else:
label = version.trailingLabel()
if label in self.installLabelPath:
return [versionStr]
else:
return None
if firstChar == '@':
if '/' in versionStr:
item, remainder = versionStr[1:].split('/')
namespace, tag = item.split(':', 1)
return [ '%s/%s' % (x, remainder) for x in self.installLabelPath
if (x.getNamespace(), x.getLabel()) == (namespace, tag) ]
else:
namespace, tag = versionStr[1:].split(':', 1)
return [ str(x) for x in self.installLabelPath
if (x.getNamespace(), x.getLabel()) == (namespace, tag) ]
if firstChar == ':':
if '/' in versionStr:
tag, remainder = versionStr[1:].split('/')
return [ '%s/%s' % (x, remainder) for x in self.installLabelPath if x.getLabel() == tag ]
else:
tag = versionStr[1:]
return [ str(x) for x in self.installLabelPath if x.getLabel() == tag ]
elif lastChar == '@':
host = versionStr[:-1]
return [ str(x) for x in self.installLabelPath if x.getHost() == host ]
elif '@' in versionStr:
if '/' in versionStr:
label, remainder = versionStr.split('/')
return [ '%s/%s' % (x, remainder) for x in self.installLabelPath
if str(x) == label ]
return [ str(x) for x in self.installLabelPath if str(x) == versionStr ]
        # Version- or revision-only strings are fine - they don't modify
        # the label we search on.
return [ versionStr ]
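    # A hedged sketch of how version strings map onto an installLabelPath of
    # [conary.rpath.com@rpl:devel] (values illustrative, not taken from the
    # original module):
    #   ''                  -> ['']                          (no restriction)
    #   ':devel'            -> ['conary.rpath.com@rpl:devel']
    #   '@rpl:devel'        -> ['conary.rpath.com@rpl:devel']
    #   'conary.rpath.com@' -> ['conary.rpath.com@rpl:devel']
    #   ':branch'           -> []                 (rejected, not on the path)
    #   '1.0-1-1'           -> ['1.0-1-1']   (revision only, label unchanged)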
def getResolveMethod(self):
"""
Resolves using the given installLabelPath.
"""
searchMethod = self.resolveSearchMethod
m = resolvemethod.DepResolutionByLabelPath(None, self.db,
self.installLabelPath,
self.flavor,
searchMethod=searchMethod)
m.setTroveSource(self.source)
m.setFlavorPreferences(self.source.getFlavorPreferenceList())
return m
class TroveSearchSource(SearchSource):
"""
Search source using a list of troves. Accepts either
a list of trove tuples or a list of trove objects.
"""
def __init__(self, troveSource, troveList, flavor=None, db=None):
if isinstance(troveList, set):
troveList = tuple(troveList)
elif not isinstance(troveList, (list, tuple)):
troveList = [troveList]
if troveList and not isinstance(troveList[0], trove.Trove):
troveTups = troveList
troveList = troveSource.getTroves(troveList, withFiles=False)
else:
troveTups = [ x.getNameVersionFlavor() for x in troveList ]
newTroveSource = trovesource.TroveListTroveSource(troveSource, troveTups)
newTroveSource.searchWithFlavor()
newTroveSource.setFlavorPreferenceList(
troveSource.getFlavorPreferenceList())
newTroveSource.searchLeavesOnly()
SearchSource.__init__(self, newTroveSource, flavor, db)
self.troveList = troveList
def getSearchPath(self):
return [ x.getNameVersionFlavor() for x in self.troveList ]
def getResolveMethod(self):
"""
Returns a dep resolution method that will resolve dependencies
against these troves.
"""
m = resolvemethod.DepResolutionByTroveList(None, self.db,
self.troveList,
self.flavor)
m.setTroveSource(self.source)
m.setFlavorPreferences(self.source.getFlavorPreferenceList())
return m
class SearchSourceStack(trovesource.SourceStack, AbstractSearchSource):
"""
Created by SearchSourceStack(*sources)
Method for searching a stack of sources. Call in the same way
as a single searchSource:
findTroves(troveSpecs, useAffinity=False)
"""
def __init__(self, *args, **kw):
trovesource.SourceStack.__init__(self, *args)
AbstractSearchSource.__init__(self)
self.resolveSearchMethod = kw.pop('resolveSearchMethod',
resolvemethod.RESOLVE_ALL)
def getSearchPath(self):
searchPath = []
for source in self.sources:
searchPath.extend(source.getSearchPath())
return searchPath
def getFlavorPreferenceList(self):
return self.sources[0].getFlavorPreferenceList()
def setFlavorPreferenceList(self, flavorList):
for source in self.sources:
            source.setFlavorPreferenceList(flavorList)
def getTroveSource(self):
if len(self.sources) == 1:
return self.sources[0].getTroveSource()
return trovesource.stack(*[ x.getTroveSource() for x in self.sources])
def findTrove(self, troveSpec, useAffinity=False, **kw):
"""
Finds the trove matching the given (name, versionSpec, flavor)
troveSpec. If useAffinity is True, uses the associated database
for branch/flavor affinity.
"""
res = self.findTroves([troveSpec], useAffinity=useAffinity, **kw)
return res[troveSpec]
def findTroves(self, troveSpecs, useAffinity=False, allowMissing=False,
requireLatest=False, **kw):
"""
Finds the trove matching the given list of
(name, versionSpec, flavor) troveSpecs. If useAffinity is True,
uses the associated database for branch/flavor affinity.
"""
troveSpecs = list(troveSpecs)
reposSpecs = {}
results = {}
networkSource = None
for source in self.sources:
if isinstance(source, NetworkSearchSource):
networkSource = source
newTroveSpecs, specsToUse = source._filterSpecsForSource(troveSpecs)
foundTroves = source.findTroves(specsToUse, allowMissing=True,
requireLatest = requireLatest)
for troveSpec in specsToUse:
for origSpec in specsToUse[troveSpec]:
if troveSpec in foundTroves:
results.setdefault(origSpec, []).extend(foundTroves[troveSpec])
else:
newTroveSpecs.append(origSpec)
troveSpecs = newTroveSpecs
if troveSpecs:
if networkSource:
# All the explicit search sources are exhausted. Fall back
# to searching the repository without any label restrictions.
results.update(networkSource.findTroves(troveSpecs,
useAffinity=useAffinity,
allowMissing=allowMissing,
**kw))
elif not allowMissing:
            # search again with allowMissing=False to raise the appropriate
            # exception (only troves that weren't found before will be in
            # this list).
results.update(self.sources[-1].findTroves(troveSpecs,
useAffinity=useAffinity,
allowMissing=False, **kw))
return results
def getResolveMethod(self):
methods = []
if self.resolveSearchMethod == resolvemethod.RESOLVE_LEAVES_FIRST:
            # Special handling for a RESOLVE_LEAVES_FIRST stack:
            # first search only the leaves for _everything_,
            # then go back and search the remainder.
            # If we just left this up to the individual resolve methods,
            # then for sources [a, b, c] it would search a-leaves,
            # a-rest, b-leaves, b-rest, ..., where we want a-leaves,
            # b-leaves, c-leaves first, and only then the remainders.
for source in self.sources:
method = source.getResolveMethod()
if hasattr(method, 'searchLeavesOnly'):
method.searchLeavesOnly()
methods.append(method)
for source in self.sources:
method = source.getResolveMethod()
if hasattr(method, 'searchLeavesOnly'):
method.searchAllVersions()
methods.append(method)
return resolvemethod.stack(methods)
else:
return resolvemethod.stack(
[x.getResolveMethod() for x in self.sources])
def stack(*sources):
""" create a search source that will search first source1, then source2 """
return SearchSourceStack(*sources)
def createSearchPathFromStrings(searchPath):
"""
    Creates a list of items that can be passed into createSearchSourceStack.

    Valid items in the searchPath include:
        1. a troveSpec (foo=:devel) or a list of troveSpecs
        2. a label string (conary.rpath.com@rpl:devel)
        3. label objects or a list of label objects.
"""
from conary.conaryclient import cmdline
from conary import conarycfg
labelList = []
finalPath = []
if not isinstance(searchPath, (list, tuple)):
searchPath = [searchPath]
for item in searchPath:
if isinstance(item, conarycfg.CfgLabelList):
item = tuple(item)
elif isinstance(item, versions.Label):
labelList.append(item)
continue
elif isinstance(item, (list, tuple)):
# recurse
item = list(itertools.chain(*createSearchPathFromStrings(item)))
elif isinstance(item, str):
if '=' in item:
# only troveSpecs have = in them
item = ( cmdline.parseTroveSpec(item), )
elif '@' in item:
try:
item = versions.Label(item)
except baseerrors.ParseError, err:
raise baseerrors.ParseError(
'Error parsing label "%s": %s' % (item, err))
labelList.append(item)
continue
else:
item = (cmdline.parseTroveSpec(item),)
else:
raise baseerrors.ParseError('Unknown searchPath item "%s"' % item)
# labels don't get here, so we know that this is not part of a
# labelPath
if labelList:
finalPath.append(tuple(labelList))
labelList = []
finalPath.append(item)
if labelList:
finalPath.append(tuple(labelList))
return tuple(finalPath)
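# A hedged sketch of the transformation (inputs and outputs assumed, not taken
# from the original module): adjacent label strings are grouped into a single
# label-path tuple, while trove specs form their own entries:
#
#   createSearchPathFromStrings(
#       ['conary.rpath.com@rpl:devel', 'other.host@ns:1', 'foo=:devel'])
#   -> ((Label('conary.rpath.com@rpl:devel'), Label('other.host@ns:1')),
#       (('foo', ':devel', None),))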
def createSearchSourceStackFromStrings(searchSource, searchPath, flavor,
db=None, fallBackToRepos=True):
"""
Create a search source stack from a list of search path elements. See
L{createSearchPathFromStrings} for the elements allowed.
"""
try:
strings = searchPath
searchPath = createSearchPathFromStrings(searchPath)
return createSearchSourceStack(searchSource, searchPath, flavor, db,
fallBackToRepos=fallBackToRepos)
except baseerrors.ConaryError, err:
raise baseerrors.ConaryError('Could not create search path "%s": %s' % (
' '.join(strings), err))
def createSearchSourceStack(searchSource, searchPath, flavor, db=None,
resolveLeavesFirst=True, troveSource=None,
useAffinity=True, fallBackToRepos=True):
"""
Creates a searchSourceStack based on a searchPath.
Valid parameters include:
- a label object
- a trove tuple
- a trove object
- a list of any of the above.
"""
if troveSource is None:
troveSource = searchSource.getTroveSource()
if resolveLeavesFirst:
searchMethod = resolvemethod.RESOLVE_LEAVES_FIRST
else:
searchMethod = resolvemethod.RESOLVE_ALL
searchStack = SearchSourceStack(
resolveSearchMethod=searchMethod)
hasNetworkSearchSource = False
for item in searchPath:
if not isinstance(item, (list, tuple)):
item = [item]
if isinstance(item[0], versions.Label):
searchStack.addSource(NetworkSearchSource(troveSource,
item, flavor, db,
resolveSearchMethod=searchMethod))
hasNetworkSearchSource = True
elif isinstance(item[0], trove.Trove):
s = TroveSearchSource(searchSource.getTroveSource(), item, flavor)
searchStack.addSource(s)
elif isinstance(item[0], (list, tuple)):
if not isinstance(item[0][1], versions.Version):
item = searchSource.findTroves(item, useAffinity=useAffinity)
item = list(itertools.chain(*item.itervalues()))
s = TroveSearchSource(searchSource.getTroveSource(), item, flavor)
searchStack.addSource(s)
else:
raise baseerrors.ParseError('unknown search path item %s' % (item,))
if fallBackToRepos and not hasNetworkSearchSource:
searchStack.addSource(NetworkSearchSource(troveSource, [], flavor, db,
resolveSearchMethod=searchMethod))
return searchStack
|
{
"content_hash": "18a5efe1f652fe79b7c4ce432223490b",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 105,
"avg_line_length": 42.05756929637526,
"alnum_prop": 0.5932065906210393,
"repo_name": "fedora-conary/conary",
"id": "aecf2ba4ceb2fc0657e40eba67024cff08f971a3",
"size": "20312",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "conary/repository/searchsource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
}
|
from conans import CMake, ConanFile, AutoToolsBuildEnvironment, tools
import os
import shutil
class JpegConan(ConanFile):
name = "jpeg"
src_version = "9b"
version = "9.2" # same as 9b
ZIP_FOLDER_NAME = name + "-" + src_version
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False]}
default_options = "shared=True"
exports = ["CMakeLists.txt"]
url="http://github.com/GatorQue/conan-jpeg"
license="http://www.infai.org/jpeg"
description="The Independent JPEG Group (IJG) is responsible for the reference implementation of the original JPEG standard."
def source(self):
zip_name = "%ssrc.v%s.tar.gz" % (self.name, self.src_version)
tools.download("http://www.infai.org/jpeg/files?get=%s" % zip_name, zip_name)
tools.unzip(zip_name)
os.unlink(zip_name)
if self.settings.os == "Windows":
shutil.move("CMakeLists.txt", "%s/CMakeLists.txt" % self.ZIP_FOLDER_NAME)
def build(self):
if self.settings.os == "Windows":
cmake = CMake(self.settings)
cmake_options = []
cmake_options.append("-DCMAKE_INSTALL_PREFIX:PATH=../install")
            if self.options.shared:
cmake_options.append("-DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=ON")
cmake_options.append("-DBUILD_SHARED_LIBS=ON")
self.run("IF not exist build mkdir build")
cd_build = "cd build"
self.output.warn('%s && cmake ../%s %s %s' % (cd_build, self.ZIP_FOLDER_NAME, cmake.command_line, " ".join(cmake_options)))
self.run('%s && cmake ../%s %s %s' % (cd_build, self.ZIP_FOLDER_NAME, cmake.command_line, " ".join(cmake_options)))
self.output.warn('%s && cmake --build . --target install %s' % (cd_build, cmake.build_config))
self.run('%s && cmake --build . --target install %s' % (cd_build, cmake.build_config))
else:
env_build = AutoToolsBuildEnvironment(self)
env_build.fpic = self.options.shared
if self.settings.os == "Macos":
old_str = '-install_name \$rpath/\$soname'
new_str = '-install_name \$soname'
tools.replace_in_file("./%s/configure" % self.ZIP_FOLDER_NAME, old_str, new_str)
conf_options = []
conf_options.append("--prefix=/")
            if self.options.shared:
conf_options.append("--enable-shared")
conf_options.append("--disable-static")
else:
conf_options.append("--disable-shared")
conf_options.append("--enable-static")
with tools.environment_append(env_build.vars):
self.run("./configure %s" % " ".join(conf_options), cwd=self.ZIP_FOLDER_NAME)
self.run("make", cwd=self.ZIP_FOLDER_NAME)
self.run("make install DESTDIR=%s/install" % self.conanfile_directory, cwd=self.ZIP_FOLDER_NAME)
def package(self):
self.copy("*", dst="include", src="install/include")
self.copy("*", dst="lib", src="install/lib", links=True)
self.copy("*", dst="bin", src="install/bin")
def package_info(self):
self.cpp_info.includedirs = ["include"]
self.cpp_info.libs = ["jpeg"]
|
{
"content_hash": "521b4fa7ba17adafddddc9fa6dfb628a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 135,
"avg_line_length": 45.917808219178085,
"alnum_prop": 0.5817422434367542,
"repo_name": "GatorQue/conan-jpeg",
"id": "bad34c403ee371ce421fad479792e747069038fd",
"size": "3352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conanfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "350"
},
{
"name": "CMake",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "4914"
},
{
"name": "Shell",
"bytes": "648"
}
],
"symlink_target": ""
}
|
"""
gen_dates.py -- generate dates for inclusion in VIVO
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2016 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
from datetime import datetime
from dateutil.relativedelta import relativedelta
out_file = open('dates.txt', "w")
print >>out_file, "uri\tPrecision\tDate"
# Generate years
start_year = 1970
end_year = 2020
for year in range(start_year, end_year + 1):
date_string = str(year) + "-01-01T00:00:00"
print >>out_file, "\t" + "y" + "\t" + date_string
# Generate year month
current_date = datetime(start_year, 1, 1)
end_date = datetime(end_year, 12, 31)
while current_date <= end_date:
date_string = current_date.isoformat()
print >>out_file, "\t" + "ym" + "\t" + date_string
current_date += relativedelta(months=+1)
# Generate year month day
current_date = datetime(start_year, 1, 1)
end_date = datetime(end_year, 12, 31)
while current_date <= end_date:
date_string = current_date.isoformat()
print >>out_file, "\t" + "ymd" + "\t" + date_string
current_date += relativedelta(days=+1)
out_file.close()
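# A sample of the generated dates.txt, derived from the loops above
# (tab-separated; the uri column is left empty):
#   uri<TAB>Precision<TAB>Date
#   <TAB>y<TAB>1970-01-01T00:00:00
#   <TAB>ym<TAB>1970-01-01T00:00:00
#   <TAB>ymd<TAB>1970-01-01T00:00:00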
|
{
"content_hash": "d0054536a43286efffcb1a1e5c961855",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 56,
"avg_line_length": 26.58139534883721,
"alnum_prop": 0.6535433070866141,
"repo_name": "ctsit/vivo-pump",
"id": "80d7cc641dd691aa87147ff4607c1aa97d1bee39",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dates/gen_dates.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "8858"
},
{
"name": "Python",
"bytes": "316861"
},
{
"name": "Shell",
"bytes": "1312"
},
{
"name": "TeX",
"bytes": "947186"
}
],
"symlink_target": ""
}
|
import demistomock as demisto # noqa: F401
from bs4 import BeautifulSoup
from CommonServerPython import * # noqa: F401
def extract_html_table(html, indexes):
soup = BeautifulSoup(html, 'html.parser')
tables = []
for index, tab in enumerate(soup.find_all('table')):
if len(indexes) > 0 and index not in indexes and str(index) not in indexes:
continue
table = []
headers = []
# Check if there are headers and use them
for th in tab.find_all('th'):
headers.append(th.text)
for tr in tab.find_all('tr'):
tds = tr.find_all('td')
            # This is a data row, not a header row
if len(tds) > 0:
# Single value in a table - just create an array of strings ignoring header
if len(tds) == 1:
table.append(tds[0].text)
# If there are 2 columns and no headers, treat as key-value (might override values if same key in first column)
elif len(tds) == 2 and len(headers) == 0:
if type(table) == list:
table = {} # type: ignore
table[tds[0].text] = tds[1].text
else:
row = {}
if len(headers) > 0:
for i, td in enumerate(tds):
row[headers[i]] = td.text
else:
for i, td in enumerate(tds):
row['cell' + str(i)] = td.text
table.append(row)
if len(table) > 0:
tables.append(table)
if len(tables) > 0:
return({
'Type': entryTypes['note'],
'Contents': 'Found {} tables in HTML.'.format(len(tables)),
'ContentsFormat': formats['text'],
'EntryContext': {'HTMLTables': tables if len(tables) > 1 else tables[0]}
})
else:
return 'Did not find tables in HTML.'
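# A minimal sketch of the expected behavior (HTML input and values assumed for
# illustration): a two-column table without headers is returned as a
# key-value dict.
#
#   html = '<table><tr><td>Name</td><td>Smith</td></tr></table>'
#   extract_html_table(html, [])['EntryContext']
#   -> {'HTMLTables': {'Name': 'Smith'}}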
def main():
html = demisto.getArg('html')
indexes = argToList(demisto.getArg('indexes'))
demisto.results(extract_html_table(html, indexes))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
{
"content_hash": "d09a1f123cc1297b20895c525a870fac",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 127,
"avg_line_length": 38.224137931034484,
"alnum_prop": 0.5083446098331078,
"repo_name": "demisto/content",
"id": "992e12ef7d173e3f43d201634fe78157ba9c1345",
"size": "2217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/CommonScripts/Scripts/ExtractHTMLTables/ExtractHTMLTables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import webob.exc
from wsme.rest import json
from glance.api import policy
from glance.api.v2.model.metadef_resource_type import ResourceType
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations
from glance.api.v2.model.metadef_resource_type import ResourceTypes
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance import i18n
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
_ = i18n._
class ResourceTypeController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.gateway = glance.gateway.Gateway(db_api=self.db_api,
notifier=self.notifier,
policy_enforcer=self.policy)
def index(self, req):
try:
filters = {}
filters['namespace'] = None
rs_type_repo = self.gateway.get_metadef_resource_type_repo(
req.context)
db_resource_type_list = rs_type_repo.list(filters=filters)
resource_type_list = [ResourceType.to_wsme_model(
resource_type) for resource_type in db_resource_type_list]
resource_types = ResourceTypes()
resource_types.resource_types = resource_type_list
except exception.Forbidden as e:
LOG.debug("User not permitted to retrieve metadata resource types "
"index")
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError(e)
return resource_types
def show(self, req, namespace):
try:
filters = {}
filters['namespace'] = namespace
rs_type_repo = self.gateway.get_metadef_resource_type_repo(
req.context)
db_resource_type_list = rs_type_repo.list(filters=filters)
resource_type_list = [ResourceTypeAssociation.to_wsme_model(
resource_type) for resource_type in db_resource_type_list]
resource_types = ResourceTypeAssociations()
resource_types.resource_type_associations = resource_type_list
except exception.Forbidden as e:
LOG.debug("User not permitted to retrieve metadata resource types "
"within '%s' namespace", namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError(e)
return resource_types
def create(self, req, resource_type, namespace):
rs_type_factory = self.gateway.get_metadef_resource_type_factory(
req.context)
rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context)
try:
new_resource_type = rs_type_factory.new_resource_type(
namespace=namespace, **resource_type.to_dict())
rs_type_repo.add(new_resource_type)
except exception.Forbidden as e:
LOG.debug("User not permitted to create metadata resource type "
"within '%s' namespace", namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
return ResourceTypeAssociation.to_wsme_model(new_resource_type)
def delete(self, req, namespace, resource_type):
rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context)
try:
filters = {}
found = False
filters['namespace'] = namespace
db_resource_type_list = rs_type_repo.list(filters=filters)
for db_resource_type in db_resource_type_list:
if db_resource_type.name == resource_type:
db_resource_type.delete()
rs_type_repo.remove(db_resource_type)
found = True
if not found:
raise exception.NotFound()
except exception.Forbidden as e:
LOG.debug("User not permitted to delete metadata resource type "
"'%s' within '%s' namespace", resource_type, namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
msg = (_("Failed to find resource type %(resourcetype)s to "
"delete") % {'resourcetype': resource_type})
LOG.error(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
class RequestDeserializer(wsgi.JSONRequestDeserializer):
_disallowed_properties = ['created_at', 'updated_at']
def __init__(self, schema=None):
super(RequestDeserializer, self).__init__()
self.schema = schema or get_schema()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
@classmethod
def _check_allowed(cls, image):
for key in cls._disallowed_properties:
if key in image:
msg = _("Attribute '%s' is read-only.") % key
raise webob.exc.HTTPForbidden(explanation=msg)
def create(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
resource_type = json.fromjson(ResourceTypeAssociation, body)
return dict(resource_type=resource_type)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema
def show(self, response, result):
resource_type_json = json.tojson(ResourceTypeAssociations, result)
body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def index(self, response, result):
resource_type_json = json.tojson(ResourceTypes, result)
body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def create(self, response, result):
resource_type_json = json.tojson(ResourceTypeAssociation, result)
response.status_int = 201
body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def delete(self, response, result):
response.status_int = 204
def _get_base_properties():
return {
'name': {
'type': 'string',
'description': _('Resource type names should be aligned with Heat '
'resource types whenever possible: '
'http://docs.openstack.org/developer/heat/'
'template_guide/openstack.html'),
'maxLength': 80,
},
'prefix': {
'type': 'string',
'description': _('Specifies the prefix to use for the given '
'resource type. Any properties in the namespace '
'should be prefixed with this prefix when being '
'applied to the specified resource type. Must '
'include prefix separator (e.g. a colon :).'),
'maxLength': 80,
},
'properties_target': {
'type': 'string',
'description': _('Some resource types allow more than one key / '
'value pair per instance. For example, Cinder '
'allows user and image metadata on volumes. Only '
'the image properties metadata is evaluated by '
'Nova (scheduling or drivers). This property '
'allows a namespace target to remove the '
'ambiguity.'),
'maxLength': 80,
},
"created_at": {
"type": "string",
"description": _("Date and time of resource type association"
" (READ-ONLY)"),
"format": "date-time"
},
"updated_at": {
"type": "string",
"description": _("Date and time of the last resource type "
"association modification (READ-ONLY)"),
"format": "date-time"
}
}
def get_schema():
properties = _get_base_properties()
mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs()
schema = glance.schema.Schema(
'resource_type_association',
properties,
required=mandatory_attrs,
)
return schema
def get_collection_schema():
resource_type_schema = get_schema()
return glance.schema.CollectionSchema('resource_type_associations',
resource_type_schema)
def create_resource():
"""ResourceTypeAssociation resource factory method"""
schema = get_schema()
deserializer = RequestDeserializer(schema)
serializer = ResponseSerializer(schema)
controller = ResourceTypeController()
return wsgi.Resource(controller, deserializer, serializer)
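# A hedged example (values assumed, not from this module) of a request body
# that would validate against the schema above when associating a resource
# type with a namespace:
#
#   {
#       "name": "OS::Cinder::Volume",
#       "prefix": "cinder:",
#       "properties_target": "image"
#   }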
|
{
"content_hash": "b89e695966d808ca83f4aac72ff3e547",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 79,
"avg_line_length": 41.84375,
"alnum_prop": 0.6046489917849142,
"repo_name": "kfwang/Glance-OVA-OVF",
"id": "8ddf76bb199810c03123dd54a10f6ed74d755b3a",
"size": "11323",
"binary": false,
"copies": "1",
"ref": "refs/heads/ovf",
"path": "glance/api/v2/metadef_resource_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3961818"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
}
|
"""
WSGI config for elitegamez project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "elitegamez.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "0c5ae8fe33478239f1d64587fc28d1ac",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.214285714285715,
"alnum_prop": 0.7772151898734178,
"repo_name": "vlameiras/cdkeyswholesale",
"id": "de9012ff52f9f4f6c342da12ae133cb7cd7e3c45",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elitegamez/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "35583"
},
{
"name": "HTML",
"bytes": "21905"
},
{
"name": "JavaScript",
"bytes": "16643"
},
{
"name": "Python",
"bytes": "66500"
}
],
"symlink_target": ""
}
|
'''
PyEarthScience: read_GRIB_with_PyNIO.py

Description:
    Demonstrate the use of PyNIO to open and read the contents of
    a GRIB file.

    - PyNIO
    - GRIB

2019-01-22 kmf
'''
from __future__ import print_function
import Ngl,Nio
import numpy as np
#-- data directory and file name, we use an example GRIB file from the NCL package
ncarg = Ngl.pynglpath("data")
fname = ncarg+'/grb/MET9_IR108_cosmode_0909210000.grb2'
#-- open file
ds = Nio.open_file(fname, "r")
print('------------------------------------------------------')
print()
print('--> ds: ', ds)
print()
#-- print the size and shape of the variable
print('------------------------------------------------------')
print()
print('--> ds.dimensions ',ds.dimensions)
print()
#-- print file variables
print('------------------------------------------------------')
print()
print('--> file variables: ', ds.variables)
print()
#-- read variable 'SBTMP_P31_GRLL0_I207'
var = ds.variables['SBTMP_P31_GRLL0_I207']
#-- print variable information
print('------------------------------------------------------')
print()
print('--> var')
print()
print(var)
print()
#-- print the dimension names, size and shape of the variable
dimnames = var.dimensions
print('------------------------------------------------------')
print()
print('--> var.dimensions ',var.dimensions)
#-- print the size and shape of the variable
print('------------------------------------------------------')
print()
print('--> var.size ',var.size)
print('--> var.shape ',var.shape)
print()
#-- read variables lat and lon
lat = ds.variables['gridlat_0']
lon = ds.variables['gridlon_0']
#-- get the size of the coordinate arrays (rows, columns)
nlat, nlon = lat.shape
print('------------------------------------------------------')
print()
print('--> lat: (%4d,%4d)' % (lat.shape[0],lat.shape[1]))
print('--> lon: (%4d,%4d)' % (lon.shape[0],lon.shape[1]))
print()
#-- print the minimum and maximum of lat and lon
print('------------------------------------------------------')
print()
print('--> lat min %12.6f' % np.min(lat))
print('--> lat max %12.6f' % np.max(lat))
print('--> lon min %12.6f' % np.min(lon))
print('--> lon max %12.6f' % np.max(lon))
print()
#-- get the attribute content
lat_spol = lat.attributes['Latitude_of_southern_pole']
lon_spol = lat.attributes['Longitude_of_southern_pole']
#-- retrieve the dimension names of the coordinates lat/lon and
#-- their shapes
dimslat = dimnames[0]
shapelat = lat.shape
dimslon = dimnames[1]
shapelon = lon.shape
nrlat = shapelat
nrlon = shapelon
print('------------------------------------------------------')
print()
print('--> dimslat: ',dimslat, ' dimslon: ',dimslon,' nrlat: ',nrlat,' nrlon: ',nrlon)
print()
#-- print the variable attributes
print('------------------------------------------------------')
print()
print('--> variable attributes: ',var.attributes)
print()
#-- print the variable values
print('------------------------------------------------------')
print()
print('--> values ')
print()
print(var[:])
print()
#-- print the type of the variable SBTMP_P31_GRLL0_I207 (DataArray)
print('------------------------------------------------------')
print()
print('--> type(var) ',type(var))
print()
#-- print the type of the variable SBTMP_P31_GRLL0_I207 values (numpy.ndarray)
print('------------------------------------------------------')
print()
print('--> type(var[:]) ',type(var[:]))
print()
#-- select variable SBTMP_P31_GRLL0_I207 from dataset
print('------------------------------------------------------')
print()
print('--> dataset variable SBTMP_P31_GRLL0_I207')
print()
print(ds.variables['SBTMP_P31_GRLL0_I207'][:])
print()
#-- select variable SBTMP_P31_GRLL0_I207 from dataset, lat index 1 and lon index 2
print('------------------------------------------------------')
print()
print('--> dataset variable SBTMP_P31_GRLL0_I207 select data indexing lat=1 and lon=2')
print()
print(ds.variables['SBTMP_P31_GRLL0_I207'][1,2])
print()
#-- select a sub-region (slice) using lat/lon array indexing
print('------------------------------------------------------')
print()
print('--> select sub-region')
print()
print(ds.variables['SBTMP_P31_GRLL0_I207'][0:10,5:25])
print()
#-- print median values of variable SBTMP_P31_GRLL0_I207 of dataset
print('------------------------------------------------------')
print()
print('--> variable SBTMP_P31_GRLL0_I207 median')
print()
print(np.median(ds.variables['SBTMP_P31_GRLL0_I207']))
print()
#-- compute the means of the variable SBTMP_P31_GRLL0_I207 of the dataset
print('------------------------------------------------------')
print()
print('--> variable SBTMP_P31_GRLL0_I207 mean')
print()
mean = np.mean(ds.variables['SBTMP_P31_GRLL0_I207'])
print(mean)
print()
|
{
"content_hash": "8fd2e789cb7b726ca650fb6b9efa28c9",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 89,
"avg_line_length": 28.316091954022987,
"alnum_prop": 0.5177592855693119,
"repo_name": "KMFleischer/PyEarthScience",
"id": "5ce2943d8507fde98e098da1f8f8b1ae2a9fa926",
"size": "5213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IO/read_GRIB_PyNIO.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12025570"
},
{
"name": "NCL",
"bytes": "60407"
},
{
"name": "Python",
"bytes": "217868"
}
],
"symlink_target": ""
}
|
'''
The Inline classes used in the editor.
'''
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet, modelformset_factory
from django.utils.functional import curry
from django.contrib.admin.util import flatten_fieldsets
from django.utils.encoding import force_unicode
from metashare.repository.editor.related_mixin import RelatedAdminMixin
from metashare.repository.editor.schemamodel_mixin import SchemaModelLookup
from django.contrib.admin.options import InlineModelAdmin
class SchemaModelInline(InlineModelAdmin, RelatedAdminMixin, SchemaModelLookup):
extra = 1
template = 'admin/edit_inline/stacked.html'
collapse = False
def __init__(self, parent_model, admin_site):
super(SchemaModelInline, self).__init__(parent_model, admin_site)
if self.collapse:
self.verbose_name_plural = '_{}'.format(force_unicode(self.verbose_name_plural))
# Show m2m fields as horizontal filter widget unless they have a custom widget:
self.filter_horizontal = self.list_m2m_fields_without_custom_widget(self.model)
def get_fieldsets(self, request, obj=None):
return SchemaModelLookup.get_fieldsets(self, request, obj)
def formfield_for_dbfield(self, db_field, **kwargs):
# ForeignKey or ManyToManyFields
if self.is_x_to_many_relation(db_field):
return self.formfield_for_relation(db_field, **kwargs)
self.use_hidden_widget_for_one2one(db_field, kwargs)
lang_widget = self.add_lang_widget(db_field)
kwargs.update(lang_widget)
formfield = super(SchemaModelInline, self).formfield_for_dbfield(db_field, **kwargs)
self.use_related_widget_where_appropriate(db_field, kwargs, formfield)
return formfield
def response_change(self, request, obj):
if '_popup' in request.REQUEST:
return self.edit_response_close_popup_magic(obj)
else:
return super(SchemaModelInline, self).response_change(request, obj)
class ReverseInlineFormSet(BaseModelFormSet):
'''
A formset with either a single object or a single empty
form. Since the formset is used to render a required OneToOne
relation, the forms must not be empty.
'''
model = None
parent_fk_name = ''
def __init__(self,
data = None,
files = None,
instance = None,
prefix = None,
save_as_new = False,
queryset=None):
_qs = None
if instance.pk:
obj = getattr(instance, self.parent_fk_name)
if obj:
_qs = self.model.objects.filter(pk = obj.id)
if not _qs:
_qs = self.model.objects.filter(pk = -1)
self.extra = 1
super(ReverseInlineFormSet, self).__init__(data, files,
prefix = prefix,
queryset = _qs)
for form in self.forms:
            # If the formset can be deleted, it is not required, so its
            # forms may be empty.
form.empty_permitted = getattr(self, 'can_delete', False)
# pylint: disable-msg=E1101
def save_new_objects(self, commit=True):
'''
        This is identical to the super implementation
        except for the "and form.empty_permitted" condition.
'''
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed() and form.empty_permitted:
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def reverse_inlineformset_factory(parent_model,
model,
parent_fk_name,
formset,
form = ModelForm,
fields = None,
exclude = None,
formfield_callback = lambda f: f.formfield()):
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': 0,
'can_delete': parent_fk_name not in \
parent_model.get_fields()['required'],
'can_order': False,
'fields': fields,
'exclude': exclude,
'max_num': 1,
}
form_set = modelformset_factory(model, **kwargs)
form_set.parent_fk_name = parent_fk_name
return form_set
class ReverseInlineModelAdmin(SchemaModelInline):
'''
Derived from http://djangosnippets.org/snippets/2032/
reverseadmin
============
Module that makes django admin handle OneToOneFields in a better way.
A common use case for one-to-one relationships is to "embed" a model
inside another one. For example, a Person may have multiple foreign
keys pointing to an Address entity, one home address, one business
address and so on. Django admin displays those relations using select
boxes, letting the user choose which address entity to connect to a
person. A more natural way to handle the relationship is using
inlines. However, since the foreign key is placed on the owning
    entity, django admin's standard inline classes can't be used, which is
    why I created this module that implements "reverse inlines" for this
    use case.
Example:
from django.db import models
class Address(models.Model):
street = models.CharField(max_length = 255)
zipcode = models.CharField(max_length = 10)
city = models.CharField(max_length = 255)
class Person(models.Model):
name = models.CharField(max_length = 255)
business_addr = models.OneToOneField(Address,
related_name = 'business_addr')
home_addr = models.OneToOneField(Address, related_name = 'home_addr')
This is how standard django admin renders it:
http://img9.imageshack.us/i/beforetz.png/
Here is how it looks when using the reverseadmin module:
http://img408.imageshack.us/i/afterw.png/
You use reverseadmin in the following way:
from django.contrib import admin
from models import Person
from reverseadmin import ReverseModelAdmin
class PersonAdmin(ReverseModelAdmin):
inline_type = 'tabular'
admin.site.register(Person, PersonAdmin)
inline_type can be either "tabular" or "stacked" for tabular and
stacked inlines respectively.
The module is designed to work with Django 1.1.1. Since it hooks into
the internals of the admin package, it may not work with later Django
versions.
'''
formset = ReverseInlineFormSet
def __init__(self,
parent_model,
parent_fk_name,
model, admin_site,
inline_type):
self.date_hierarchy = None # Salvatore: to avoid an error in validate
self.template = 'admin/edit_inline_one2one/%s.html' % inline_type
self.parent_fk_name = parent_fk_name
self.model = model
field_descriptor = getattr(parent_model, self.parent_fk_name)
field = field_descriptor.field
        # Use the name and the help_text of the owning model's field to
        # render the verbose_name and verbose_name_plural texts.
self.verbose_name_plural = field.verbose_name.title()
self.verbose_name = field.help_text
if not self.verbose_name:
self.verbose_name = self.verbose_name_plural
super(ReverseInlineModelAdmin, self).__init__(parent_model, admin_site)
def get_formset(self, request, obj = None, **kwargs):
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(kwargs.get("exclude", []))
exclude.extend(self.get_readonly_fields(request, obj))
# if exclude is an empty list we use None, since that's the actual
# default
exclude = exclude or None
defaults = {
"form": self.form,
"fields": fields,
"exclude": exclude,
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return reverse_inlineformset_factory(self.parent_model,
self.model,
self.parent_fk_name,
self.formset,
**defaults)
|
{
"content_hash": "bcc9c284e18ea63e93293e2d49c02a12",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 92,
"avg_line_length": 38.88559322033898,
"alnum_prop": 0.5980167810831426,
"repo_name": "zeehio/META-SHARE",
"id": "fe661f309d42cc29708c0ecab2abea9b003de2c3",
"size": "9177",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "metashare/repository/editor/inlines.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7362"
},
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "112277"
},
{
"name": "CSS",
"bytes": "125117"
},
{
"name": "HTML",
"bytes": "2956138"
},
{
"name": "Java",
"bytes": "12780"
},
{
"name": "JavaScript",
"bytes": "201032"
},
{
"name": "M4",
"bytes": "8416"
},
{
"name": "Makefile",
"bytes": "26172"
},
{
"name": "Python",
"bytes": "4084877"
},
{
"name": "Shell",
"bytes": "121386"
},
{
"name": "XSLT",
"bytes": "473763"
}
],
"symlink_target": ""
}
|
from django.test import LiveServerTestCase
from selenium import webdriver
class NewVisitorTest(LiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_visit_homepage(self):
# She goes to check out its homepage
self.browser.get(self.live_server_url)
        # She notices the page title and header mention the project name
self.assertIn('depp-tracking-project', self.browser.title)
self.fail('Finish the test!')
|
{
"content_hash": "fc4185a6268951b3b7181fff22497d42",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 30,
"alnum_prop": 0.6888888888888889,
"repo_name": "DeppSRL/depp-tracking",
"id": "a352bc9176a53ba9ea75d1c78317c2d2ef21b36e",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/tests/functional_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37"
},
{
"name": "HTML",
"bytes": "5432"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Python",
"bytes": "65001"
},
{
"name": "SQLPL",
"bytes": "300568"
}
],
"symlink_target": ""
}
|
"""
Tests for the text Label class.
"""
from reportlab.lib.testutils import setOutDir, makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os, sys, copy
from os.path import join, basename, splitext
import unittest
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import *
from reportlab.graphics.charts.textlabels import Label
from reportlab.platypus.flowables import Spacer, PageBreak
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate
def myMainPageFrame(canvas, doc):
"The page frame used for all PDF documents."
canvas.saveState()
#canvas.rect(2.5*cm, 2.5*cm, 15*cm, 25*cm)
canvas.setFont('Times-Roman', 12)
pageNumber = canvas.getPageNumber()
canvas.drawString(10*cm, cm, str(pageNumber))
canvas.restoreState()
class MyDocTemplate(BaseDocTemplate):
"The document template used for all PDF documents."
_invalidInitArgs = ('pageTemplates',)
def __init__(self, filename, **kw):
frame1 = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
self.allowSplitting = 0
BaseDocTemplate.__init__(self, filename, **kw)
template = PageTemplate('normal', [frame1], myMainPageFrame)
self.addPageTemplates(template)
class LabelTestCase(unittest.TestCase):
"Test Label class."
def _test0(self):
"Perform original test function."
pdfPath = outputfile('test_charts_textlabels.pdf')
c = Canvas(pdfPath)
label = Label()
demoLabel = label.demo()
demoLabel.drawOn(c, 0, 0)
c.save()
def _makeProtoLabel(self):
"Return a label prototype for further modification."
protoLabel = Label()
protoLabel.dx = 0
protoLabel.dy = 0
protoLabel.boxStrokeWidth = 0.1
protoLabel.boxStrokeColor = colors.black
protoLabel.boxFillColor = colors.yellow
# protoLabel.text = 'Hello World!' # Does not work as expected.
return protoLabel
def _makeDrawings(self, protoLabel, text=None):
# Set drawing dimensions.
w, h = drawWidth, drawHeight = 400, 100
drawings = []
for boxAnchors in ('sw se nw ne', 'w e n s', 'c'):
boxAnchors = boxAnchors.split(' ')
# Create drawing.
d = Drawing(w, h)
d.add(Line(0, h*0.5, w, h*0.5, strokeColor=colors.gray, strokeWidth=0.5))
d.add(Line(w*0.5 ,0, w*0.5, h, strokeColor=colors.gray, strokeWidth=0.5))
labels = []
for boxAnchor in boxAnchors:
# Modify label, put it on a drawing.
label = copy.deepcopy(protoLabel)
label.boxAnchor = boxAnchor
args = {'ba':boxAnchor, 'text':text or 'Hello World!'}
label.setText('(%(ba)s) %(text)s (%(ba)s)' % args)
labels.append(label)
for label in labels:
d.add(label)
drawings.append(d)
return drawings
def test1(self):
"Test all different box anchors."
# Build story.
story = []
styleSheet = getSampleStyleSheet()
bt = styleSheet['BodyText']
h1 = styleSheet['Heading1']
h2 = styleSheet['Heading2']
h3 = styleSheet['Heading3']
story.append(Paragraph('Tests for class <i>Label</i>', h1))
story.append(Paragraph('Testing box anchors', h2))
story.append(Paragraph("""This should display "Hello World" labels
written as black text on a yellow box relative to the origin of the crosshair
axes. The labels indicate their relative position being one of the nine
canonical points of a box: sw, se, nw, ne, w, e, n, s or c (standing for
<i>southwest</i>, <i>southeast</i>... and <i>center</i>).""", bt))
story.append(Spacer(0, 0.5*cm))
# Round 1a
story.append(Paragraph('Helvetica 10pt', h3))
story.append(Spacer(0, 0.5*cm))
w, h = drawWidth, drawHeight = 400, 100
protoLabel = self._makeProtoLabel()
protoLabel.setOrigin(drawWidth*0.5, drawHeight*0.5)
protoLabel.textAnchor = 'start'
protoLabel.fontName = 'Helvetica'
protoLabel.fontSize = 10
drawings = self._makeDrawings(protoLabel)
for d in drawings:
story.append(d)
story.append(Spacer(0, 1*cm))
story.append(PageBreak())
# Round 1b
story.append(Paragraph('Helvetica 18pt', h3))
story.append(Spacer(0, 0.5*cm))
w, h = drawWidth, drawHeight = 400, 100
protoLabel = self._makeProtoLabel()
protoLabel.setOrigin(drawWidth*0.5, drawHeight*0.5)
protoLabel.textAnchor = 'start'
protoLabel.fontName = 'Helvetica'
protoLabel.fontSize = 18
drawings = self._makeDrawings(protoLabel)
for d in drawings:
story.append(d)
story.append(Spacer(0, 1*cm))
story.append(PageBreak())
# Round 1c
story.append(Paragraph('Helvetica 18pt, multi-line', h3))
story.append(Spacer(0, 0.5*cm))
w, h = drawWidth, drawHeight = 400, 100
protoLabel = self._makeProtoLabel()
protoLabel.setOrigin(drawWidth*0.5, drawHeight*0.5)
protoLabel.textAnchor = 'start'
protoLabel.fontName = 'Helvetica'
protoLabel.fontSize = 18
drawings = self._makeDrawings(protoLabel, text='Hello\nWorld!')
for d in drawings:
story.append(d)
story.append(Spacer(0, 1*cm))
story.append(PageBreak())
story.append(Paragraph('Testing text (and box) anchors', h2))
story.append(Paragraph("""This should display labels as before,
        but now with a fixed size and showing some effect of setting the
textAnchor attribute.""", bt))
story.append(Spacer(0, 0.5*cm))
# Round 2a
story.append(Paragraph("Helvetica 10pt, textAnchor='start'", h3))
story.append(Spacer(0, 0.5*cm))
w, h = drawWidth, drawHeight = 400, 100
protoLabel = self._makeProtoLabel()
protoLabel.setOrigin(drawWidth*0.5, drawHeight*0.5)
protoLabel.width = 4*cm
protoLabel.height = 1.5*cm
protoLabel.textAnchor = 'start'
protoLabel.fontName = 'Helvetica'
protoLabel.fontSize = 10
drawings = self._makeDrawings(protoLabel)
for d in drawings:
story.append(d)
story.append(Spacer(0, 1*cm))
story.append(PageBreak())
# Round 2b
story.append(Paragraph("Helvetica 10pt, textAnchor='middle'", h3))
story.append(Spacer(0, 0.5*cm))
w, h = drawWidth, drawHeight = 400, 100
protoLabel = self._makeProtoLabel()
protoLabel.setOrigin(drawWidth*0.5, drawHeight*0.5)
protoLabel.width = 4*cm
protoLabel.height = 1.5*cm
protoLabel.textAnchor = 'middle'
protoLabel.fontName = 'Helvetica'
protoLabel.fontSize = 10
drawings = self._makeDrawings(protoLabel)
for d in drawings:
story.append(d)
story.append(Spacer(0, 1*cm))
story.append(PageBreak())
# Round 2c
story.append(Paragraph("Helvetica 10pt, textAnchor='end'", h3))
story.append(Spacer(0, 0.5*cm))
w, h = drawWidth, drawHeight = 400, 100
protoLabel = self._makeProtoLabel()
protoLabel.setOrigin(drawWidth*0.5, drawHeight*0.5)
protoLabel.width = 4*cm
protoLabel.height = 1.5*cm
protoLabel.textAnchor = 'end'
protoLabel.fontName = 'Helvetica'
protoLabel.fontSize = 10
drawings = self._makeDrawings(protoLabel)
for d in drawings:
story.append(d)
story.append(Spacer(0, 1*cm))
story.append(PageBreak())
# Round 2d
story.append(Paragraph("Helvetica 10pt, multi-line, textAnchor='start'", h3))
story.append(Spacer(0, 0.5*cm))
w, h = drawWidth, drawHeight = 400, 100
protoLabel = self._makeProtoLabel()
protoLabel.setOrigin(drawWidth*0.5, drawHeight*0.5)
protoLabel.width = 4*cm
protoLabel.height = 1.5*cm
protoLabel.textAnchor = 'start'
protoLabel.fontName = 'Helvetica'
protoLabel.fontSize = 10
drawings = self._makeDrawings(protoLabel, text='Hello\nWorld!')
for d in drawings:
story.append(d)
story.append(Spacer(0, 1*cm))
story.append(PageBreak())
path = outputfile('test_charts_textlabels.pdf')
doc = MyDocTemplate(path)
doc.multiBuild(story)
def makeSuite():
return makeSuiteForClasses(LabelTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
|
{
"content_hash": "99441c324f6e245c34eef2c9a54f1332",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 102,
"avg_line_length": 32.9963768115942,
"alnum_prop": 0.6222685846052487,
"repo_name": "malexandre/python-xhtml2pdf-demo",
"id": "dfe462229d91735d6295a289d7986c87a192d2f8",
"size": "9187",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "reportlab/tests/test_charts_textlabels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "720407"
},
{
"name": "C++",
"bytes": "2019"
},
{
"name": "CSS",
"bytes": "16419"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "4321122"
},
{
"name": "Shell",
"bytes": "4864"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import base64
import os
import sys
if sys.version_info >= (3, 0):
from urllib.parse import quote
from io import BytesIO as IOClass
else:
from urllib import quote
try:
from cStringIO import StringIO as IOClass
except ImportError:
from StringIO import StringIO as IOClass
from PIL import Image
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.firefox.webdriver import WebDriver as Firefox
from selenium.webdriver.chrome.webdriver import WebDriver as Chrome
from selenium.webdriver.ie.webdriver import WebDriver as Ie
from selenium.webdriver.opera.webdriver import WebDriver as Opera
from selenium.webdriver.safari.webdriver import WebDriver as Safari
from selenium.webdriver.phantomjs.webdriver import WebDriver as PhantomJS
from selenium.webdriver.remote.webdriver import WebDriver as Remote
try:
# Added in selenium 3.0.0.b3
from selenium.webdriver.firefox.webelement import FirefoxWebElement
except ImportError:
from selenium.webdriver.remote.webelement import WebElement as FirefoxWebElement
class NeedleWebElementMixin(object):
"""
An element on a page that Selenium has opened.
It is a Selenium :py:class:`~selenium.webdriver.remote.webelement.WebElement`
object with some extra methods for testing CSS.
"""
def get_dimensions(self):
"""
Returns a dictionary containing, in pixels, the element's ``width`` and
        ``height``, and its ``left`` and ``top`` position relative to the document.
"""
location = self.location
size = self.size
return {
"top": location['y'],
"left": location['x'],
"width": size['width'],
"height": size['height']
}
def get_screenshot(self):
"""
Returns a screenshot of this element as a PIL image.
"""
d = self.get_dimensions()
# Cast values to int in order for _ImageCrop not to break
d['left'] = int(d['left'])
d['top'] = int(d['top'])
d['width'] = int(d['width'])
d['height'] = int(d['height'])
try:
# For selenium >= 2.46.1, W3C WebDriver spec drivers (like geckodriver)
fh = IOClass(self.screenshot_as_png)
image = Image.open(fh).convert('RGB')
# Make sure it isn't actually a full-page screenshot (PhantomJS)
if image.size == (d['width'], d['height']):
return image
except (AttributeError, WebDriverException):
# Fall back to cropping a full page screenshot
image = self._parent.get_screenshot_as_image()
return image.crop((
d['left'],
d['top'],
d['left'] + d['width'],
d['top'] + d['height'],
))
class NeedleWebDriverMixin(object):
"""
Selenium WebDriver mixin with some extra methods for testing CSS.
"""
def load_html(self, html):
"""
Similar to :py:meth:`get`, but instead of passing a URL to load in the
browser, the HTML for the page is provided.
"""
self.get('data:text/html,' + quote(html))
def get_screenshot_as_image(self):
"""
Returns a screenshot of the current page as an RGB
`PIL image <http://www.pythonware.com/library/pil/handbook/image.htm>`_.
"""
fh = IOClass(base64.b64decode(self.get_screenshot_as_base64().encode('ascii')))
return Image.open(fh).convert('RGB')
def load_jquery(self):
"""
Loads jQuery onto the current page so calls to
:py:meth:`execute_script` have access to it.
"""
if (self.execute_script('return typeof(jQuery)') == 'undefined'):
self.execute_script(open(
os.path.join(self._get_js_path(), 'jquery-1.11.0.min.js')
).read() + '\nreturn "";')
def _get_js_path(self):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'js')
def create_web_element(self, element_id, *args, **kwargs):
if isinstance(self, NeedleFirefox):
return NeedleFirefoxWebElement(self, element_id, w3c=self.w3c, *args, **kwargs)
else:
return NeedleWebElement(self, element_id, w3c=self.w3c, *args, **kwargs)
class NeedleRemote(NeedleWebDriverMixin, Remote):
"""
The same as Selenium's remote WebDriver, but with NeedleWebDriverMixin's
functionality.
"""
class NeedlePhantomJS(NeedleWebDriverMixin, PhantomJS):
"""
The same as Selenium's PhantomJS WebDriver, but with NeedleWebDriverMixin's
functionality.
"""
class NeedleFirefox(NeedleWebDriverMixin, Firefox):
"""
The same as Selenium's Firefox WebDriver, but with NeedleWebDriverMixin's
functionality.
"""
class NeedleChrome(NeedleWebDriverMixin, Chrome):
"""
The same as Selenium's Chrome WebDriver, but with NeedleWebDriverMixin's
functionality.
"""
class NeedleIe(NeedleWebDriverMixin, Ie):
"""
The same as Selenium's Internet Explorer WebDriver, but with
NeedleWebDriverMixin's functionality.
"""
class NeedleOpera(NeedleWebDriverMixin, Opera):
"""
The same as Selenium's Opera WebDriver, but with NeedleWebDriverMixin's
functionality.
"""
class NeedleSafari(NeedleWebDriverMixin, Safari):
"""
The same as Selenium's Safari WebDriver, but with NeedleWebDriverMixin's
functionality.
"""
class NeedleWebElement(NeedleWebElementMixin, WebElement):
"""
The same as Selenium's WebElement, but with NeedleWebElementMixin's
functionality.
"""
class NeedleFirefoxWebElement(NeedleWebElementMixin, FirefoxWebElement):
"""
The same as Selenium's FirefoxWebElement, but with NeedleWebElementMixin's
functionality.
"""
|
{
"content_hash": "499601b48739aead0f244e3d092e5c6d",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 91,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.6516891891891892,
"repo_name": "bfirsh/needle",
"id": "cd10b49d660d6ad94c44690ffe2e85d1490ea6dc",
"size": "5938",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "needle/driver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41203"
}
],
"symlink_target": ""
}
|
from nose import SkipTest
from nose.tools import raises
from dxr.testing import DxrInstanceTestCase
class PathAndFileFilterTests(DxrInstanceTestCase):
"""Basic tests for functionality of the 'path:' and 'file:' filters"""
def test_basic_path_results(self):
"""Check that a 'path:' result includes both file and folder matches."""
self.found_files_eq('path:fish', ['fish1', 'fishy_folder/fish2',
'fishy_folder/gill', 'folder/fish3',
'folder/fish4'])
def test_basic_file_results(self):
"""Check that a 'file:' result includes only file matches."""
self.found_files_eq('file:fish', ['fish1', 'fishy_folder/fish2',
'folder/fish3', 'folder/fish4'])
def test_path_and_file_line_promotion(self):
"""Make sure promotion of a 'path:' or 'file:' filter to a LINE query
works.
"""
self.found_files_eq('path:fish fins', ['folder/fish3'])
self.found_files_eq('file:fish fins', ['folder/fish3'])
# This fails because we currently intentionally exclude folder paths from
# FILE query results - remove the @raises line when that's changed. (Of
# course then other tests here will need to be updated as well.)
@raises(AssertionError)
def test_empty_folder_path_results(self):
"""Check that 'path:' results include empty folders."""
self.found_files_eq('path:empty_folder', ['empty_folder'])
def test_basic_wildcard(self):
"""Test basic wildcard functionality."""
# 'path:' and 'file:' currently have the same underlying wildcard
# support, so we're spreading out the basic wildcard testing over both.
self.found_files_eq('path:fish?_fo*er',
['fishy_folder/fish2', 'fishy_folder/gill'])
self.found_files_eq('file:fish[14]', ['fish1', 'folder/fish4'])
def test_unicode(self):
"""Make sure searching for non-ASCII names works."""
raise SkipTest('This test fails on Travis but passes locally. It may '
'be because of an LC_ALL difference.')
self.found_files_eq(u'file:fre\u0301mium*', [u'fre\u0301mium.txt'])
# This one fails locally, perhaps because é is normalized differently
# in ES than here. See bug 1291471.
# self.found_files_eq(u'file:frémium*', [u'frémium.txt'])
|
{
"content_hash": "842d68ca0f5eff0b43926ac2508c58d3",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 45.592592592592595,
"alnum_prop": 0.6141348497156783,
"repo_name": "pelmers/dxr",
"id": "3ebf626b0a3756f7abaa0acf5a506851cc6636da",
"size": "2490",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_path_file_filters/test_path_file_filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1974"
},
{
"name": "C++",
"bytes": "81663"
},
{
"name": "CSS",
"bytes": "24071"
},
{
"name": "HTML",
"bytes": "44076"
},
{
"name": "IDL",
"bytes": "8448"
},
{
"name": "JavaScript",
"bytes": "82076"
},
{
"name": "Makefile",
"bytes": "10011"
},
{
"name": "Python",
"bytes": "736868"
},
{
"name": "Rust",
"bytes": "11710"
},
{
"name": "Shell",
"bytes": "2524"
}
],
"symlink_target": ""
}
|
class InstallationContext(object):
def __init__(self):
super(InstallationContext, self).__init__()
self.installed = set()
|
{
"content_hash": "0b06f314437ac5508655f1a05129e86d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 51,
"avg_line_length": 35.5,
"alnum_prop": 0.6267605633802817,
"repo_name": "vmalloc/pydeploy",
"id": "8f1256543a5c87bf22c5dd235222b9ae0922ec71",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydeploy/installation_context.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "79314"
}
],
"symlink_target": ""
}
|
import codecs
import datetime
import locale
from decimal import Decimal
from urllib.parse import quote
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
super().__init__(*args)
def __str__(self):
return "%s. You passed in %r (%s)" % (
super().__str__(),
self.obj,
type(self.obj),
)
def smart_str(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Return a string representing 's'. Treat bytestrings using the 'encoding'
codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_str(s, encoding, strings_only, errors)
_PROTECTED_TYPES = (
type(None),
int,
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_str(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_str(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_str(), except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), str):
return s
if strings_only and is_protected_type(s):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s
def smart_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Return a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, memoryview):
return bytes(s)
return str(s).encode(encoding, errors)
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from RFC 3987 Section 3.1, slightly simplified since
the input is assumed to be a string rather than an arbitrary byte stream.
Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or
b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded
result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in RFC 3986 Sections 2.2 and 2.3:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.parse.quote() already considers all
# but the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of RFC 3987 Section 3.1 specifically mentions that % must not be
# converted.
if iri is None:
return iri
elif isinstance(iri, Promise):
iri = str(iri)
return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")
# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
_hextobyte = {
(fmt % char).encode(): bytes((char,))
for ascii_range in _ascii_ranges
for char in ascii_range
for fmt in ["%02x", "%02X"]
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# Unicode characters.
_hexdig = "0123456789ABCDEFabcdef"
_hextobyte.update(
{(a + b).encode(): bytes.fromhex(a + b) for a in _hexdig[8:] for b in _hexdig}
)
def uri_to_iri(uri):
"""
Convert a Uniform Resource Identifier(URI) into an Internationalized
Resource Identifier(IRI).
This is the algorithm from RFC 3987 Section 3.2, excluding step 4.
Take an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
a string containing the encoded result (e.g. '/I%20♥%20Django/').
"""
if uri is None:
return uri
uri = force_bytes(uri)
# Fast selective unquote: First, split on '%' and then starting with the
# second block, decode the first 2 bytes if they represent a hex code to
# decode. The rest of the block is the part after '%AB', not containing
# any '%'. Add that to the output without further processing.
bits = uri.split(b"%")
if len(bits) == 1:
iri = uri
else:
parts = [bits[0]]
append = parts.append
hextobyte = _hextobyte
for item in bits[1:]:
hex = item[:2]
if hex in hextobyte:
append(hextobyte[item[:2]])
append(item[2:])
else:
append(b"%")
append(item)
iri = b"".join(parts)
return repercent_broken_unicode(iri).decode()
def escape_uri_path(path):
"""
Escape the unsafe characters from the path portion of a Uniform Resource
Identifier (URI).
"""
# These are the "reserved" and "unreserved" characters specified in RFC
# 3986 Sections 2.2 and 2.3:
# reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
# unreserved = alphanum | mark
# mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
# The list of safe characters here is constructed subtracting ";", "=",
# and "?" according to RFC 3986 Section 3.3.
# The reason for not subtracting and escaping "/" is that we are escaping
# the entire path, not a path segment.
return quote(path, safe="/:@&+$,-_.!~*'()")
def punycode(domain):
"""Return the Punycode of the given domain if it's non-ASCII."""
return domain.encode("idna").decode("ascii")
def repercent_broken_unicode(path):
"""
As per RFC 3987 Section 3.2, step three of converting a URI into an IRI,
repercent-encode any octet produced that is not part of a strictly legal
UTF-8 octet sequence.
"""
while True:
try:
path.decode()
except UnicodeDecodeError as e:
# CVE-2019-14235: A recursion shouldn't be used since the exception
# handling uses massive amounts of memory
repercent = quote(path[e.start : e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
path = path[: e.start] + repercent.encode() + path[e.end :]
else:
return path
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
Encode certain chars that would normally be recognized as special chars
for URIs. Do not encode the ' character, as it is a valid character
within URIs. See the encodeURIComponent() JavaScript function for details.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(str(path).replace("\\", "/"), safe="/~!*()'")
def get_system_encoding():
"""
The encoding for the character type functions. Fallback to 'ascii' if the
    encoding is unsupported by Python or could not be determined. See tickets
#10335 and #5846.
"""
try:
encoding = locale.getlocale()[1] or "ascii"
codecs.lookup(encoding)
except Exception:
encoding = "ascii"
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
|
{
"content_hash": "ee072910b03ea6db6173309aaf8938bc",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 82,
"avg_line_length": 33.30798479087453,
"alnum_prop": 0.6106164383561644,
"repo_name": "django/django",
"id": "43847b5385101f64d1393bde7aed1d7e3ec01380",
"size": "8766",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "django/utils/encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91986"
},
{
"name": "HTML",
"bytes": "238949"
},
{
"name": "JavaScript",
"bytes": "157441"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16195279"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
    # getopt has no usage() helper; report the parse error before exiting.
    print(err)
    sys.exit(2)
n = 1
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*7
return prefix + "".join([random.choice(symbols) for x in range(random.randrange(maxlen))])
testdata = [Group(name="", header="", footer="")] + \
[
Group(name=name, header=header, footer=footer)
for name in ["", random_string("name", 15)]
for header in ["", random_string("header", 15)]
for footer in ["", random_string("footer", 15)]
for m in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
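# Example invocation (illustrative): generate 3 variants per
# name/header/footer combination and write them to data/groups.json,
# resolved relative to the project root.
#
#     python generator/group.py -n 3 -f data/groups.json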
|
{
"content_hash": "12dce170e0d3a2dfbbe4ec76fe40696f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 94,
"avg_line_length": 24.533333333333335,
"alnum_prop": 0.625,
"repo_name": "esemin83/python_training",
"id": "a7f951a2672394d2ae435c9da6a2f94bb81701a6",
"size": "1104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generator/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "1763"
},
{
"name": "HTML",
"bytes": "419542"
},
{
"name": "Python",
"bytes": "63185"
},
{
"name": "RobotFramework",
"bytes": "2121"
}
],
"symlink_target": ""
}
|
__all__ = [
'Route53DNSDriver'
]
import base64
import hmac
import datetime
import uuid
from libcloud.utils.py3 import httplib
from hashlib import sha1
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b, urlencode
from libcloud.utils.xml import findtext, findall, fixxpath
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.common.types import LibcloudError
from libcloud.common.aws import AWSGenericResponse
from libcloud.common.base import ConnectionUserAndKey
API_VERSION = '2012-02-29'
API_HOST = 'route53.amazonaws.com'
API_ROOT = '/%s/' % (API_VERSION)
NAMESPACE = 'https://%s/doc%s' % (API_HOST, API_ROOT)
class InvalidChangeBatch(LibcloudError):
pass
class Route53DNSResponse(AWSGenericResponse):
"""
Amazon Route53 response class.
"""
namespace = NAMESPACE
xpath = 'Error'
exceptions = {
'NoSuchHostedZone': ZoneDoesNotExistError,
'InvalidChangeBatch': InvalidChangeBatch,
}
class Route53Connection(ConnectionUserAndKey):
host = API_HOST
responseCls = Route53DNSResponse
def pre_connect_hook(self, params, headers):
time_string = datetime.datetime.utcnow() \
.strftime('%a, %d %b %Y %H:%M:%S GMT')
headers['Date'] = time_string
tmp = []
signature = self._get_aws_auth_b64(self.key, time_string)
auth = {'AWSAccessKeyId': self.user_id, 'Signature': signature,
'Algorithm': 'HmacSHA1'}
for k, v in auth.items():
tmp.append('%s=%s' % (k, v))
headers['X-Amzn-Authorization'] = 'AWS3-HTTPS ' + ','.join(tmp)
return params, headers
def _get_aws_auth_b64(self, secret_key, time_string):
b64_hmac = base64.b64encode(
hmac.new(b(secret_key), b(time_string), digestmod=sha1).digest()
)
return b64_hmac.decode('utf-8')
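# A standalone sketch of the same signature computation (made-up key and
# date), useful for checking the header format without a live connection:
#
#     import base64, hmac
#     from hashlib import sha1
#     sig = base64.b64encode(hmac.new(b'secret',
#                                     b'Tue, 01 Jan 2013 00:00:00 GMT',
#                                     digestmod=sha1).digest()).decode('utf-8')
#     header = 'AWS3-HTTPS AWSAccessKeyId=AKIDEXAMPLE,Signature=%s,' \
#              'Algorithm=HmacSHA1' % sig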
class Route53DNSDriver(DNSDriver):
type = Provider.ROUTE53
name = 'Route53 DNS'
website = 'http://aws.amazon.com/route53/'
connectionCls = Route53Connection
    RECORD_TYPE_MAP = {
        RecordType.NS: 'NS',
        RecordType.MX: 'MX',
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.TXT: 'TXT',
        RecordType.SRV: 'SRV',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SPF: 'SPF'
    }
def list_zones(self):
data = self.connection.request(API_ROOT + 'hostedzone').object
zones = self._to_zones(data=data)
return zones
def list_records(self, zone):
self.connection.set_context({'zone_id': zone.id})
uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
data = self.connection.request(uri).object
records = self._to_records(data=data, zone=zone)
return records
def get_zone(self, zone_id):
self.connection.set_context({'zone_id': zone_id})
uri = API_ROOT + 'hostedzone/' + zone_id
data = self.connection.request(uri).object
elem = findall(element=data, xpath='HostedZone',
namespace=NAMESPACE)[0]
return self._to_zone(elem)
def get_record(self, zone_id, record_id):
zone = self.get_zone(zone_id=zone_id)
record_type, name = record_id.split(':', 1)
if name:
full_name = ".".join((name, zone.domain))
else:
full_name = zone.domain
self.connection.set_context({'zone_id': zone_id})
params = urlencode({
'name': full_name,
'type': record_type,
'maxitems': '1'
})
uri = API_ROOT + 'hostedzone/' + zone_id + '/rrset?' + params
data = self.connection.request(uri).object
record = self._to_records(data=data, zone=zone)[0]
        # The /rrset filters act more like pagination hints than true
        # filters, so a result can be returned even if it's not the record
        # that was asked for.
record_type_num = self._string_to_record_type(record_type)
if record.name != name or record.type != record_type_num:
raise RecordDoesNotExistError(value='', driver=self,
record_id=record_id)
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
zone = ET.Element('CreateHostedZoneRequest', {'xmlns': NAMESPACE})
ET.SubElement(zone, 'Name').text = domain
ET.SubElement(zone, 'CallerReference').text = str(uuid.uuid4())
if extra and 'Comment' in extra:
hzg = ET.SubElement(zone, 'HostedZoneConfig')
ET.SubElement(hzg, 'Comment').text = extra['Comment']
uri = API_ROOT + 'hostedzone'
data = ET.tostring(zone)
rsp = self.connection.request(uri, method='POST', data=data).object
elem = findall(element=rsp, xpath='HostedZone', namespace=NAMESPACE)[0]
return self._to_zone(elem=elem)
def delete_zone(self, zone, ex_delete_records=False):
self.connection.set_context({'zone_id': zone.id})
if ex_delete_records:
self.ex_delete_all_records(zone=zone)
uri = API_ROOT + 'hostedzone/%s' % (zone.id)
response = self.connection.request(uri, method='DELETE')
return response.status in [httplib.OK]
def create_record(self, name, zone, type, data, extra=None):
batch = [('CREATE', name, type, data, extra)]
self._post_changeset(zone, batch)
id = ':'.join((self.RECORD_TYPE_MAP[type], name))
return Record(id=id, name=name, type=type, data=data, zone=zone,
driver=self, extra=extra)
def update_record(self, record, name, type, data, extra):
batch = [
('DELETE', record.name, record.type, record.data, record.extra),
('CREATE', name, type, data, extra)]
self._post_changeset(record.zone, batch)
id = ':'.join((self.RECORD_TYPE_MAP[type], name))
return Record(id=id, name=name, type=type, data=data, zone=record.zone,
driver=self, extra=extra)
def delete_record(self, record):
try:
r = record
batch = [('DELETE', r.name, r.type, r.data, r.extra)]
self._post_changeset(record.zone, batch)
except InvalidChangeBatch:
raise RecordDoesNotExistError(value='', driver=self,
record_id=r.id)
return True
def ex_delete_all_records(self, zone):
"""
Remove all the records for the provided zone.
@param zone: Zone to delete records for.
@type zone: L{Zone}
"""
deletions = []
for r in zone.list_records():
if r.type in (RecordType.NS, RecordType.SOA):
continue
deletions.append(('DELETE', r.name, r.type, r.data, r.extra))
if deletions:
self._post_changeset(zone, deletions)
def _post_changeset(self, zone, changes_list):
attrs = {'xmlns': NAMESPACE}
changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
batch = ET.SubElement(changeset, 'ChangeBatch')
changes = ET.SubElement(batch, 'Changes')
for action, name, type_, data, extra in changes_list:
change = ET.SubElement(changes, 'Change')
ET.SubElement(change, 'Action').text = action
rrs = ET.SubElement(change, 'ResourceRecordSet')
ET.SubElement(rrs, 'Name').text = name + "." + zone.domain
ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type_]
ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))
rrecs = ET.SubElement(rrs, 'ResourceRecords')
rrec = ET.SubElement(rrecs, 'ResourceRecord')
ET.SubElement(rrec, 'Value').text = data
uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
data = ET.tostring(changeset)
self.connection.set_context({'zone_id': zone.id})
self.connection.request(uri, method='POST', data=data)
def _to_zones(self, data):
zones = []
for element in data.findall(fixxpath(xpath='HostedZones/HostedZone',
namespace=NAMESPACE)):
zones.append(self._to_zone(element))
return zones
def _to_zone(self, elem):
name = findtext(element=elem, xpath='Name', namespace=NAMESPACE)
id = findtext(element=elem, xpath='Id',
namespace=NAMESPACE).replace('/hostedzone/', '')
comment = findtext(element=elem, xpath='Config/Comment',
namespace=NAMESPACE)
resource_record_count = int(findtext(element=elem,
xpath='ResourceRecordSetCount',
namespace=NAMESPACE))
extra = {'Comment': comment, 'ResourceRecordSetCount':
resource_record_count}
zone = Zone(id=id, domain=name, type='master', ttl=0, driver=self,
extra=extra)
return zone
def _to_records(self, data, zone):
records = []
elems = data.findall(
fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
namespace=NAMESPACE))
for elem in elems:
records.append(self._to_record(elem, zone))
return records
def _to_record(self, elem, zone):
name = findtext(element=elem, xpath='Name',
namespace=NAMESPACE)
name = name[:-len(zone.domain) - 1]
type = self._string_to_record_type(findtext(element=elem, xpath='Type',
namespace=NAMESPACE))
ttl = findtext(element=elem, xpath='TTL', namespace=NAMESPACE)
# TODO: Support records with multiple values
value_elem = elem.findall(
fixxpath(xpath='ResourceRecords/ResourceRecord',
namespace=NAMESPACE))[0]
data = findtext(element=(value_elem), xpath='Value',
namespace=NAMESPACE)
extra = {'ttl': ttl}
id = ':'.join((self.RECORD_TYPE_MAP[type], name))
record = Record(id=id, name=name, type=type, data=data, zone=zone,
driver=self, extra=extra)
return record
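# Usage sketch (credentials and zone id are placeholders; requires network
# access to Route53):
#
#     driver = Route53DNSDriver('AKIDEXAMPLE', 'secret-key')
#     for zone in driver.list_zones():
#         print(zone.domain, zone.extra['ResourceRecordSetCount'])
#     zone = driver.get_zone('ZONEID')
#     record = driver.create_record(name='www', zone=zone,
#                                   type=RecordType.A, data='192.0.2.1',
#                                   extra={'ttl': 300})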
|
{
"content_hash": "3c692f3cd2bc28036d093b4a9085167f",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 79,
"avg_line_length": 35.840677966101694,
"alnum_prop": 0.5863047384848198,
"repo_name": "IsCoolEntertainment/debpkg_libcloud",
"id": "64918ade6f8473a93ff1e9c683e183ca67613d41",
"size": "11355",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "libcloud/dns/drivers/route53.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2037599"
}
],
"symlink_target": ""
}
|
import os
import sys
import install_venv_common as install_venv
def print_help(venv, root):
help = """
Heat development environment setup is complete.
Heat development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Heat virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
Also, 'make test' will automatically use the virtualenv.
"""
print(help % (venv, root))
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if os.environ.get('tools_path'):
root = os.environ['tools_path']
venv = os.path.join(root, '.venv')
if os.environ.get('venv'):
venv = os.environ['venv']
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Heat'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
install.post_process()
print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)
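# Example invocation (illustrative; the available options come from
# install_venv_common.InstallVenv.parse_args):
#
#     python tools/install_venv.py --no-site-packages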
|
{
"content_hash": "cd8f68a336a79f281fda2fa1e02174ce",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 30.0188679245283,
"alnum_prop": 0.6643620364550598,
"repo_name": "JioCloud/heat",
"id": "d4cf4d6a9447d0b1d6342640e4a9e2767038f05a",
"size": "2435",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/install_venv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2807748"
},
{
"name": "Shell",
"bytes": "21618"
}
],
"symlink_target": ""
}
|
from ._cirq_to_stim import cirq_circuit_to_stim_circuit
from ._det_annotation import DetAnnotation
from ._obs_annotation import CumulativeObservableAnnotation
from ._shift_coords_annotation import ShiftCoordsAnnotation
from ._stim_sampler import StimSampler
from ._stim_to_cirq import (
MeasureAndOrResetGate,
stim_circuit_to_cirq_circuit,
TwoQubitAsymmetricDepolarizingChannel,
)
from ._sweep_pauli import SweepPauli
JSON_RESOLVERS_DICT = {
"CumulativeObservableAnnotation": CumulativeObservableAnnotation,
"DetAnnotation": DetAnnotation,
"MeasureAndOrResetGate": MeasureAndOrResetGate,
"ShiftCoordsAnnotation": ShiftCoordsAnnotation,
"SweepPauli": SweepPauli,
"TwoQubitAsymmetricDepolarizingChannel": TwoQubitAsymmetricDepolarizingChannel,
}
JSON_RESOLVER = JSON_RESOLVERS_DICT.get
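# A usage sketch (illustrative; ``serialized`` is cirq JSON text containing
# stimcirq objects, and cirq.DEFAULT_RESOLVERS is cirq's standard resolver
# list):
#
#     import cirq
#     circuit = cirq.read_json(
#         json_text=serialized,
#         resolvers=[*cirq.DEFAULT_RESOLVERS, JSON_RESOLVER])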
|
{
"content_hash": "39266db434fdb0d243f78479a2baae79",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 83,
"avg_line_length": 39.095238095238095,
"alnum_prop": 0.8063337393422655,
"repo_name": "quantumlib/Stim",
"id": "6549429eb9cd918ead7c61eb96446d91fce0c2a0",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "glue/cirq/stimcirq/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4500"
},
{
"name": "C++",
"bytes": "2703579"
},
{
"name": "CMake",
"bytes": "4590"
},
{
"name": "HTML",
"bytes": "8333"
},
{
"name": "JavaScript",
"bytes": "14013"
},
{
"name": "Python",
"bytes": "877557"
},
{
"name": "Shell",
"bytes": "4765"
},
{
"name": "Starlark",
"bytes": "3470"
}
],
"symlink_target": ""
}
|
"""
:mod:`nova` -- Cloud IaaS Platform
===================================
.. automodule:: nova
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
import gettext
gettext.install("nova", unicode=1)
|
{
"content_hash": "408690b859cb542207ab5365ff1fb2d9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 60,
"avg_line_length": 29.157894736842106,
"alnum_prop": 0.6823104693140795,
"repo_name": "salv-orlando/MyRepo",
"id": "884c4a71309f31edcf5e6de56aaaecef914c95f2",
"size": "1331",
"binary": false,
"copies": "4",
"ref": "refs/heads/bp/xenapi-security-groups",
"path": "nova/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "4477933"
},
{
"name": "Shell",
"bytes": "34174"
}
],
"symlink_target": ""
}
|
"""Starter script for Cinder Volume Backup."""
import logging as python_logging
import shlex
import sys
# NOTE(geguileo): Monkey patching must go before OSLO.log import, otherwise
# OSLO.context will not use greenthread thread local and all greenthreads will
# share the same context.
import eventlet
eventlet.monkey_patch()
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_privsep import priv_context
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder.db import api as session
from cinder import objects
from cinder import service
from cinder import utils
from cinder import version
CONF = cfg.CONF
backup_workers_opt = cfg.IntOpt(
'backup_workers',
default=1, min=1, max=processutils.get_worker_count(),
help='Number of backup processes to launch. Improves performance with '
'concurrent backups.')
CONF.register_opt(backup_workers_opt)
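# A sample cinder.conf snippet (illustrative values) enabling concurrent
# backup processes:
#
#     [DEFAULT]
#     backup_workers = 4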
LOG = None
# NOTE(mriedem): The default backup driver uses swift and performs read/write
# operations in a thread. swiftclient will log requests and responses at DEBUG
# level, which can cause a thread switch and break the backup operation. So we
# set a default log level of WARN for swiftclient to try and avoid this issue.
_EXTRA_DEFAULT_LOG_LEVELS = ['swiftclient=WARN']
def _launch_backup_process(launcher, num_process):
try:
server = service.Service.create(binary='cinder-backup',
coordination=True,
process_number=num_process + 1)
except Exception:
LOG.exception('Backup service %s failed to start.', CONF.host)
sys.exit(1)
else:
# Dispose of the whole DB connection pool here before
# starting another process. Otherwise we run into cases where
# child processes share DB connections which results in errors.
session.dispose_engine()
launcher.launch_service(server)
def main():
objects.register_all()
gmr_opts.set_defaults(CONF)
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.set_defaults(
default_log_levels=logging.get_default_log_levels() +
_EXTRA_DEFAULT_LOG_LEVELS)
logging.setup(CONF, "cinder")
python_logging.captureWarnings(True)
priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
utils.monkey_patch()
gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
global LOG
LOG = logging.getLogger(__name__)
if CONF.backup_workers > 1:
LOG.info('Backup running with %s processes.', CONF.backup_workers)
launcher = service.get_launcher()
for i in range(CONF.backup_workers):
_launch_backup_process(launcher, i)
launcher.wait()
else:
LOG.info('Backup running in single process mode.')
server = service.Service.create(binary='cinder-backup',
coordination=True,
process_number=1)
service.serve(server)
service.wait()
|
{
"content_hash": "5ef7ae89b91c0e1328f62274cbb201ef",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 33.78350515463917,
"alnum_prop": 0.6847726579188282,
"repo_name": "j-griffith/cinder",
"id": "ffe7cb9460af1a6ded5afbad92c05f85f0564626",
"size": "3961",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/cmd/backup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20155959"
},
{
"name": "Shell",
"bytes": "16354"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0012_mediafile_feedback'),
]
operations = [
migrations.AddField(
model_name='mediafile',
name='original_filename',
field=models.CharField(blank=True, max_length=255),
),
]
|
{
"content_hash": "8feae4e70f717eaee20cc71cd5b1c57a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.6040100250626567,
"repo_name": "hep7agon/city-feedback-hub",
"id": "c28cd7166300d92c45f8a93a3aec634a3f9e2421",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/migrations/0013_mediafile_original_filename.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31781"
},
{
"name": "CoffeeScript",
"bytes": "3296"
},
{
"name": "HTML",
"bytes": "218979"
},
{
"name": "JavaScript",
"bytes": "952021"
},
{
"name": "Python",
"bytes": "100532"
}
],
"symlink_target": ""
}
|
"""
.. module:: app
:platform: linux
:synopsis: The module containing the planet alignment application.
.. moduleauthor:: Paul Fanelli <paul.fanelli@gmail.com>
.. modulecreated:: 6/27/15
"""
from zope.interface import implements
from planet_alignment.app.interface import IApp
class App(object):
"""This class houses the main application and runs the planet alignment.
- **parameters** and **types**::
:param system_data: The system data object containing planet alignment data.
:param plugins_mgr: The plugins manager object containing a list of plugins.
:param time: The amount of time to calculate the alignment for.
:type system_data: SystemData object.
:type plugins_mgr: PluginsManager object.
:type time: float
"""
implements(IApp)
def __init__(self, system_data, plugins_mgr, time):
self._system_data = system_data
self._plugins_mgr = plugins_mgr
self._time = time
def run(self):
"""Runs the planet alignment algorithm.
:return: Returns a list of results, if there are any, else an empty list.
:rtype: list
"""
result_retval = []
for plugin_path in self._plugins_mgr:
try:
plugin_inst = self._plugins_mgr.get_plugin_instance_by_path(plugin_path)
plugin_name = self._plugins_mgr.get_plugin_name_by_path(plugin_path)
except (KeyError, AttributeError) as e:
print("WARNING: {}".format(e))
continue
plugin_str = ''
unique_aligned_list = []
first_entry = True
for x in self._system_data:
aligned_list = []
for y in self._system_data:
# don't compare the planets to themselves
if x.name == y.name:
continue
try:
result = plugin_inst.are_planets_aligned(x, y, self._time)
if result:
if x.name not in aligned_list:
aligned_list.append(x.name)
aligned_list.append(y.name)
aligned_list.sort()
except AttributeError as ae:
print("ERROR: {}: {}".format(plugin_path, ae))
except Exception as e:
print("ERROR: Unknown error {}".format(e))
if aligned_list:
if aligned_list not in unique_aligned_list:
unique_aligned_list.append(aligned_list)
for unique_aligned_entry in unique_aligned_list:
if first_entry:
first_entry = False
else:
plugin_str += '\n'
plugin_str += plugin_name + ': ' + ', '.join(unique_aligned_entry)
if plugin_str:
result_retval.append(plugin_str)
return result_retval
def print_results(self, results):
"""Prints the results from the run of the planet alignment algorithm.
:param results: List of the results output data.
:type results: list
:return: Returns the self reference.
:rtype: App class.
"""
for line in results:
print(line)
return self
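# Usage sketch (illustrative; SystemData and PluginsManager instances come
# from the surrounding planet_alignment package):
#
#     app = App(system_data, plugins_mgr, time=365.25)
#     app.print_results(app.run())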
|
{
"content_hash": "9d4f432b26f9c229b3c1297ee20b217d",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 88,
"avg_line_length": 34.00990099009901,
"alnum_prop": 0.5327510917030568,
"repo_name": "paulfanelli/planet_alignment",
"id": "27e1cf0bdc4c0c3e39cb1f36c690d0d120ec6e64",
"size": "3435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "planet_alignment/app/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48726"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure link_settings containing -lblah.lib is remapped to just blah.lib.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
CHDIR = 'linker-flags'
test.run_gyp('library-adjust.gyp', chdir=CHDIR)
test.build('library-adjust.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
{
"content_hash": "54b99c70e8f34c7f63b582d22eed602f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 25.8,
"alnum_prop": 0.686046511627907,
"repo_name": "Jet-Streaming/gyp",
"id": "4ef1fe394e8db659f6c85beed7fde827e0fb4652",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/win/gyptest-link-library-adjust.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1194"
},
{
"name": "Batchfile",
"bytes": "1133"
},
{
"name": "C",
"bytes": "38674"
},
{
"name": "C++",
"bytes": "41140"
},
{
"name": "Objective-C",
"bytes": "10353"
},
{
"name": "Objective-C++",
"bytes": "1958"
},
{
"name": "Python",
"bytes": "3290293"
},
{
"name": "Shell",
"bytes": "12644"
},
{
"name": "Swift",
"bytes": "124"
}
],
"symlink_target": ""
}
|
"""Various Statistical Tests
Author: josef-pktd
License: BSD-3
Notes
-----
Almost fully verified against R or Gretl, not all options are the same.
In many cases of Lagrange multiplier tests both the LM test and the F test is
returned. In some but not all cases, R has the option to choose the test
statistic. Some alternative test statistic results have not been verified.
TODO
* refactor to store intermediate results
* how easy is it to attach a test that is a class to a result instance,
for example CompareCox as a method compare_cox(self, other) ?
* StatTestMC has been moved and should be deleted
missing:
* pvalues for breaks_hansen
* additional options, compare with R, check where ddof is appropriate
* new tests:
- breaks_ap, more recent breaks tests
- specification tests against nonparametric alternatives
"""
from __future__ import print_function
from statsmodels.compat.python import iteritems, lrange, map, long
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from statsmodels.tsa.stattools import acf, adfuller
from statsmodels.tsa.tsatools import lagmat
from statsmodels.compat.numpy import np_matrix_rank
#get the old signature back so the examples work
def unitroot_adf(x, maxlag=None, trendorder=0, autolag='AIC', store=False):
return adfuller(x, maxlag=maxlag, regression=trendorder, autolag=autolag,
store=store, regresults=False)
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str
class CompareCox(object):
'''Cox Test for non-nested models
Parameters
----------
results_x : Result instance
result instance of first model
results_z : Result instance
result instance of second model
    attach : bool
        If true, then the intermediate results are attached to the instance.

    Notes
    -----
    Formulas from Greene, section 8.3.4, translated to code.
    Produces correct results for Example 8.3 in Greene.
'''
def run(self, results_x, results_z, attach=True):
'''run Cox test for non-nested models
Parameters
----------
results_x : Result instance
result instance of first model
results_z : Result instance
result instance of second model
attach : bool
If true, then the intermediate results are attached to the instance.
Returns
-------
tstat : float
t statistic for the test that including the fitted values of the
first model in the second model has no effect.
pvalue : float
two-sided pvalue for the t statistic
Notes
-----
Tests of non-nested hypothesis might not provide unambiguous answers.
The test should be performed in both directions and it is possible
that both or neither test rejects. see ??? for more information.
References
----------
???
'''
if not np.allclose(results_x.model.endog, results_z.model.endog):
raise ValueError('endogenous variables in models are not the same')
nobs = results_x.model.endog.shape[0]
x = results_x.model.exog
z = results_z.model.exog
sigma2_x = results_x.ssr/nobs
sigma2_z = results_z.ssr/nobs
yhat_x = results_x.fittedvalues
yhat_z = results_z.fittedvalues
res_dx = OLS(yhat_x, z).fit()
err_zx = res_dx.resid
res_xzx = OLS(err_zx, x).fit()
err_xzx = res_xzx.resid
sigma2_zx = sigma2_x + np.dot(err_zx.T, err_zx)/nobs
c01 = nobs/2. * (np.log(sigma2_z) - np.log(sigma2_zx))
v01 = sigma2_x * np.dot(err_xzx.T, err_xzx) / sigma2_zx**2
q = c01 / np.sqrt(v01)
pval = 2*stats.norm.sf(np.abs(q))
if attach:
self.res_dx = res_dx
self.res_xzx = res_xzx
self.c01 = c01
self.v01 = v01
self.q = q
self.pvalue = pval
self.dist = stats.norm
return q, pval
def __call__(self, results_x, results_z):
return self.run(results_x, results_z, attach=False)
compare_cox = CompareCox()
compare_cox.__doc__ = CompareCox.__doc__
class CompareJ(object):
'''J-Test for comparing non-nested models
Parameters
----------
results_x : Result instance
result instance of first model
results_z : Result instance
result instance of second model
    attach : bool
        If true, then the intermediate results are attached to the instance.

    Notes
    -----
    From the description in Greene, section 8.3.3. Produces correct results
    for Example 8.3 in Greene (not checked yet).
    TODO: check what results should be attached. (Currently this raises an
    exception; a clean reload in a Python session is not available.)
'''
def run(self, results_x, results_z, attach=True):
'''run J-test for non-nested models
Parameters
----------
results_x : Result instance
result instance of first model
results_z : Result instance
result instance of second model
attach : bool
If true, then the intermediate results are attached to the instance.
Returns
-------
tstat : float
t statistic for the test that including the fitted values of the
first model in the second model has no effect.
pvalue : float
two-sided pvalue for the t statistic
Notes
-----
Tests of non-nested hypothesis might not provide unambiguous answers.
The test should be performed in both directions and it is possible
that both or neither test rejects. see ??? for more information.
References
----------
???
'''
if not np.allclose(results_x.model.endog, results_z.model.endog):
raise ValueError('endogenous variables in models are not the same')
nobs = results_x.model.endog.shape[0]
y = results_x.model.endog
x = results_x.model.exog
z = results_z.model.exog
#sigma2_x = results_x.ssr/nobs
#sigma2_z = results_z.ssr/nobs
yhat_x = results_x.fittedvalues
#yhat_z = results_z.fittedvalues
res_zx = OLS(y, np.column_stack((yhat_x, z))).fit()
self.res_zx = res_zx #for testing
tstat = res_zx.tvalues[0]
pval = res_zx.pvalues[0]
if attach:
self.res_zx = res_zx
self.dist = stats.t(res_zx.df_resid)
self.teststat = tstat
self.pvalue = pval
return tstat, pval
def __call__(self, results_x, results_z):
return self.run(results_x, results_z, attach=False)
compare_j = CompareJ()
compare_j.__doc__ = CompareJ.__doc__
def acorr_ljungbox(x, lags=None, boxpierce=False):
'''Ljung-Box test for no autocorrelation
Parameters
----------
x : array_like, 1d
data series, regression residuals when used as diagnostic test
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag length.
If lags is a list or array, then all lags are included up to the largest
lag in the list, however only the tests for the lags in the list are
reported.
If lags is None, then the default maxlag is 12*(nobs/100)^{1/4}
boxpierce : {False, True}
If true, then additional to the results of the Ljung-Box test also the
Box-Pierce test results are returned
Returns
-------
lbvalue : float or array
test statistic
pvalue : float or array
p-value based on chi-square distribution
    bpvalue : (optional), float or array
test statistic for Box-Pierce test
bppvalue : (optional), float or array
p-value based for Box-Pierce test on chi-square distribution
Notes
-----
Ljung-Box and Box-Pierce statistic differ in their scaling of the
autocorrelation function. Ljung-Box test is reported to have better
small sample properties.
TODO: could be extended to work with more than one series
1d or nd ? axis ? ravel ?
needs more testing
    *Verification*
Looks correctly sized in Monte Carlo studies.
not yet compared to verified values
Examples
--------
see example script
References
----------
Greene
Wikipedia
'''
x = np.asarray(x)
nobs = x.shape[0]
if lags is None:
lags = lrange(1,41) #TODO: check default; SS: changed to 40
elif isinstance(lags, (int, long)):
lags = lrange(1,lags+1)
maxlag = max(lags)
lags = np.asarray(lags)
acfx = acf(x, nlags=maxlag) # normalize by nobs not (nobs-nlags)
# SS: unbiased=False is default now
# acf2norm = acfx[1:maxlag+1]**2 / (nobs - np.arange(1,maxlag+1))
acf2norm = acfx[1:maxlag+1]**2 / (nobs - np.arange(1,maxlag+1))
qljungbox = nobs * (nobs+2) * np.cumsum(acf2norm)[lags-1]
pval = stats.chi2.sf(qljungbox, lags)
if not boxpierce:
return qljungbox, pval
else:
qboxpierce = nobs * np.cumsum(acfx[1:maxlag+1]**2)[lags-1]
pvalbp = stats.chi2.sf(qboxpierce, lags)
return qljungbox, pval, qboxpierce, pvalbp
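# Usage sketch (illustrative): Ljung-Box test on white noise at lags 1-10;
# large p-values are expected since the series is uncorrelated.
#
#     x = np.random.randn(200)
#     qstat, pvals = acorr_ljungbox(x, lags=10)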
def acorr_lm(x, maxlag=None, autolag='AIC', store=False, regresults=False):
'''Lagrange Multiplier tests for autocorrelation
This is a generic Lagrange Multiplier test for autocorrelation. I don't
have a reference for it, but it returns Engle's ARCH test if x is the
squared residual array. A variation on it with additional exogenous
variables is the Breusch-Godfrey autocorrelation test.
Parameters
----------
resid : ndarray, (nobs,)
residuals from an estimation, or time series
maxlag : int
highest lag to use
autolag : None or string
If None, then a fixed number of lags given by maxlag is used.
store : bool
If true then the intermediate results are also returned
Returns
-------
lm : float
Lagrange multiplier test statistic
lmpval : float
p-value for Lagrange multiplier test
fval : float
fstatistic for F test, alternative version of the same test based on
F test for the parameter restriction
fpval : float
pvalue for F test
resstore : instance (optional)
a class instance that holds intermediate results. Only returned if
store=True
See Also
--------
het_arch
acorr_breusch_godfrey
acorr_ljung_box
'''
if regresults:
store = True
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#for adf from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs/100., 1/4.)))#nobs//4 #TODO: check default, or do AIC/BIC
xdiff = np.diff(x)
#
xdall = lagmat(x[:,None], maxlag, trim='both')
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs,1)), xdall]
xshort = x[-nobs:]
if store: resstore = ResultsStore()
if autolag:
#search for lag length with highest information criteria
#Note: I use the same number of observations to have comparable IC
results = {}
for mlag in range(1, maxlag+1):
results[mlag] = OLS(xshort, xdall[:,:mlag+1]).fit()
if autolag.lower() == 'aic':
bestic, icbestlag = min((v.aic,k) for k,v in iteritems(results))
elif autolag.lower() == 'bic':
icbest, icbestlag = min((v.bic,k) for k,v in iteritems(results))
else:
raise ValueError("autolag can only be None, 'AIC' or 'BIC'")
#rerun ols with best ic
xdall = lagmat(x[:,None], icbestlag, trim='both')
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs,1)), xdall]
xshort = x[-nobs:]
usedlag = icbestlag
if regresults:
resstore.results = results
else:
usedlag = maxlag
resols = OLS(xshort, xdall[:,:usedlag+1]).fit()
fval = resols.fvalue
fpval = resols.f_pvalue
lm = nobs * resols.rsquared
lmpval = stats.chi2.sf(lm, usedlag)
# Note: degrees of freedom for LM test is nvars minus constant = usedlags
#return fval, fpval, lm, lmpval
if store:
resstore.resols = resols
resstore.usedlag = usedlag
return lm, lmpval, fval, fpval, resstore
else:
return lm, lmpval, fval, fpval
def het_arch(resid, maxlag=None, autolag=None, store=False, regresults=False,
ddof=0):
'''Engle's Test for Autoregressive Conditional Heteroscedasticity (ARCH)
Parameters
----------
resid : ndarray, (nobs,)
residuals from an estimation, or time series
maxlag : int
highest lag to use
autolag : None or string
If None, then a fixed number of lags given by maxlag is used.
store : bool
If true then the intermediate results are also returned
ddof : int
Not Implemented Yet
If the residuals are from a regression, or ARMA estimation, then there
are recommendations to correct the degrees of freedom by the number
of parameters that have been estimated, for example ddof=p+a for an
ARMA(p,q) (need reference, based on discussion on R finance mailinglist)
Returns
-------
lm : float
Lagrange multiplier test statistic
lmpval : float
p-value for Lagrange multiplier test
fval : float
fstatistic for F test, alternative version of the same test based on
F test for the parameter restriction
fpval : float
pvalue for F test
resstore : instance (optional)
a class instance that holds intermediate results. Only returned if
store=True
Notes
-----
    verified against R:FinTS::ArchTest
'''
return acorr_lm(resid**2, maxlag=maxlag, autolag=autolag, store=store,
regresults=regresults)
def acorr_breusch_godfrey(results, nlags=None, store=False):
'''Breusch Godfrey Lagrange Multiplier tests for residual autocorrelation
Parameters
----------
results : Result instance
Estimation results for which the residuals are tested for serial
correlation
nlags : int
Number of lags to include in the auxiliary regression. (nlags is
highest lag)
store : bool
If store is true, then an additional class instance that contains
intermediate results is returned.
Returns
-------
lm : float
Lagrange multiplier test statistic
lmpval : float
p-value for Lagrange multiplier test
fval : float
fstatistic for F test, alternative version of the same test based on
F test for the parameter restriction
fpval : float
pvalue for F test
resstore : instance (optional)
a class instance that holds intermediate results. Only returned if
store=True
Notes
-----
BG adds lags of residual to exog in the design matrix for the auxiliary
regression with residuals as endog,
see Greene 12.7.1.
References
----------
Greene Econometrics, 5th edition
'''
x = np.asarray(results.resid)
exog_old = results.model.exog
nobs = x.shape[0]
if nlags is None:
#for adf from Greene referencing Schwert 1989
nlags = np.trunc(12. * np.power(nobs/100., 1/4.))#nobs//4 #TODO: check default, or do AIC/BIC
nlags = int(nlags)
x = np.concatenate((np.zeros(nlags), x))
#xdiff = np.diff(x)
#
xdall = lagmat(x[:,None], nlags, trim='both')
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs,1)), xdall]
xshort = x[-nobs:]
exog = np.column_stack((exog_old, xdall))
k_vars = exog.shape[1]
if store: resstore = ResultsStore()
resols = OLS(xshort, exog).fit()
ft = resols.f_test(np.eye(nlags, k_vars, k_vars - nlags))
fval = ft.fvalue
fpval = ft.pvalue
fval = np.squeeze(fval)[()] #TODO: fix this in ContrastResults
fpval = np.squeeze(fpval)[()]
lm = nobs * resols.rsquared
lmpval = stats.chi2.sf(lm, nlags)
# Note: degrees of freedom for LM test is nvars minus constant = usedlags
#return fval, fpval, lm, lmpval
if store:
resstore.resols = resols
resstore.usedlag = nlags
return lm, lmpval, fval, fpval, resstore
else:
return lm, lmpval, fval, fpval
msg = "Use acorr_breusch_godfrey, acorr_breush_godfrey will be removed " \
"in 0.9 \n (Note: misspelling missing 'c'),"
acorr_breush_godfrey = np.deprecate(acorr_breusch_godfrey, 'acorr_breush_godfrey',
'acorr_breusch_godfrey',
msg)
def het_breuschpagan(resid, exog_het):
'''Breusch-Pagan Lagrange Multiplier test for heteroscedasticity
The tests the hypothesis that the residual variance does not depend on
the variables in x in the form
:math: \sigma_i = \\sigma * f(\\alpha_0 + \\alpha z_i)
Homoscedasticity implies that $\\alpha=0$
Parameters
----------
resid : arraylike, (nobs,)
For the Breusch-Pagan test, this should be the residual of a regression.
If an array is given in exog, then the residuals are calculated by
the an OLS regression or resid on exog. In this case resid should
contain the dependent variable. Exog can be the same as x.
TODO: I dropped the exog option, should I add it back?
exog_het : array_like, (nobs, nvars)
This contains variables that might create data dependent
heteroscedasticity.
Returns
-------
lm : float
lagrange multiplier statistic
lm_pvalue :float
p-value of lagrange multiplier test
fvalue : float
f-statistic of the hypothesis that the error variance does not depend
on x
f_pvalue : float
p-value for the f-statistic
Notes
-----
Assumes x contains constant (for counting dof and calculation of R^2).
In the general description of LM test, Greene mentions that this test
exaggerates the significance of results in small or moderately large
    samples. In this case the F-statistic is preferable.
*Verification*
    The chi-square test statistic matches the result of bptest in R-stats
    with defaults (studentize=True) exactly (to within 1e-13).
Implementation
This is calculated using the generic formula for LM test using $R^2$
(Greene, section 17.6) and not with the explicit formula
(Greene, section 11.4.3).
The degrees of freedom for the p-value assume x is full rank.
References
----------
http://en.wikipedia.org/wiki/Breusch%E2%80%93Pagan_test
Greene 5th edition
Breusch, Pagan article
'''
x = np.asarray(exog_het)
y = np.asarray(resid)**2
nobs, nvars = x.shape
resols = OLS(y, x).fit()
fval = resols.fvalue
fpval = resols.f_pvalue
lm = nobs * resols.rsquared
# Note: degrees of freedom for LM test is nvars minus constant
return lm, stats.chi2.sf(lm, nvars-1), fval, fpval
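# Usage sketch (illustrative): test OLS residuals against the model's own
# design matrix, which must contain a constant.
#
#     res = OLS(y, add_constant(x)).fit()
#     lm, lm_pval, fval, f_pval = het_breuschpagan(res.resid, res.model.exog)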
het_breushpagan = np.deprecate(het_breuschpagan, 'het_breushpagan', 'het_breuschpagan',
"Use het_breuschpagan, het_breushpagan will be "
"removed in 0.9 \n(Note: misspelling missing 'c')")
def het_white(resid, exog, retres=False):
'''White's Lagrange Multiplier Test for Heteroscedasticity
Parameters
----------
resid : array_like
residuals, square of it is used as endogenous variable
exog : array_like
possible explanatory variables for variance, squares and interaction
        terms are included in the auxiliary regression.
resstore : instance (optional)
a class instance that holds intermediate results. Only returned if
store=True
Returns
-------
lm : float
lagrange multiplier statistic
lm_pvalue :float
p-value of lagrange multiplier test
fvalue : float
f-statistic of the hypothesis that the error variance does not depend
on x. This is an alternative test variant not the original LM test.
f_pvalue : float
p-value for the f-statistic
Notes
-----
assumes x contains constant (for counting dof)
question: does f-statistic make sense? constant ?
References
----------
Greene section 11.4.1 5th edition p. 222
now test statistic reproduces Greene 5th, example 11.3
'''
x = np.asarray(exog)
y = np.asarray(resid)
if x.ndim == 1:
raise ValueError('x should have constant and at least one more variable')
nobs, nvars0 = x.shape
i0,i1 = np.triu_indices(nvars0)
exog = x[:,i0]*x[:,i1]
nobs, nvars = exog.shape
assert nvars == nvars0*(nvars0-1)/2. + nvars0
resols = OLS(y**2, exog).fit()
fval = resols.fvalue
fpval = resols.f_pvalue
lm = nobs * resols.rsquared
# Note: degrees of freedom for LM test is nvars minus constant
#degrees of freedom take possible reduced rank in exog into account
#df_model checks the rank to determine df
#extra calculation that can be removed:
assert resols.df_model == np_matrix_rank(exog) - 1
lmpval = stats.chi2.sf(lm, resols.df_model)
return lm, lmpval, fval, fpval
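# Usage sketch (illustrative): same calling convention as het_breuschpagan,
# but squares and cross-products of exog are built internally.
#
#     lm, lm_pval, fval, f_pval = het_white(res.resid, res.model.exog)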
def _het_goldfeldquandt2_old(y, x, idx, split=None, retres=False):
'''test whether variance is the same in 2 subsamples
Parameters
----------
y : array_like
endogenous variable
x : array_like
exogenous variable, regressors
idx : integer
column index of variable according to which observations are
sorted for the split
    split : None or integer or float in interval (0,1)
index at which sample is split.
If 0<split<1 then split is interpreted as fraction of the observations
in the first sample
retres : boolean
if true, then an instance of a result class is returned,
otherwise 2 numbers, fvalue and p-value, are returned
Returns
-------
(fval, pval) or res
fval : float
value of the F-statistic
pval : float
p-value of the hypothesis that the variance in one subsample is larger
than in the other subsample
res : instance of result class
The class instance is just a storage for the intermediate and final
results that are calculated
Notes
-----
TODO:
add resultinstance - DONE
maybe add drop-middle as option
maybe allow for several breaks
recommendation for users: use this function as pattern for more flexible
split in tests, e.g. drop middle.
can do Chow test for structural break in same way
ran sanity check
'''
x = np.asarray(x)
y = np.asarray(y)
nobs, nvars = x.shape
if split is None:
split = nobs//2
elif (0<split) and (split<1):
split = int(nobs*split)
xsortind = np.argsort(x[:,idx])
y = y[xsortind]
x = x[xsortind,:]
resols1 = OLS(y[:split], x[:split]).fit()
resols2 = OLS(y[split:], x[split:]).fit()
fval = resols1.mse_resid/resols2.mse_resid
if fval>1:
fpval = stats.f.sf(fval, resols1.df_resid, resols2.df_resid)
ordering = 'larger'
else:
fval = 1./fval
fpval = stats.f.sf(fval, resols2.df_resid, resols1.df_resid)
ordering = 'smaller'
if retres:
res = ResultsStore()
res.__doc__ = 'Test Results for Goldfeld-Quandt test of heteroscedasticity'
res.fval = fval
res.fpval = fpval
res.df_fval = (resols2.df_resid, resols1.df_resid)
res.resols1 = resols1
res.resols2 = resols2
res.ordering = ordering
res.split = split
#res.__str__
res._str = '''The Goldfeld-Quandt test for null hypothesis that the
variance in the second subsample is %s than in the first subsample:
F-statistic =%8.4f and p-value =%8.4f''' % (ordering, fval, fpval)
return res
else:
return fval, fpval
class HetGoldfeldQuandt(object):
'''test whether variance is the same in 2 subsamples
Parameters
----------
y : array_like
endogenous variable
x : array_like
exogenous variable, regressors
idx : integer
column index of variable according to which observations are
sorted for the split
split : None or integer or float in interval (0, 1)
index at which sample is split.
If 0<split<1 then split is interpreted as fraction of the observations
in the first sample
drop : None, float or int
If this is not None, then observations are dropped from the middle part
of the sorted series. If 0<drop<1 then drop is interpreted as the fraction
of the number of observations to be dropped.
Note: Currently, observations are dropped between split and
split+drop, where split and drop are the indices (given by rounding if
specified as fraction). The first sample is [0:split], the second
sample is [split+drop:]
alternative : string, 'increasing', 'decreasing' or 'two-sided'
default is increasing. This specifies the alternative for the p-value
calculation.
Returns
-------
(fval, pval) or res
fval : float
value of the F-statistic
pval : float
p-value of the hypothesis that the variance in one subsample is larger
than in the other subsample
res : instance of result class
The class instance is just a storage for the intermediate and final
results that are calculated
Notes
-----
The null hypothesis is that the variance in the two sub-samples is the
same. The alternative hypothesis can be increasing, i.e. the variance in
the second sample is larger than in the first, or decreasing, or two-sided.
Results are identical to those of R, but the drop option is defined
differently. (Sorting by idx is not tested yet.)
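Examples
--------
A minimal usage sketch via the module-level instance ``het_goldfeldquandt``
(illustrative only; ``y`` and ``x`` are hypothetical data with a constant
in the first column of ``x``)::

>>> fval, fpval, ordering = het_goldfeldquandt(y, x, idx=1,
... alternative='increasing')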
'''
#TODO: can do Chow test for structural break in same way
def run(self, y, x, idx=None, split=None, drop=None,
alternative='increasing', attach=True):
'''see class docstring'''
x = np.asarray(x)
y = np.asarray(y)#**2
nobs, nvars = x.shape
if split is None:
split = nobs//2
elif (0<split) and (split<1):
split = int(nobs*split)
if drop is None:
start2 = split
elif (0<drop) and (drop<1):
start2 = split + int(nobs*drop)
else:
start2 = split + drop
if idx is not None:
xsortind = np.argsort(x[:,idx])
y = y[xsortind]
x = x[xsortind,:]
resols1 = OLS(y[:split], x[:split]).fit()
resols2 = OLS(y[start2:], x[start2:]).fit()
fval = resols2.mse_resid/resols1.mse_resid
#if fval>1:
if alternative.lower() in ['i', 'inc', 'increasing']:
fpval = stats.f.sf(fval, resols1.df_resid, resols2.df_resid)
ordering = 'increasing'
elif alternative.lower() in ['d', 'dec', 'decreasing']:
fpval = stats.f.sf(1./fval, resols2.df_resid, resols1.df_resid)
ordering = 'decreasing'
elif alternative.lower() in ['2', '2-sided', 'two-sided']:
fpval_sm = stats.f.cdf(fval, resols2.df_resid, resols1.df_resid)
fpval_la = stats.f.sf(fval, resols2.df_resid, resols1.df_resid)
fpval = 2*min(fpval_sm, fpval_la)
ordering = 'two-sided'
else:
raise ValueError('invalid alternative')
if attach:
res = self
res.__doc__ = 'Test Results for Goldfeld-Quandt test of heteroscedasticity'
res.fval = fval
res.fpval = fpval
res.df_fval = (resols2.df_resid, resols1.df_resid)
res.resols1 = resols1
res.resols2 = resols2
res.ordering = ordering
res.split = split
#res.__str__
#TODO: check if string works
res._str = '''The Goldfeld-Quandt test for the null hypothesis that the
variance is the same in the two subsamples, against the %s alternative:
F-statistic =%8.4f and p-value =%8.4f''' % (ordering, fval, fpval)
return fval, fpval, ordering
#return self
def __str__(self):
try:
return self._str
except AttributeError:
return repr(self)
#TODO: missing the alternative option in call
def __call__(self, y, x, idx=None, split=None, drop=None,
alternative='increasing'):
return self.run(y, x, idx=idx, split=split, drop=drop, attach=False,
alternative=alternative)
het_goldfeldquandt = HetGoldfeldQuandt()
het_goldfeldquandt.__doc__ = het_goldfeldquandt.run.__doc__
def linear_harvey_collier(res):
'''Harvey Collier test for linearity
The Null hypothesis is that the regression is correctly modeled as linear.
Parameters
----------
res : Result instance
Returns
-------
tvalue : float
test statistic, based on ttest_1sample
pvalue : float
pvalue of the test
Notes
-----
TODO: add sort_by option
This test is a t-test that the mean of the recursive ols residuals is zero.
Calculating the recursive residuals might take some time for large samples.
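Examples
--------
Illustrative sketch (``res`` is a hypothetical fitted OLS results
instance)::

>>> tvalue, pvalue = linear_harvey_collier(res)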
'''
#I think this has different ddof than
#B.H. Baltagi, Econometrics, 2011, chapter 8
#but it matches Gretl and R:lmtest, pvalue at decimal=13
rr = recursive_olsresiduals(res, skip=3, alpha=0.95)
from scipy import stats
return stats.ttest_1samp(rr[3][3:], 0)
def linear_rainbow(res, frac=0.5):
'''Rainbow test for linearity
The Null hypothesis is that the regression is correctly modelled as linear.
The alternative for which the power might be large is a convex (nonlinear)
mean function; this still needs checking.
Parameters
----------
res : Result instance
frac : float
fraction of the observations used for the central subsample
Returns
-------
fstat : float
test statistic based of F test
pvalue : float
pvalue of the test
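Examples
--------
Illustrative sketch (``res`` is a hypothetical fitted OLS results
instance)::

>>> fstat, pval = linear_rainbow(res, frac=0.5)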
'''
nobs = res.nobs
endog = res.model.endog
exog = res.model.exog
lowidx = np.ceil(0.5 * (1 - frac) * nobs).astype(int)
uppidx = np.floor(lowidx + frac * nobs).astype(int)
mi_sl = slice(lowidx, uppidx)
res_mi = OLS(endog[mi_sl], exog[mi_sl]).fit()
nobs_mi = res_mi.model.endog.shape[0]
ss_mi = res_mi.ssr
ss = res.ssr
fstat = (ss - ss_mi) / (nobs-nobs_mi) / ss_mi * res_mi.df_resid
from scipy import stats
pval = stats.f.sf(fstat, nobs - nobs_mi, res_mi.df_resid)
return fstat, pval
def linear_lm(resid, exog, func=None):
'''Lagrange multiplier test for linearity against functional alternative
limitations: Currently assumes that the first column of exog is the
constant (intercept).
Currently it doesn't check whether the transformed variables contain NaNs,
for example the log of a negative number.
Parameters
----------
resid : ndarray
residuals of a regression
exog : ndarray
exogenous variables for which linearity is tested
func : callable
If func is None, then squares are used. func needs to take an array
of exog and return an array of transformed variables.
Returns
-------
lm : float
Lagrange multiplier test statistic
lm_pval : float
p-value of Lagrange multiplier test
ftest : ContrastResult instance
the results from the F test variant of this test
Notes
-----
Written to match Gretl's linearity test.
The test runs an auxiliary regression of the residuals on the combined
original and transformed regressors.
The Null hypothesis is that the linear specification is correct.
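Examples
--------
Illustrative sketch (``res`` and ``x`` are hypothetical; the first column
of ``x`` is assumed to be the constant)::

>>> lm, lm_pval, ftest = linear_lm(res.resid, x)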
'''
from scipy import stats
if func is None:
func = lambda x: np.power(x, 2)
exog_aux = np.column_stack((exog, func(exog[:,1:])))
nobs, k_vars = exog.shape
ls = OLS(resid, exog_aux).fit()
ftest = ls.f_test(np.eye(k_vars - 1, k_vars * 2 - 1, k_vars))
lm = nobs * ls.rsquared
lm_pval = stats.chi2.sf(lm, k_vars - 1)
return lm, lm_pval, ftest
def _neweywestcov(resid, x):
'''
Has not been run or tested yet.
from regstats2 ::
if idx(29) % HAC (Newey West)
L = round(4*(nobs/100)^(2/9));
% L = nobs^.25; % as an alternative
hhat = repmat(residuals',p,1).*X';
xuux = hhat*hhat';
for l = 1:L;
za = hhat(:,(l+1):nobs)*hhat(:,1:nobs-l)';
w = 1 - l/(L+1);
xuux = xuux + w*(za+za');
end
d = struct;
d.covb = xtxi*xuux*xtxi;
'''
nobs = resid.shape[0] #TODO: check this can only be 1d
nlags = int(round(4*(nobs/100.)**(2/9.)))
hhat = resid * x.T
xuux = np.dot(hhat, hhat.T)
for lag in range(1, nlags + 1): #MATLAB reference above uses l = 1:L
za = np.dot(hhat[:,lag:nobs], hhat[:,:nobs-lag].T)
w = 1 - lag/(nlags + 1.) #Bartlett weight, w = 1 - l/(L+1) as in the reference
xuux = xuux + w*(za + za.T)
xtxi = np.linalg.inv(np.dot(x.T, x)) #QR instead?
covbNW = np.dot(xtxi, np.dot(xuux, xtxi))
return covbNW
def _recursive_olsresiduals2(olsresults, skip):
'''this is the original version based on Greene and references;
kept for now for comparison and benchmarking
'''
y = olsresults.model.endog
x = olsresults.model.exog
nobs, nvars = x.shape
rparams = np.nan * np.zeros((nobs,nvars))
rresid = np.nan * np.zeros((nobs))
rypred = np.nan * np.zeros((nobs))
rvarraw = np.nan * np.zeros((nobs))
#XTX = np.zeros((nvars,nvars))
#XTY = np.zeros((nvars))
x0 = x[:skip]
y0 = y[:skip]
XTX = np.dot(x0.T, x0)
XTY = np.dot(x0.T, y0) #xi * y #np.dot(xi, y)
beta = np.linalg.solve(XTX, XTY)
rparams[skip-1] = beta
yipred = np.dot(x[skip-1], beta)
rypred[skip-1] = yipred
rresid[skip-1] = y[skip-1] - yipred
rvarraw[skip-1] = 1+np.dot(x[skip-1],np.dot(np.linalg.inv(XTX),x[skip-1]))
for i in range(skip,nobs):
xi = x[i:i+1,:]
yi = y[i]
xxT = np.dot(xi.T, xi) #xi is 2d 1 row
xy = (xi*yi).ravel() # XTY is 1d #np.dot(xi, yi) #np.dot(xi, y)
#print(xy.shape, XTY.shape) # debug output, disabled
#print(XTX)
#print(XTY)
beta = np.linalg.solve(XTX, XTY)
rparams[i-1] = beta #this is beta based on info up to t-1
yipred = np.dot(xi, beta)
rypred[i] = yipred
rresid[i] = yi - yipred
rvarraw[i] = 1 + np.dot(xi,np.dot(np.linalg.inv(XTX),xi.T))
XTX += xxT
XTY += xy
i = nobs
beta = np.linalg.solve(XTX, XTY)
rparams[i-1] = beta
rresid_scaled = rresid/np.sqrt(rvarraw) #this is N(0,sigma2) distributed
nrr = nobs-skip
sigma2 = rresid_scaled[skip-1:].var(ddof=1)
rresid_standardized = rresid_scaled/np.sqrt(sigma2) #N(0,1) distributed
rcusum = rresid_standardized[skip-1:].cumsum()
#confidence interval points in Greene p136 looks strange?
#this assumes sum of independent standard normal
#rcusumci = np.sqrt(np.arange(skip,nobs+1))*np.array([[-1.],[+1.]])*stats.norm.sf(0.025)
a = 1.143 #for alpha=0.99 (0.948 for alpha=0.95)
#following taken from Ploberger
crit = a*np.sqrt(nrr)
rcusumci = (a*np.sqrt(nrr) + a*np.arange(0,nobs-skip)/np.sqrt(nrr)) \
* np.array([[-1.],[+1.]])
return (rresid, rparams, rypred, rresid_standardized, rresid_scaled,
rcusum, rcusumci)
def recursive_olsresiduals(olsresults, skip=None, lamda=0.0, alpha=0.95):
'''calculate recursive ols with residuals and cusum test statistic
Parameters
----------
olsresults : instance of RegressionResults
uses only endog and exog
skip : int or None
number of observations to use for initial OLS, if None then skip is
set equal to the number of regressors (columns in exog)
lamda : float
weight for Ridge correction to initial (X'X)^{-1}
alpha : {0.95, 0.99}
confidence level of test, currently only two values supported,
used for confidence interval in cusum graph
Returns
-------
rresid : array
recursive ols residuals
rparams : array
recursive ols parameter estimates
rypred : array
recursive prediction of endogenous variable
rresid_standardized : array
recursive residuals standardized so that N(0,sigma2) distributed, where
sigma2 is the error variance
rresid_scaled : array
recursive residuals normalize so that N(0,1) distributed
rcusum : array
cumulative residuals for cusum test
rcusumci : array
confidence interval for cusum test, currently hard coded for alpha=0.95
Notes
-----
It produces the same recursive residuals as the other version. This version
updates the inverse of the X'X matrix and does not require matrix inversion
during updating. It looks efficient, but no timing comparison has been done.
Confidence interval in Greene and Brown, Durbin and Evans is the same as
in Ploberger after a little bit of algebra.
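As a sketch in the code's notation (x_t is the new row of exog, with
ft = 1 + x_t' XTXi x_t), the rank-one updating formulas from BigJudge
eq. 5.5.15 and 5.5.14 are::

XTXi_new = XTXi - (XTXi x_t)(XTXi x_t)' / ft
beta_new = beta + (XTXi x_t) * resid_t / ft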
References
----------
jplv to check formulas, follows Harvey
BigJudge 5.5.2b for formula for inverse(X'X) updating
Greene section 7.5.2
Brown, R. L., J. Durbin, and J. M. Evans. “Techniques for Testing the
Constancy of Regression Relationships over Time.”
Journal of the Royal Statistical Society. Series B (Methodological) 37,
no. 2 (1975): 149-192.
'''
y = olsresults.model.endog
x = olsresults.model.exog
nobs, nvars = x.shape
if skip is None:
skip = nvars
rparams = np.nan * np.zeros((nobs,nvars))
rresid = np.nan * np.zeros((nobs))
rypred = np.nan * np.zeros((nobs))
rvarraw = np.nan * np.zeros((nobs))
#initialize with skip observations
x0 = x[:skip]
y0 = y[:skip]
#add Ridge correction to start (not in jplv)
XTXi = np.linalg.inv(np.dot(x0.T, x0)+lamda*np.eye(nvars))
XTY = np.dot(x0.T, y0) #xi * y #np.dot(xi, y)
#beta = np.linalg.solve(XTX, XTY)
beta = np.dot(XTXi, XTY)
#print('beta', beta)
rparams[skip-1] = beta
yipred = np.dot(x[skip-1], beta)
rypred[skip-1] = yipred
rresid[skip-1] = y[skip-1] - yipred
rvarraw[skip-1] = 1 + np.dot(x[skip-1],np.dot(XTXi, x[skip-1]))
for i in range(skip,nobs):
xi = x[i:i+1,:]
yi = y[i]
#xxT = np.dot(xi.T, xi) #xi is 2d 1 row
xy = (xi*yi).ravel() # XTY is 1d #np.dot(xi, yi) #np.dot(xi, y)
#print(xy.shape, XTY.shape)
#print(XTX)
#print(XTY)
# get prediction error with previous beta
yipred = np.dot(xi, beta)
rypred[i] = yipred
residi = yi - yipred
rresid[i] = residi
#update beta and inverse(X'X)
tmp = np.dot(XTXi, xi.T)
ft = 1 + np.dot(xi, tmp)
XTXi = XTXi - np.dot(tmp,tmp.T) / ft #BigJudge equ 5.5.15
#print('beta', beta)
beta = beta + (tmp*residi / ft).ravel() #BigJudge equ 5.5.14
# #version for testing
# XTY += xy
# beta = np.dot(XTXi, XTY)
# print((tmp*yipred / ft).shape)
# print('tmp.shape, ft.shape, beta.shape', tmp.shape, ft.shape, beta.shape)
rparams[i] = beta
rvarraw[i] = ft
i = nobs
#beta = np.linalg.solve(XTX, XTY)
#rparams[i] = beta
rresid_scaled = rresid/np.sqrt(rvarraw) #this is N(0,sigma2) distributed
nrr = nobs-skip
#sigma2 = rresid_scaled[skip-1:].var(ddof=1) #var or sum of squares ?
#Greene has var, jplv and Ploberger have sum of squares (Ass.:mean=0)
#Gretl uses: by reverse engineering matching their numbers
sigma2 = rresid_scaled[skip:].var(ddof=1)
rresid_standardized = rresid_scaled/np.sqrt(sigma2) #N(0,1) distributed
rcusum = rresid_standardized[skip-1:].cumsum()
#confidence interval points in Greene p136 looks strange. Cleared up
#this assumes sum of independent standard normal, which does not take into
#account that we make many tests at the same time
#rcusumci = np.sqrt(np.arange(skip,nobs+1))*np.array([[-1.],[+1.]])*stats.norm.sf(0.025)
if alpha == 0.95:
a = 0.948 #for alpha=0.95
elif alpha == 0.99:
a = 1.143 #for alpha=0.99
elif alpha == 0.90:
a = 0.850
else:
raise ValueError('alpha can only be 0.9, 0.95 or 0.99')
#following taken from Ploberger,
crit = a*np.sqrt(nrr)
rcusumci = (a*np.sqrt(nrr) + 2*a*np.arange(0,nobs-skip)/np.sqrt(nrr)) \
* np.array([[-1.],[+1.]])
return (rresid, rparams, rypred, rresid_standardized, rresid_scaled,
rcusum, rcusumci)
def breaks_hansen(olsresults):
'''test for model stability, breaks in parameters for ols, Hansen 1992
Parameters
----------
olsresults : instance of RegressionResults
uses only endog and exog
Returns
-------
teststat : float
Hansen's test statistic
crit : structured array
critical values at alpha=0.95 for different nvars
pvalue : not yet implemented
ft, s : arrays
temporary returns for debugging, will be removed
Notes
-----
Looks good in examples; maybe not very powerful for small changes in
parameters.
According to Greene, distribution of test statistics depends on nvar but
not on nobs.
Test statistic is verified against R:strucchange
References
----------
Greene section 7.5.1, notation follows Greene
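Examples
--------
Illustrative sketch (``res`` is a hypothetical fitted OLS results
instance)::

>>> H, crit95, ft, s = breaks_hansen(res)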
'''
y = olsresults.model.endog
x = olsresults.model.exog
resid = olsresults.resid
nobs, nvars = x.shape
resid2 = resid**2
ft = np.c_[x*resid[:,None], (resid2 - resid2.mean())]
s = ft.cumsum(0)
assert (np.abs(s[-1]) < 1e10).all() #can be optimized away
F = nobs*(ft[:,:,None]*ft[:,None,:]).sum(0)
S = (s[:,:,None]*s[:,None,:]).sum(0)
H = np.trace(np.dot(np.linalg.inv(F), S))
crit95 = np.array([(2,1.9),(6,3.75),(15,3.75),(19,4.52)],
dtype = [('nobs',int), ('crit', float)])
#TODO: get critical values from Bruce Hansens' 1992 paper
return H, crit95, ft, s
def breaks_cusumolsresid(olsresidual, ddof=0):
'''cusum test for parameter stability based on ols residuals
Parameters
----------
olsresiduals : ndarray
array of residuals from an OLS estimation
ddof : int
number of parameters in the OLS estimation, used as degrees of freedom
correction for error variance.
Returns
-------
sup_b : float
test statistic, maximum of absolute value of scaled cumulative OLS
residuals
pval : float
Probability of observing the data under the null hypothesis of no
structural change, based on asymptotic distribution which is a Brownian
Bridge
crit: list
tabulated critical values, for alpha = 1%, 5% and 10%
Notes
-----
Tested against R:strucchange.
Not clear: Assumption 2 in Ploberger, Kramer assumes that the exog x
(other than the constant) have asymptotically zero mean, x.mean(0) = [1, 0, 0, ..., 0].
Is this really necessary? I don't see how it can affect the test statistic
under the null. It does make a difference under the alternative.
Also, the asymptotic distribution of test statistic depends on this.
From examples it looks like there is little power for standard cusum if
exog (other than constant) have mean zero.
References
----------
Ploberger, Werner, and Walter Kramer. “The Cusum Test with Ols Residuals.”
Econometrica 60, no. 2 (March 1992): 271-285.
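Examples
--------
Illustrative sketch (``res`` is a hypothetical fitted OLS results instance;
assumes a constant is included, so ddof is df_model plus one)::

>>> sup_b, pval, crit = breaks_cusumolsresid(res.resid,
... ddof=int(res.df_model) + 1)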
'''
resid = olsresidual.ravel()
nobs = len(resid)
nobssigma2 = (resid**2).sum()
if ddof > 0:
#print('ddof', ddof, 1. / (nobs - ddof) * nobs
nobssigma2 = nobssigma2 / (nobs - ddof) * nobs
#B is asymptotically a Brownian Bridge
B = resid.cumsum()/np.sqrt(nobssigma2) # use T*sigma directly
sup_b = np.abs(B).max() #asymptotically distributed as standard Brownian Bridge
crit = [(1,1.63), (5, 1.36), (10, 1.22)]
#Note: stats.kstwobign is the asymptotic distribution of the sup of the
#absolute value of a Brownian Bridge; isf gives the critical values:
#>>> stats.kstwobign.isf([0.01,0.05,0.1])
#array([ 1.62762361, 1.35809864, 1.22384787])
pval = stats.kstwobign.sf(sup_b)
return sup_b, pval, crit
#def breaks_cusum(recolsresid):
# '''renormalized cusum test for parameter stability based on recursive residuals
#
#
# still incorrect: in PK, the normalization for sigma is by T not T-K
# also the test statistic is asymptotically a Wiener Process, Brownian motion
# not Brownian Bridge
# for testing: result reject should be identical as in standard cusum version
#
# References
# ----------
# Ploberger, Werner, and Walter Kramer. “The Cusum Test with Ols Residuals.”
# Econometrica 60, no. 2 (March 1992): 271-285.
#
# '''
# resid = recolsresid.ravel()
# nobssigma2 = (resid**2).sum()
# #B is asymptotically a Brownian Bridge
# B = resid.cumsum()/np.sqrt(nobssigma2) # use T*sigma directly
# nobs = len(resid)
# denom = 1. + 2. * np.arange(nobs)/(nobs-1.) #not sure about limits
# sup_b = np.abs(B/denom).max()
# #asymptotically distributed as standard Brownian Bridge
# crit = [(1,1.63), (5, 1.36), (10, 1.22)]
# #Note stats.kstwobign.isf(0.1) is distribution of sup.abs of Brownian Bridge
# #>>> stats.kstwobign.isf([0.01,0.05,0.1])
# #array([ 1.62762361, 1.35809864, 1.22384787])
# pval = stats.kstwobign.sf(sup_b)
# return sup_b, pval, crit
def breaks_AP(endog, exog, skip):
'''supLM, expLM and aveLM by Andrews, and Andrews,Ploberger
p-values by B Hansen
just an idea for the computation of a sequence of tests with a given change
point (Chow tests)
run recursive ols both forward and backward, match the two so they form a
split of the data, calculate sum of squares for residuals and get test
statistic for each breakpoint between skip and nobs-skip
need to put recursive ols (residuals) into separate function
alternative: B Hansen loops over breakpoints only once and updates
x'x and xe'xe
update: Andrews is based on GMM estimation not OLS, LM test statistic is
easy to compute because it only requires full sample GMM estimate (p.837)
with GMM the test has much wider applicability than just OLS
for testing loop over single breakpoint Chow test function
'''
pass
#delete when testing is finished
class StatTestMC(object):
"""class to run Monte Carlo study on a statistical test'''
TODO
print(summary, for quantiles and for histogram
draft in trying out script log
this has been copied to tools/mctools.py, with improvements
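Example sketch, mirroring the usage in the __main__ block below
(dgp and statistic are user-supplied callables; values are illustrative)::

mc = StatTestMC(dgp, statistic)
mc.run(1000, statindices=[0, 1])
histo, cumhisto, cumhistorev = mc.histogram(0, critval=[-3.5, -2.9])
frac, quantiles = mc.quantiles(0)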
"""
def __init__(self, dgp, statistic):
self.dgp = dgp #staticmethod(dgp) #no self
self.statistic = statistic # staticmethod(statistic) #no self
def run(self, nrepl, statindices=None, dgpargs=[], statsargs=[]):
'''run the actual Monte Carlo and save results
'''
self.nrepl = nrepl
self.statindices = statindices
self.dgpargs = dgpargs
self.statsargs = statsargs
dgp = self.dgp
statfun = self.statistic # name ?
#single return statistic
if statindices is None:
self.nreturn = nreturns = 1
mcres = np.zeros(nrepl)
for ii in range(nrepl): #was range(nrepl-1), which left the last entry at zero
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
mcres[ii] = statfun(x, *statsargs) #unitroot_adf(x, 2,trendorder=0, autolag=None)
#more than one return statistic
else:
self.nreturn = nreturns = len(statindices)
self.mcres = mcres = np.zeros((nrepl, nreturns))
for ii in range(nrepl): #was range(nrepl-1), which left the last entry at zero
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
ret = statfun(x, *statsargs)
mcres[ii] = [ret[i] for i in statindices]
self.mcres = mcres
def histogram(self, idx=None, critval=None):
'''calculate histogram values
does not do any plotting
'''
if self.mcres.ndim == 2:
if idx is not None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
if critval is None:
histo = np.histogram(mcres, bins=10)
else:
#extend critval with -inf/inf only where they are missing
bins = np.asarray(critval, dtype=float)
if not critval[0] == -np.inf:
bins = np.r_[-np.inf, bins]
if not critval[-1] == np.inf:
bins = np.r_[bins, np.inf]
histo = np.histogram(mcres, bins=bins)
self.histo = histo
self.cumhisto = np.cumsum(histo[0])*1./self.nrepl
self.cumhistoreversed = np.cumsum(histo[0][::-1])[::-1]*1./self.nrepl
return histo, self.cumhisto, self.cumhistoreversed
def quantiles(self, idx=None, frac=[0.01, 0.025, 0.05, 0.1, 0.975]):
'''calculate quantiles of Monte Carlo results
'''
if self.mcres.ndim == 2:
if idx is not None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
self.frac = frac = np.asarray(frac)
self.mcressort = mcressort = np.sort(mcres) #sort the selected column
return frac, mcressort[(self.nrepl*frac).astype(int)]
if __name__ == '__main__':
examples = ['adf']
if 'adf' in examples:
x = np.random.randn(20)
print(acorr_ljungbox(x,4))
print(unitroot_adf(x))
nrepl = 100
nobs = 100
mcres = np.zeros(nrepl)
for ii in range(nrepl): #was range(nrepl-1), which left the last entry at zero
x = (1e-4+np.random.randn(nobs)).cumsum()
mcres[ii] = unitroot_adf(x, 2,trendorder=0, autolag=None)[0]
print((mcres<-2.57).sum())
print(np.histogram(mcres))
mcressort = np.sort(mcres)
for ratio in [0.01, 0.025, 0.05, 0.1]:
print(ratio, mcressort[int(nrepl*ratio)])
print('critical values in Greene table 20.5')
print('sample size = 100')
print('with constant')
print('0.01: -19.8, 0.025: -16.3, 0.05: -13.7, 0.1: -11.0, 0.975: 0.47')
print('0.01: -3.50, 0.025: -3.17, 0.05: -2.90, 0.1: -2.58, 0.975: 0.26')
crvdg = dict([map(float,s.split(':')) for s in ('0.01: -19.8, 0.025: -16.3, 0.05: -13.7, 0.1: -11.0, 0.975: 0.47'.split(','))])
crvd = dict([map(float,s.split(':')) for s in ('0.01: -3.50, 0.025: -3.17, 0.05: -2.90, 0.1: -2.58, 0.975: 0.26'.split(','))])
'''
>>> sorted(crvd.values())
[-3.5, -3.17, -2.9, -2.58, 0.26]
>>> sorted(crvdg.values())
[-19.8, -16.3, -13.7, -11.0, 0.47]
'''
#for trend = 0
crit_5lags0p05 = -4.41519 + (-14.0406)/nobs + (-12.575)/nobs**2
print(crit_5lags0p05)
adfstat, _,_,resstore = unitroot_adf(x, 2,trendorder=0, autolag=None, store=1)
print((mcres>crit_5lags0p05).sum())
print(resstore.resols.model.exog[-5:])
print(x[-5:])
print(np.histogram(mcres, bins=[-np.inf, -3.5, -3.17, -2.9 , -2.58, 0.26, np.inf]))
print(mcressort[(nrepl*(np.array([0.01, 0.025, 0.05, 0.1, 0.975]))).astype(int)])
def randwalksim(nobs=100, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def adf20(x):
return unitroot_adf(x, 2,trendorder=0, autolag=None)[:2]
print('\nResults with MC class')
mc1 = StatTestMC(randwalksim, adf20)
mc1.run(1000, statindices=[0,1])
print(mc1.histogram(0, critval=[-3.5, -3.17, -2.9 , -2.58, 0.26]))
print(mc1.quantiles(0))
print('\nLjung Box')
def lb4(x):
s,p = acorr_ljungbox(x, lags=4)
return s[-1], p[-1]
#Note: this redefinition (lags=1) overrides the lags=4 version above.
def lb4(x):
s,p = acorr_ljungbox(x, lags=1)
return s[0], p[0]
print('Results with MC class')
mc1 = StatTestMC(normalnoisesim, lb4)
mc1.run(1000, statindices=[0,1])
print(mc1.histogram(1, critval=[0.01, 0.025, 0.05, 0.1, 0.975]))
print(mc1.quantiles(1))
print(mc1.quantiles(0))
print(mc1.histogram(0))
nobs = 100
x = np.ones((nobs,2))
x[:,1] = np.arange(nobs)/20.
y = x.sum(1) + 1.01*(1+1.5*(x[:,1]>10))*np.random.rand(nobs)
print(het_goldfeldquandt(y,x, 1))
y = x.sum(1) + 1.01*(1+0.5*(x[:,1]>10))*np.random.rand(nobs)
print(het_goldfeldquandt(y,x, 1))
y = x.sum(1) + 1.01*(1-0.5*(x[:,1]>10))*np.random.rand(nobs)
print(het_goldfeldquandt(y,x, 1))
print(het_breuschpagan(y,x))
print(het_white(y,x))
f, fp, fo = het_goldfeldquandt(y,x, 1)
print(f, fp)
resgq = het_goldfeldquandt(y,x, 1, retres=True)
print(resgq)
#this is just a syntax check:
print(_neweywestcov(y, x))
resols1 = OLS(y, x).fit()
print(_neweywestcov(resols1.resid, x))
print(resols1.cov_params())
print(resols1.HC0_se)
print(resols1.cov_HC0)
y = x.sum(1) + 10.*(1-0.5*(x[:,1]>10))*np.random.rand(nobs)
print(HetGoldfeldQuandt().run(y,x, 1, alternative='dec'))
|
{
"content_hash": "ae0938b2115c3ff0e000ae21166cf780",
"timestamp": "",
"source": "github",
"line_count": 1627,
"max_line_length": 149,
"avg_line_length": 32.85863552550707,
"alnum_prop": 0.6146349675464358,
"repo_name": "yl565/statsmodels",
"id": "95c9dddcdbbba51bcb39889908dbb7cb2d79d028",
"size": "53497",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "statsmodels/sandbox/stats/diagnostic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "9670281"
},
{
"name": "R",
"bytes": "55204"
},
{
"name": "Stata",
"bytes": "54989"
}
],
"symlink_target": ""
}
|
__author__ = "EUL Systems"
__copyright__ = "Copyright 2010, 2016. Emory University Library and IT Services"
__credits__ = ["Rebecca Koeser", "Ben Ranker", "Alex Thomas", "Scott Turnbull",
"Alex Zotov"]
__email__ = "libsys-dev@listserv.cc.emory.edu"
# Version Info, parsed below for actual version number.
__version_info__ = (1, 0, 3, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None: # Adds dash
__version__ += ('-%s' % (__version_info__[-1],))
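# For example (illustrative): (1, 0, 3, None) -> '1.0.3',
# while (1, 0, 3, 'dev') would yield '1.0.3-dev'.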
|
{
"content_hash": "2edc4fd04724080e728e2535a937a879",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 45.53846153846154,
"alnum_prop": 0.6199324324324325,
"repo_name": "emory-libraries/pidman",
"id": "d9b283bc9b1107bd866cf8f2d2ac795eadc99374",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pidman/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7736"
},
{
"name": "Python",
"bytes": "195026"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class RouteFilter(Resource):
"""Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list[~azure.mgmt.network.v2016_12_01.models.RouteFilterRule]
:ivar peerings: A collection of references to express route circuit
peerings.
:vartype peerings:
list[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering]
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'peerings': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, rules=None, **kwargs) -> None:
super(RouteFilter, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.rules = rules
self.peerings = None
self.provisioning_state = None
self.etag = None
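# Illustrative usage sketch (argument values are hypothetical):
#
# rf = RouteFilter(location='westus', tags={'env': 'test'}, rules=[])
#
# 'peerings', 'provisioning_state' and 'etag' are read-only and are only
# populated by the server.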
|
{
"content_hash": "3d5db9442e51efa0131006c776482cb2",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 101,
"avg_line_length": 37.5,
"alnum_prop": 0.6097777777777778,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "da9f427e597de978a4c8aa8f84b70d737df2cfc1",
"size": "2724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/route_filter_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""
Tests to assert that various incorporated middleware works as expected.
"""
from http import client as http_client
import os
import tempfile
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
from ironic.tests.unit.api import base
from ironic.tests.unit.api import utils
from ironic.tests.unit.db import utils as db_utils
class TestCORSMiddleware(base.BaseApiTest):
'''Provide a basic smoke test to ensure CORS middleware is active.
The tests below provide minimal confirmation that the CORS middleware
is active, and may be configured. For comprehensive tests, please consult
the test suite in oslo_middleware.
'''
def setUp(self):
# Make sure the CORS options are registered
cfg.CONF.register_opts(cors_middleware.CORS_OPTS, 'cors')
# Load up our valid domain values before the application is created.
cfg.CONF.set_override("allowed_origin",
"http://valid.example.com",
group='cors')
# Create the application.
super(TestCORSMiddleware, self).setUp()
@staticmethod
def _response_string(status_code):
"""Helper function to return string in form of 'CODE DESCRIPTION'.
For example: '200 OK'
"""
return '{} {}'.format(status_code, http_client.responses[status_code])
def test_valid_cors_options_request(self):
req_headers = ['content-type',
'x-auth-token',
'x-openstack-ironic-api-version']
headers = {
'Origin': 'http://valid.example.com',
'Access-Control-Request-Method': 'GET',
'Access-Control-Request-Headers': ','.join(req_headers),
'X-OpenStack-Ironic-API-Version': '1.14'
}
response = self.app.options('/', headers=headers, xhr=True)
# Assert response status.
self.assertEqual(
self._response_string(http_client.OK), response.status)
self.assertIn('Access-Control-Allow-Origin', response.headers)
self.assertEqual('http://valid.example.com',
response.headers['Access-Control-Allow-Origin'])
def test_invalid_cors_options_request(self):
req_headers = ['content-type',
'x-auth-token',
'x-openstack-ironic-api-version']
headers = {
'Origin': 'http://invalid.example.com',
'Access-Control-Request-Method': 'GET',
'Access-Control-Request-Headers': ','.join(req_headers),
'X-OpenStack-Ironic-API-Version': '1.14'
}
response = self.app.options('/', headers=headers, xhr=True)
# Assert response status.
self.assertEqual(
self._response_string(http_client.OK), response.status)
self.assertNotIn('Access-Control-Allow-Origin', response.headers)
def test_valid_cors_get_request(self):
response = self.app \
.get('/nodes/detail',
headers={
'Origin': 'http://valid.example.com'
})
# Assert response status.
self.assertEqual(
self._response_string(http_client.OK), response.status)
self.assertIn('Access-Control-Allow-Origin', response.headers)
self.assertIn('X-OpenStack-Ironic-API-Version', response.headers)
self.assertEqual('http://valid.example.com',
response.headers['Access-Control-Allow-Origin'])
def test_invalid_cors_get_request(self):
response = self.app \
.get('/',
headers={
'Origin': 'http://invalid.example.com'
})
# Assert response status.
self.assertEqual(
self._response_string(http_client.OK), response.status)
self.assertNotIn('Access-Control-Allow-Origin', response.headers)
class TestBasicAuthMiddleware(base.BaseApiTest):
def _make_app(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
f.write('myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.'
'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n')
cfg.CONF.set_override('http_basic_auth_user_file', f.name)
self.addCleanup(os.remove, cfg.CONF.http_basic_auth_user_file)
cfg.CONF.set_override('auth_strategy', 'http_basic')
return super(TestBasicAuthMiddleware, self)._make_app()
def setUp(self):
super(TestBasicAuthMiddleware, self).setUp()
self.environ = {'fake.cache': utils.FakeMemcache()}
self.fake_db_node = db_utils.get_test_node(chassis_id=None)
def test_not_authenticated(self):
response = self.get_json('/chassis', expect_errors=True)
self.assertEqual(http_client.UNAUTHORIZED, response.status_int)
self.assertEqual(
'Basic realm="Baremetal API"',
response.headers['WWW-Authenticate']
)
def test_authenticated(self):
auth_header = {'Authorization': 'Basic bXlOYW1lOm15UGFzc3dvcmQ='}
response = self.get_json('/chassis', headers=auth_header)
self.assertEqual({'chassis': []}, response)
def test_public_unauthenticated(self):
response = self.get_json('/')
self.assertEqual('v1', response['id'])
|
{
"content_hash": "70e7ea92faf8b5a95566c4721d0ca2cf",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.6127340823970038,
"repo_name": "openstack/ironic",
"id": "80f768fd13f8eed5e909a8d9bade0b85b99d0a0c",
"size": "5940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/api/test_middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "PowerShell",
"bytes": "1676"
},
{
"name": "Python",
"bytes": "9506176"
},
{
"name": "Shell",
"bytes": "188127"
}
],
"symlink_target": ""
}
|
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
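# An environment block looks like, e.g. (illustrative):
# 'PATH=C:\\tools\x00TMP=C:\\temp\x00\x00'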
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefixes = ('Processing ', '64 bit Processing ')
processing = set(os.path.basename(x)
for x in lines if x.startswith(prefixes))
for line in lines:
if not line.startswith(prefixes) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "1b21e7d6943d7ddadd287c30ecc46c9e",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 80,
"avg_line_length": 40.601941747572816,
"alnum_prop": 0.6343854615016739,
"repo_name": "svn2github/gyp",
"id": "ac15d737277dd83226912c11f2f0732bd0cad54b",
"size": "12726",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pylib/gyp/win_tool.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1021"
},
{
"name": "C",
"bytes": "38131"
},
{
"name": "C++",
"bytes": "37764"
},
{
"name": "Objective-C",
"bytes": "9930"
},
{
"name": "Objective-C++",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "2125081"
},
{
"name": "Shell",
"bytes": "13357"
},
{
"name": "Swift",
"bytes": "116"
}
],
"symlink_target": ""
}
|
def init_actions_(service, args):
"""
this needs to return a mapping of actions representing the dependencies
between actions.
Look at ACTION_DEPS in this module for an example of what is expected.
"""
return {
'install': ['init']
}
def install(job):
# look for build host os service
host_os = None
service = job.service
for parent in service.parents:
if parent.model.role == 'os':
host_os = parent
break
else:
raise j.exceptions.AYSNotFound("Can't find os service")
cuisine = host_os.executor.cuisine
if cuisine.core.dir_exists('/mnt/building/opt'):
cuisine.core.dir_remove('/mnt/building/opt')
dockers = ['packager', 'cockpit', 'portal', 'jumpscale', 'scality', 'geodns', 'php',
'fs', 'grafana', 'python', 'nodejs', 'mongodb', 'golang', 'nginx',
'shellinabox', 'caddy', 'influxdb', 'redis']
for docker in dockers:
try:
check = cuisine.core.run('docker ps -a | grep -o -F %s' % docker)
if check[1] == docker:
cuisine.core.execute_bash('docker rm -f %s' % docker)
except Exception:
continue
|
{
"content_hash": "29ce7b4dc1936521077bdafabea1c2b1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 89,
"avg_line_length": 34.8,
"alnum_prop": 0.5862068965517241,
"repo_name": "Jumpscale/ays_jumpscale8",
"id": "1dc634877723fccfcbfbc14cc50e0c8c745fa517",
"size": "1218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_services/test_ays_build_clean/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "270835"
}
],
"symlink_target": ""
}
|
from pkgutil import extend_path
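# Make modules of this package importable from multiple directories
# (pkgutil-style namespace package behavior).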
__path__ = extend_path(__path__, __name__)
|
{
"content_hash": "243eb545f5c6e7515ac91f334fc1e896",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 37.5,
"alnum_prop": 0.6533333333333333,
"repo_name": "uogbuji/amara3-xml",
"id": "b48d5c6a6c341003c2f666fd7eea818e11068bbe",
"size": "415",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pylib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7283"
},
{
"name": "Objective-C",
"bytes": "13116"
},
{
"name": "Python",
"bytes": "154856"
}
],
"symlink_target": ""
}
|
from bx.misc.cdb import *
from tempfile import NamedTemporaryFile
def test():
d = {}
for i in range( 10000 ):
d[ 'foo' + str( i ) ] = 'bar' + str( i )
# Open temporary file and get name
file = NamedTemporaryFile()
file_name = file.name
# Write cdb to file
FileCDBDict.to_file( d, file )
file.flush()
# Open on disk
file2 = open( file_name )
cdb = FileCDBDict( file2 )
for key, value in d.iteritems():
assert cdb[key] == value
try:
cdb['notin']
assert False, "KeyError was not raised"
except KeyError:
pass
# Close everything (deletes the temporary file)
file2.close()
file.close()
|
{
"content_hash": "50d372c775a7ecb30d5da16c77b5791f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 51,
"avg_line_length": 21.647058823529413,
"alnum_prop": 0.5543478260869565,
"repo_name": "uhjish/bx-python",
"id": "0fb5d1c404c11435e6c22b0d21eb5a09271e6ae0",
"size": "736",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/bx/misc/cdb_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "99767"
},
{
"name": "PostScript",
"bytes": "1169"
},
{
"name": "Python",
"bytes": "1249917"
},
{
"name": "Shell",
"bytes": "421"
}
],
"symlink_target": ""
}
|
"""linebot.models package."""
from .base import ( # noqa
Base,
)
from .error import ( # noqa
Error,
ErrorDetail,
)
from .events import ( # noqa
Event,
MessageEvent,
FollowEvent,
UnfollowEvent,
JoinEvent,
LeaveEvent,
PostbackEvent,
BeaconEvent,
Postback,
Beacon,
)
from .imagemap import ( # noqa
ImagemapSendMessage,
BaseSize,
ImagemapAction,
URIImagemapAction,
MessageImagemapAction,
ImagemapArea,
)
from .messages import ( # noqa
Message,
TextMessage,
ImageMessage,
VideoMessage,
AudioMessage,
LocationMessage,
StickerMessage,
)
from .responses import ( # noqa
Profile,
)
from .send_messages import ( # noqa
SendMessage,
TextSendMessage,
ImageSendMessage,
VideoSendMessage,
AudioSendMessage,
LocationSendMessage,
StickerSendMessage,
)
from .sources import ( # noqa
Source,
SourceUser,
SourceGroup,
SourceRoom,
)
from .template import ( # noqa
TemplateSendMessage,
Template,
ButtonsTemplate,
ConfirmTemplate,
CarouselTemplate,
CarouselColumn,
TemplateAction,
PostbackTemplateAction,
MessageTemplateAction,
URITemplateAction,
)
|
{
"content_hash": "9c8522e21ae4d88102a9152f99494435",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 36,
"avg_line_length": 18.073529411764707,
"alnum_prop": 0.6680227827502034,
"repo_name": "monhustla/line-bot-sdk-python",
"id": "455b5f14e1783d1719477c5aa9fcbc9ea28c5964",
"size": "1810",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "linebot/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "191401"
}
],
"symlink_target": ""
}
|