repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dimagi/commcare-hq | corehq/extensions/interface.py | 1 | 5498 | import importlib
import inspect
import itertools
import logging
from contextlib import contextmanager
from enum import Enum
from dimagi.utils.logging import notify_exception
from dimagi.utils.modules import to_function
logger = logging.getLogger("commcare.extensions")
class ExtensionError(Exception):
    """Raised for invalid extension point definitions, implementations,
    or registrations made after setup is complete."""
    pass
class ResultFormat(Enum):
    """Strategies for post-processing the results of an extension point call."""
    FLATTEN = 'flatten'  # merge each extension's iterable result into one list
    FIRST = 'first'  # return only the first result produced, if any
def flatten_results(point, results):
    """Merge the iterable produced by each extension into one flat list.

    The ``point`` argument is unused but kept for formatter-signature parity.
    """
    merged = []
    for chunk in results:
        merged.extend(chunk)
    return merged
def first_result(point, results):
    """Return the first result produced by any extension, or ``None``.

    The ``point`` argument is unused but kept for formatter-signature parity.
    Uses the two-argument form of :func:`next` instead of catching
    ``StopIteration`` by hand (same behavior, idiomatic form).
    """
    return next(results, None)
# Maps each ResultFormat choice to the callable that post-processes the
# iterator of extension results; consumed by ExtensionPoint.__call__ via
# CommCareExtensions.extension_point(result_format=...).
RESULT_FORMATTERS = {
    ResultFormat.FIRST: first_result,
    ResultFormat.FLATTEN: flatten_results
}
class Extension:
    """A single registered implementation of an extension point.

    Bundles the implementing callable with the name of the point it extends
    and an optional set of domains it is restricted to.
    """

    def __init__(self, point, callable_ref, domains):
        self.point = point
        self.callable = callable_ref
        # ``None`` means "applies to every domain".
        if domains:
            self.domains = set(domains)
        else:
            self.domains = None

    def validate(self, expected_args):
        """Check the callable can accept every argument the point provides.

        Raises ``ExtensionError`` when some arguments would go unconsumed
        and the callable has no ``**kwargs`` catch-all.
        """
        argspec = inspect.getfullargspec(self.callable)
        unconsumed_args = set(expected_args).difference(argspec.args)
        if unconsumed_args and argspec.varkw is None:
            raise ExtensionError(f"Not all extension point args are consumed: {unconsumed_args}")

    def should_call_for_domain(self, domain):
        """Return True when this extension applies to ``domain``."""
        if self.domains is None:
            return True
        return domain in self.domains

    def __call__(self, *args, **kwargs):
        return self.callable(*args, **kwargs)

    def __repr__(self):
        return str(self.callable)
class ExtensionPoint:
    """A named hook whose implementations are registered via ``extend``.

    Calling the point invokes every applicable extension and collects the
    results; with no extensions registered, the definition function itself
    provides the (optional) default result.
    """
    def __init__(self, manager, name, definition_function, result_formatter=None):
        self.manager = manager
        self.name = name
        self.definition_function = definition_function
        # The definition function's positional arg names form the contract
        # every extension implementation must be able to consume.
        self.providing_args = inspect.getfullargspec(definition_function).args
        self.extensions = []
        self.result_formatter = result_formatter
        self.__doc__ = inspect.getdoc(definition_function)

    def extend(self, impl=None, *, domains=None):
        """Register ``impl`` as an extension (usable as a decorator).

        :param impl: the implementing callable (omit to use as ``@point.extend(...)``)
        :param domains: optional list of domains the extension is limited to;
            only valid when the point provides a ``domain`` argument.
        """
        def _extend(impl):
            if self.manager.locked:
                raise ExtensionError(
                    "Late extension definition. Extensions must be defined before setup is complete"
                )
            if not callable(impl):
                raise ExtensionError(f"Extension point implementation must be callable: {impl!r}")
            extension = Extension(self.name, impl, domains)
            extension.validate(self.providing_args)
            self.extensions.append(extension)
            return impl
        if domains is not None and not isinstance(domains, list):
            raise ExtensionError("domains must be a list")
        if domains is not None and "domain" not in self.providing_args:
            raise ExtensionError("domain filtering not supported for this extension point")
        return _extend if impl is None else _extend(impl)

    def __call__(self, *args, **kwargs):
        """Invoke all applicable extensions (or the default) and format results."""
        callargs = inspect.getcallargs(self.definition_function, *args, **kwargs)
        domain = callargs.get('domain')
        extensions = [
            extension for extension in self.extensions
            if not domain or extension.should_call_for_domain(domain)
        ]
        if not extensions:
            # No extensions registered (or none match the domain): fall back
            # to the definition function's own (optional) result.
            result = self.definition_function(*args, **kwargs)
            results = iter([result] if result is not None else [])
        else:
            results = self._get_results(extensions, *args, **kwargs)
        if self.result_formatter:
            return self.result_formatter(self, results)
        return list(results)

    def _get_results(self, extensions, *args, **kwargs):
        # Lazily yield non-None results; a failing extension is reported but
        # must not prevent the remaining extensions from running.
        for extension in extensions:
            try:
                result = extension(*args, **kwargs)
                if result is not None:
                    yield result
            except Exception:  # noqa
                notify_exception(
                    None,
                    message="Error calling extension",
                    details={
                        # Fixed typo: this key was previously "extention_point".
                        "extension_point": self.name,
                        "extension": extension,
                        "kwargs": kwargs
                    },
                )
class CommCareExtensions:
    """Registry of extension points plus a loader for their implementations."""

    def __init__(self):
        self.registry = {}
        self.locked = False

    def extension_point(self, func=None, *, result_format=None):
        """Decorator for creating an extension point."""
        def _decorator(func):
            if not callable(func):
                raise ExtensionError(f"Extension point must be callable: {func!r}")
            if result_format:
                formatter = RESULT_FORMATTERS[result_format]
            else:
                formatter = None
            point = ExtensionPoint(self, func.__name__, func, result_formatter=formatter)
            self.registry[func.__name__] = point
            return point
        if func is None:
            return _decorator
        return _decorator(func)

    def load_extensions(self, implementations):
        """Import every implementation module, then lock the registry so
        no further extensions can be registered."""
        for module_name in implementations:
            self.resolve_module(module_name)
        self.locked = True

    def add_extension_points(self, module_or_name):
        """Import a module so the extension points it defines get registered."""
        self.resolve_module(module_or_name)

    def resolve_module(self, module_or_name):
        # Module objects are already imported; only dotted names need work.
        if isinstance(module_or_name, str):
            importlib.import_module(module_or_name)
@contextmanager
def disable_extensions(ext_point):
    """Temporarily detach every registered extension from ``ext_point``.

    Accepts an extension point object or a dotted path to one (resolved via
    ``to_function``). Intended for use in tests.
    """
    if isinstance(ext_point, str):
        ext_point = to_function(ext_point)
    saved = ext_point.extensions
    ext_point.extensions = []
    try:
        yield
    finally:
        # Always restore, even when the guarded block raises.
        ext_point.extensions = saved
| bsd-3-clause | 3f1874e4353b78356779b9f422cfd85c | 31.341176 | 100 | 0.618952 | 4.451822 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/projects/migrations/0031_fix_migration_projectstatuslog_creation_20170721_1637.py | 1 | 1247 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-21 14:37
from __future__ import unicode_literals
from django.db import migrations, connection
def forward(apps, schema_editor):
    """Repair ProjectPhaseLog.start values within a known-bad date window.

    Selects logs whose ``start`` falls between the applied timestamps of two
    bounding migrations and re-points it to the project's deadline, falling
    back to the project's creation date. (Presumably fixing dates clobbered by
    an earlier migration — see the filename; the window bounds come from the
    django_migrations bookkeeping table.)
    """
    with connection.cursor() as cursor:
        cursor.execute("SELECT applied FROM django_migrations WHERE name='0010_fix_export_permissions_migration'")
        start_date = cursor.fetchone()[0]
        cursor.execute("SELECT applied FROM django_migrations WHERE name='0027_auto_20170602_2240'")
        end_date = cursor.fetchone()[0]
    ProjectPhaseLog = apps.get_model('projects', 'ProjectPhaseLog')
    for status_log in ProjectPhaseLog.objects.filter(start__range=(start_date, end_date)):
        new_date = status_log.project.deadline if status_log.project.deadline else status_log.project.created
        # queryset.update() writes the fix without re-triggering save() logic.
        ProjectPhaseLog.objects.filter(id=status_log.id).update(start=new_date)
def backward(apps, schema_editor):
    # The original (bad) start dates are unrecoverable; reversing is a no-op.
    pass
class Migration(migrations.Migration):
    # Depends on the two migrations whose `applied` timestamps bound the date
    # window queried in forward().
    dependencies = [
        ('members', '0010_fix_export_permissions_migration'),
        ('projects', '0027_auto_20170602_2240'),
        ('projects', '0030_rename_account_bic_20170705_1221'),
    ]
    operations = [
        migrations.RunPython(forward, backward),
    ]
| bsd-3-clause | e66885b2cf76e4f894eed3c584dedd46 | 34.628571 | 114 | 0.694467 | 3.778788 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/time_based/migrations/0058_auto_20210224_1027.py | 1 | 1063 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-01-14 09:56
from __future__ import unicode_literals
from django.db import migrations, connection
from bluebottle.utils.utils import update_group_permissions
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def add_group_permissions(apps, schema_editor):
    """Grant the Staff group add/change/delete permissions on SlotParticipant.

    Runs inside the current tenant's context so the permissions are created
    for the schema the migration is being applied to.
    """
    tenant = Client.objects.get(schema_name=connection.tenant.schema_name)
    with LocalTenant(tenant):
        group_perms = {
            'Staff': {
                'perms': (
                    'add_slotparticipant', 'change_slotparticipant', 'delete_slotparticipant',
                )
            },
        }
        update_group_permissions('time_based', group_perms, apps)
class Migration(migrations.Migration):
    dependencies = [
        ('time_based', '0057_add_contribution_type_20210202_1131'),
    ]
    operations = [
        # Reverse is a no-op: the granted permissions are simply left in place.
        migrations.RunPython(
            add_group_permissions,
            migrations.RunPython.noop
        )
    ]
| bsd-3-clause | 43ca159e08f5456c8c4398203472e617 | 26.973684 | 94 | 0.647225 | 4.072797 | false | false | false | false |
dimagi/commcare-hq | corehq/messaging/scheduling/scheduling_partitioned/tests/test_dbaccessors_non_partitioned.py | 1 | 9230 | import uuid
from django.db import DEFAULT_DB_ALIAS
from corehq.form_processor.tests.utils import only_run_with_non_partitioned_database
from corehq.messaging.scheduling.scheduling_partitioned.dbaccessors import (
get_alert_schedule_instance,
get_timed_schedule_instance,
save_alert_schedule_instance,
save_timed_schedule_instance,
delete_alert_schedule_instance,
delete_timed_schedule_instance,
get_active_schedule_instance_ids,
get_alert_schedule_instances_for_schedule,
get_timed_schedule_instances_for_schedule,
)
from corehq.messaging.scheduling.models import (
AlertSchedule,
TimedSchedule,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
TimedScheduleInstance,
)
from corehq.util.exceptions import AccessRestricted
from datetime import datetime, date
from django.test import TestCase
@only_run_with_non_partitioned_database
class BaseSchedulingNontPartitionedDBAccessorsTest(TestCase):
    """Shared fixtures for scheduling dbaccessor tests against a single
    (non-partitioned) database.

    NOTE(review): "Nont" in the class name looks like a typo for "Non";
    left unchanged because the concrete test classes below reference it.
    """
    @classmethod
    def setUpClass(cls):
        super(BaseSchedulingNontPartitionedDBAccessorsTest, cls).setUpClass()
        cls.domain = 'scheduling-non-partitioned-test'
        cls.db = DEFAULT_DB_ALIAS
    @classmethod
    def make_alert_schedule_instance(cls, schedule_instance_id=None, schedule_id=None, active=True):
        # Factory: an (active by default) alert schedule instance due
        # 2017-03-01, with random ids unless explicitly supplied.
        return AlertScheduleInstance(
            schedule_instance_id=schedule_instance_id or uuid.uuid4(),
            domain=cls.domain,
            recipient_type='CommCareUser',
            recipient_id=uuid.uuid4().hex,
            current_event_num=0,
            schedule_iteration_num=1,
            next_event_due=datetime(2017, 3, 1),
            active=active,
            alert_schedule_id=schedule_id or uuid.uuid4(),
        )
    @classmethod
    def make_timed_schedule_instance(cls, schedule_instance_id=None, schedule_id=None, active=True):
        # Same as above for timed schedules; start_date mirrors next_event_due.
        return TimedScheduleInstance(
            schedule_instance_id=schedule_instance_id or uuid.uuid4(),
            domain=cls.domain,
            recipient_type='CommCareUser',
            recipient_id=uuid.uuid4().hex,
            current_event_num=0,
            schedule_iteration_num=1,
            next_event_due=datetime(2017, 3, 1),
            active=active,
            timed_schedule_id=schedule_id or uuid.uuid4(),
            start_date=date(2017, 3, 1),
        )
class TestSchedulingNonPartitionedDBAccessorsGetAndSave(BaseSchedulingNontPartitionedDBAccessorsTest):
    """Round-trip (save then get) coverage for both schedule instance types."""
    def tearDown(self):
        # Each test starts from an empty table for this test domain.
        AlertScheduleInstance.objects.using(self.db).filter(domain=self.domain).delete()
        TimedScheduleInstance.objects.using(self.db).filter(domain=self.domain).delete()
    def test_save_alert_schedule_instance(self):
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 0)
        instance = self.make_alert_schedule_instance()
        save_alert_schedule_instance(instance)
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 1)
    def test_save_timed_schedule_instance(self):
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 0)
        instance = self.make_timed_schedule_instance()
        save_timed_schedule_instance(instance)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 1)
    def test_get_alert_schedule_instance(self):
        instance1 = self.make_alert_schedule_instance()
        save_alert_schedule_instance(instance1)
        instance2 = get_alert_schedule_instance(instance1.schedule_instance_id)
        self.assertTrue(isinstance(instance2, AlertScheduleInstance))
        self.assertEqual(instance1.schedule_instance_id, instance2.schedule_instance_id)
        # Unknown ids raise rather than returning None.
        with self.assertRaises(AlertScheduleInstance.DoesNotExist):
            get_alert_schedule_instance(uuid.uuid4())
    def test_get_timed_schedule_instance(self):
        instance1 = self.make_timed_schedule_instance()
        save_timed_schedule_instance(instance1)
        instance2 = get_timed_schedule_instance(instance1.schedule_instance_id)
        self.assertTrue(isinstance(instance2, TimedScheduleInstance))
        self.assertEqual(instance1.schedule_instance_id, instance2.schedule_instance_id)
        with self.assertRaises(TimedScheduleInstance.DoesNotExist):
            get_timed_schedule_instance(uuid.uuid4())
class TestSchedulingNonPartitionedDBAccessorsDeleteAndFilter(BaseSchedulingNontPartitionedDBAccessorsTest):
    """Delete and query coverage over a fixed fixture of six instances:
    three alert and three timed, where for each type one instance is
    standalone and two share a schedule id (the third being inactive).
    """
    @classmethod
    def setUpClass(cls):
        super(TestSchedulingNonPartitionedDBAccessorsDeleteAndFilter, cls).setUpClass()
        cls.schedule_id1 = uuid.uuid4()
        cls.schedule_id2 = uuid.uuid4()
        cls.uuid1 = uuid.uuid4()
        cls.uuid2 = uuid.uuid4()
        cls.uuid3 = uuid.uuid4()
        cls.uuid4 = uuid.uuid4()
        cls.uuid5 = uuid.uuid4()
        cls.uuid6 = uuid.uuid4()
    def setUp(self):
        # Alert instances: uuid1 standalone; uuid2/uuid3 share schedule_id1,
        # uuid3 inactive.
        self.alert_instance1 = self.make_alert_schedule_instance(self.uuid1)
        save_alert_schedule_instance(self.alert_instance1)
        self.alert_instance2 = self.make_alert_schedule_instance(self.uuid2, schedule_id=self.schedule_id1)
        save_alert_schedule_instance(self.alert_instance2)
        self.alert_instance3 = self.make_alert_schedule_instance(self.uuid3, schedule_id=self.schedule_id1,
            active=False)
        save_alert_schedule_instance(self.alert_instance3)
        # Timed instances mirror the alert layout with schedule_id2.
        self.timed_instance1 = self.make_timed_schedule_instance(self.uuid4)
        save_timed_schedule_instance(self.timed_instance1)
        self.timed_instance2 = self.make_timed_schedule_instance(self.uuid5, schedule_id=self.schedule_id2)
        save_timed_schedule_instance(self.timed_instance2)
        self.timed_instance3 = self.make_timed_schedule_instance(self.uuid6, schedule_id=self.schedule_id2,
            active=False)
        save_timed_schedule_instance(self.timed_instance3)
    def tearDown(self):
        AlertScheduleInstance.objects.using(self.db).filter(domain=self.domain).delete()
        TimedScheduleInstance.objects.using(self.db).filter(domain=self.domain).delete()
    def test_delete_alert_schedule_instance(self):
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 3)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 3)
        delete_alert_schedule_instance(self.alert_instance1)
        # Only the alert side shrinks; timed instances are untouched.
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 2)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 3)
        with self.assertRaises(AlertScheduleInstance.DoesNotExist):
            get_alert_schedule_instance(self.uuid1)
    def test_delete_timed_schedule_instance(self):
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 3)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 3)
        delete_timed_schedule_instance(self.timed_instance1)
        self.assertEqual(AlertScheduleInstance.objects.using(self.db).count(), 3)
        self.assertEqual(TimedScheduleInstance.objects.using(self.db).count(), 2)
        with self.assertRaises(TimedScheduleInstance.DoesNotExist):
            get_timed_schedule_instance(self.uuid4)
    def test_get_active_alert_schedule_instance_ids(self):
        # Inactive instances (alert_instance3) are excluded; the window must
        # bracket the fixture's 2017-03-01 due date to match anything.
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                AlertScheduleInstance,
                datetime(2017, 4, 1),
                due_after=datetime(2017, 2, 1),
            ),
            [(self.domain, self.alert_instance1.schedule_instance_id, self.alert_instance1.next_event_due),
             (self.domain, self.alert_instance2.schedule_instance_id, self.alert_instance2.next_event_due)]
        )
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                AlertScheduleInstance,
                datetime(2016, 4, 1),
                due_after=datetime(2016, 2, 1),
            ),
            []
        )
    def test_get_active_timed_schedule_instance_ids(self):
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                TimedScheduleInstance,
                datetime(2017, 4, 1),
                due_after=datetime(2017, 2, 1),
            ),
            [(self.domain, self.timed_instance1.schedule_instance_id, self.timed_instance1.next_event_due),
             (self.domain, self.timed_instance2.schedule_instance_id, self.timed_instance2.next_event_due)],
        )
        self.assertItemsEqual(
            get_active_schedule_instance_ids(
                TimedScheduleInstance,
                datetime(2016, 4, 1),
                due_after=datetime(2016, 2, 1),
            ),
            []
        )
    def test_get_alert_schedule_instances_for_schedule(self):
        # Both active and inactive instances of the schedule are returned.
        self.assertItemsEqual(
            get_alert_schedule_instances_for_schedule(AlertSchedule(schedule_id=self.schedule_id1)),
            [self.alert_instance2, self.alert_instance3]
        )
    def test_get_timed_schedule_instances_for_schedule(self):
        self.assertItemsEqual(
            get_timed_schedule_instances_for_schedule(TimedSchedule(schedule_id=self.schedule_id2)),
            [self.timed_instance2, self.timed_instance3]
        )
| bsd-3-clause | fe11a207b15da739b1764587e7ae7f07 | 40.022222 | 108 | 0.682124 | 3.879781 | false | true | false | false |
dimagi/commcare-hq | corehq/tests/util/artifact.py | 1 | 1235 | import os
import logging
from contextlib import contextmanager
logger = logging.getLogger(__name__)

# All artifact files are written under this directory, relative to the CWD.
ROOT_ARTIFACTS_DIR = "artifacts"


@contextmanager
def artifact(filename, stream, mode="b", sub_name=None):
    """Context manager for writing artifact files on failure/error.

    On a clean exit nothing is written; when the guarded block raises, the
    stream contents are dumped to an artifact file and the exception is
    re-raised.

    :param filename: name of the artifact file to write.
    :param stream: file-like object used to read artifact contents
    :param mode: (optional) mode for opening artifact file (default=``b``, use
        ``t`` to open in text-write mode)
    :param sub_name: (optional) write the artifact file in a sub-directory
    """
    try:
        yield
    except Exception:
        artifact_dir = ROOT_ARTIFACTS_DIR
        if sub_name:
            artifact_dir = os.path.join(artifact_dir, sub_name)
        # makedirs(exist_ok=True) replaces the previous exists()+mkdir()
        # sequence, which was racy if tests wrote artifacts concurrently.
        os.makedirs(artifact_dir, exist_ok=True)
        artifact_path = os.path.join(artifact_dir, filename)
        logger.info("writing artifact: %s", artifact_path)
        with open(artifact_path, f"w{mode}") as file:
            file.write(stream.read())
        raise
| bsd-3-clause | 91852bb5e3e9d79ccc9a1aa2812abf52 | 34.285714 | 78 | 0.646964 | 3.933121 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/projects/migrations/0069_auto_20180316_1553.py | 1 | 1283 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-16 14:53
from __future__ import unicode_literals
import bluebottle.utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the denormalized lat/long columns from Project and make the
    corresponding ProjectLocation fields nullable."""
    dependencies = [
        ('projects', '0068_auto_20180306_1614'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='project',
            name='latitude',
        ),
        migrations.RemoveField(
            model_name='project',
            name='longitude',
        ),
        migrations.AlterField(
            model_name='projectcreatetemplate',
            name='default_amount_asked',
            # NOTE(review): currency_choices is a literal string repr of a
            # list; this matches what makemigrations emitted, so left as-is.
            field=bluebottle.utils.fields.MoneyField(blank=True, currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=None, max_digits=12, null=True),
        ),
        migrations.AlterField(
            model_name='projectlocation',
            name='latitude',
            field=models.DecimalField(decimal_places=18, max_digits=21, null=True, verbose_name='latitude'),
        ),
        migrations.AlterField(
            model_name='projectlocation',
            name='longitude',
            field=models.DecimalField(decimal_places=18, max_digits=21, null=True, verbose_name='longitude'),
        ),
    ]
| bsd-3-clause | f316671164092868906606321679fbb9 | 31.897436 | 162 | 0.599376 | 4.179153 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/funding/models.py | 1 | 23992 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import random
import string
from builtins import object
from builtins import range
from babel.numbers import get_currency_name
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import connection
from django.db import models
from django.db.models import Count
from django.db.models import SET_NULL
from django.db.models.aggregates import Sum
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from future.utils import python_2_unicode_compatible
from moneyed import Money
from polymorphic.models import PolymorphicModel
from tenant_schemas.postgresql_backend.base import FakeTenant
from bluebottle.activities.models import Activity, Contributor
from bluebottle.activities.models import Contribution
from bluebottle.clients import properties
from bluebottle.files.fields import ImageField, PrivateDocumentField
from bluebottle.fsm.triggers import TriggerMixin
from bluebottle.funding.validators import KYCReadyValidator, DeadlineValidator, BudgetLineValidator, TargetValidator, \
DeadlineMaxValidator
from bluebottle.utils.exchange_rates import convert
from bluebottle.utils.fields import MoneyField
from bluebottle.utils.models import BasePlatformSettings, AnonymizationMixin, ValidatedModelMixin
logger = logging.getLogger(__name__)
class PaymentCurrency(models.Model):
    # Per-provider configuration of one accepted currency: the allowed amount
    # range plus four suggested donation amounts.
    provider = models.ForeignKey('funding.PaymentProvider', on_delete=models.CASCADE)
    code = models.CharField(max_length=3, default='EUR')  # ISO 4217 currency code
    min_amount = models.DecimalField(default=5.0, decimal_places=2, max_digits=10)
    max_amount = models.DecimalField(null=True, blank=True, decimal_places=2, max_digits=10)  # None = no upper bound
    # Suggested amounts (presumably shown as quick-pick options in the
    # donation form — confirm against the frontend).
    default1 = models.DecimalField(decimal_places=2, max_digits=10)
    default2 = models.DecimalField(decimal_places=2, max_digits=10)
    default3 = models.DecimalField(decimal_places=2, max_digits=10)
    default4 = models.DecimalField(decimal_places=2, max_digits=10)
    class Meta(object):
        verbose_name = _('Payment currency')
        verbose_name_plural = _('Payment currencies')
@python_2_unicode_compatible
class PaymentProvider(PolymorphicModel):
    """Polymorphic base model for payment service provider configurations."""
    title = 'Payment Service Provider'
    # NOTE(review): class-level dicts are shared across instances/subclasses;
    # subclasses appear to override them rather than mutate in place.
    public_settings = {}
    private_settings = {}
    refund_enabled = False

    @property
    def available_currencies(self):
        """All currency codes supported by this provider's payment methods
        (deduplicated, in first-seen order)."""
        currencies = []
        for method in self.payment_methods:
            for cur in method.currencies:
                if cur not in currencies:
                    currencies.append(cur)
        return currencies

    @classmethod
    def get_currency_choices(cls):
        """Return (code, display name) choices across all configured providers."""
        currencies = []
        if isinstance(connection.tenant, FakeTenant):
            # No tenant schema available (e.g. during migrations): fixed fallback.
            currencies = [('EUR', 'Euro')]
        else:
            for provider in cls.objects.all():
                for cur in provider.paymentcurrency_set.all():
                    currency = (cur.code, get_currency_name(cur.code))
                    if currency not in currencies:
                        currencies.append(currency)
        return currencies

    @classmethod
    def get_default_currency(cls):
        """Return the first configured currency choice, or 'EUR'."""
        # Hoisted the duplicate get_currency_choices() call (it queries the DB).
        choices = cls.get_currency_choices()
        if choices:
            # NOTE(review): returns a (code, name) tuple here but a bare code
            # string below — inconsistent return type preserved for
            # backwards compatibility; confirm callers before unifying.
            return choices[0]
        return 'EUR'

    @property
    def payment_methods(self):
        """Subclasses override this to expose their payment methods."""
        return []

    def __str__(self):
        return str(self.polymorphic_ctype)

    @property
    def name(self):
        # e.g. 'StripePaymentProvider' -> 'stripe'
        return self.__class__.__name__.replace('PaymentProvider', '').lower()

    def save(self, **kwargs):
        """On first save, seed a PaymentCurrency row (with default suggested
        amounts) for every currency the provider supports."""
        created = self.pk is None
        model = super(PaymentProvider, self).save(**kwargs)
        if created:
            for currency in self.available_currencies:
                PaymentCurrency.objects.create(
                    provider=self,
                    code=currency,
                    min_amount=5,
                    default1=10,
                    default2=20,
                    default3=50,
                    default4=100,
                )
        return model
class Funding(Activity):
    """Crowdfunding activity: collects donations toward a monetary target
    before a deadline (or for a duration from which the deadline is derived)."""
    deadline = models.DateTimeField(
        _('deadline'),
        null=True,
        blank=True,
        help_text=_('If you enter a deadline, leave the duration field empty. This will override the duration.')
    )
    duration = models.PositiveIntegerField(
        _('duration'),
        null=True,
        blank=True,
        help_text=_('If you enter a duration, leave the deadline field empty for it to be automatically calculated.')
    )
    target = MoneyField(default=Money(0, 'EUR'), null=True, blank=True)
    amount_matching = MoneyField(default=Money(0, 'EUR'), null=True, blank=True)
    country = models.ForeignKey('geo.Country', null=True, blank=True, on_delete=models.SET_NULL)
    bank_account = models.ForeignKey('funding.BankAccount', null=True, blank=True, on_delete=SET_NULL)
    started = models.DateTimeField(
        _('started'),
        null=True,
        blank=True,
    )
    needs_review = True
    validators = [KYCReadyValidator, DeadlineValidator, DeadlineMaxValidator, BudgetLineValidator, TargetValidator]
    auto_approve = False

    def admin_clean(self):
        """Run all activity validators, surfacing the first failure to admin forms."""
        for val in self.validators:
            validator = val(self)
            if not validator.is_valid():
                raise ValidationError(validator.message)

    @property
    def required_fields(self):
        fields = super().required_fields + ['title', 'description', 'target', 'bank_account']
        if not self.duration:
            # Without a duration, the deadline must be set explicitly.
            fields.append('deadline')
        return fields

    class JSONAPIMeta(object):
        resource_name = 'activities/fundings'

    class Meta(object):
        verbose_name = _("Funding")
        verbose_name_plural = _("Funding Activities")
        permissions = (
            ('api_read_funding', 'Can view funding through the API'),
            ('api_add_funding', 'Can add funding through the API'),
            ('api_change_funding', 'Can change funding through the API'),
            ('api_delete_funding', 'Can delete funding through the API'),
            ('api_read_own_funding', 'Can view own funding through the API'),
            ('api_add_own_funding', 'Can add own funding through the API'),
            ('api_change_own_funding', 'Can change own funding through the API'),
            ('api_delete_own_funding', 'Can delete own funding through the API'),
        )

    def update_amounts(self):
        """Invalidate the cached donation totals for this activity."""
        cache_key = '{}.{}.amount_donated'.format(connection.tenant.schema_name, self.id)
        cache.delete(cache_key)
        cache_key = '{}.{}.genuine_amount_donated'.format(connection.tenant.schema_name, self.id)
        cache.delete(cache_key)

    @property
    def activity_date(self):
        return self.deadline

    @property
    def donations(self):
        # Only the Donor subclass of the polymorphic contributors relation.
        return self.contributors.instance_of(Donor)

    @property
    def amount_donated(self):
        """
        The sum of all contributors (donations) converted to the targets currency
        """
        from .states import DonorStateMachine
        from bluebottle.funding.utils import calculate_total
        cache_key = '{}.{}.amount_donated'.format(connection.tenant.schema_name, self.id)
        total = cache.get(cache_key)
        if total is None:
            # Bug fix: this used to be `if not total`, which treated a cached
            # zero amount as a miss and recomputed it on every access.
            donations = self.donations.filter(
                status__in=(
                    DonorStateMachine.succeeded.value,
                    DonorStateMachine.activity_refunded.value,
                )
            )
            if self.target and self.target.currency:
                total = calculate_total(donations, self.target.currency)
            else:
                total = calculate_total(donations, properties.DEFAULT_CURRENCY)
            cache.set(cache_key, total)
        return total

    @property
    def genuine_amount_donated(self):
        """
        The sum of all contributors (donations) without pledges converted to the targets currency
        """
        from .states import DonorStateMachine
        from bluebottle.funding.utils import calculate_total
        cache_key = '{}.{}.genuine_amount_donated'.format(connection.tenant.schema_name, self.id)
        total = cache.get(cache_key)
        if total is None:
            # Same cache-miss fix as in amount_donated.
            donations = self.donations.filter(
                status__in=(
                    DonorStateMachine.succeeded.value,
                    DonorStateMachine.activity_refunded.value,
                ),
                donor__payment__pledgepayment__isnull=True
            )
            if self.target and self.target.currency:
                total = calculate_total(donations, self.target.currency)
            else:
                total = calculate_total(donations, properties.DEFAULT_CURRENCY)
            cache.set(cache_key, total)
        return total

    @cached_property
    def amount_pledged(self):
        """
        The sum of all pledged contributors (donations) converted to the targets currency
        """
        from .states import DonorStateMachine
        from bluebottle.funding.utils import calculate_total
        donations = self.donations.filter(
            status__in=(
                DonorStateMachine.succeeded.value,
                DonorStateMachine.activity_refunded.value,
            ),
            donor__payment__pledgepayment__isnull=False
        )
        if self.target and self.target.currency:
            total = calculate_total(donations, self.target.currency)
        else:
            total = calculate_total(donations, properties.DEFAULT_CURRENCY)
        return total

    @property
    def amount_raised(self):
        """
        The sum of amount donated + amount matching
        """
        if self.target:
            currency = self.target.currency
        else:
            currency = 'EUR'
        total = self.amount_donated
        if self.amount_matching:
            total += convert(
                self.amount_matching,
                currency
            )
        return total

    @property
    def stats(self):
        """Donor count plus raised amount, serialized for API consumers."""
        from .states import DonorStateMachine
        stats = self.donations.filter(
            status=DonorStateMachine.succeeded.value
        ).aggregate(
            count=Count('user__id')
        )
        stats['amount'] = {'amount': self.amount_raised.amount, 'currency': str(self.amount_raised.currency)}
        return stats

    def save(self, *args, **kwargs):
        # Keep reward and budget line currencies in sync with the target
        # currency before persisting.
        if self.target:
            for reward in self.rewards.all():
                if reward.amount and not reward.amount.currency == self.target.currency:
                    reward.amount = Money(reward.amount.amount, self.target.currency)
                    reward.save()
        for line in self.budget_lines.all():
            if self.target and not line.amount.currency == self.target.currency:
                line.amount = Money(line.amount.amount, self.target.currency)
                line.save()
        super(Funding, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Reward(models.Model):
    """
    Rewards for donations
    """
    amount = MoneyField(_('Amount'))  # minimum donation to claim this reward
    title = models.CharField(_('Title'), max_length=200)
    description = models.CharField(_('Description'), max_length=500)
    activity = models.ForeignKey(
        'funding.Funding', verbose_name=_('Activity'), related_name='rewards', on_delete=models.CASCADE
    )
    limit = models.IntegerField(
        _('Limit'),
        null=True,
        blank=True,
        help_text=_('How many of this rewards are available')
    )
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(auto_now=True)
    @property
    def count(self):
        # Number of succeeded donations that claimed this reward.
        from .states import DonorStateMachine
        return self.donations.filter(
            status=DonorStateMachine.succeeded.value
        ).count()
    def __str__(self):
        return self.title
    class Meta(object):
        ordering = ['-activity__created', 'amount']
        verbose_name = _("Gift")
        verbose_name_plural = _("Gifts")
    class JSONAPIMeta(object):
        resource_name = 'activities/rewards'
    def delete(self, *args, **kwargs):
        # Guard: rewards tied to successful donations must stay for bookkeeping.
        if self.count:
            raise ValueError(_('Not allowed to delete a reward with successful donations.'))
        return super(Reward, self).delete(*args, **kwargs)
@python_2_unicode_compatible
class BudgetLine(models.Model):
    """
    BudgetLine: Entries to the Activity Budget sheet.
    """
    activity = models.ForeignKey(
        'funding.Funding', related_name='budget_lines', on_delete=models.CASCADE
    )
    description = models.CharField(_('description'), max_length=255, default='')
    amount = MoneyField()  # kept in the activity's target currency by Funding.save()
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(auto_now=True)
    class JSONAPIMeta(object):
        resource_name = 'activities/budget-lines'
    class Meta(object):
        verbose_name = _('budget line')
        verbose_name_plural = _('budget lines')
    def __str__(self):
        return u'{0} - {1}'.format(self.description, self.amount)
@python_2_unicode_compatible
class Fundraiser(AnonymizationMixin, models.Model):
    """A personal fundraising page collecting donations for a funding activity."""
    owner = models.ForeignKey(
        'members.Member', related_name="funding_fundraisers", on_delete=models.CASCADE
    )
    activity = models.ForeignKey(
        'funding.Funding',
        verbose_name=_("activity"),
        related_name="fundraisers",
        on_delete=models.CASCADE
    )
    title = models.CharField(_("title"), max_length=255)
    description = models.TextField(_("description"), blank=True)
    image = ImageField(blank=True, null=True)
    amount = MoneyField(_("amount"))  # the fundraiser's own target amount
    deadline = models.DateTimeField(_('deadline'), null=True, blank=True)
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.title
    @cached_property
    def amount_donated(self):
        # Sum succeeded/refunded donations per currency, convert each subtotal
        # to this fundraiser's currency, then add them together.
        from .states import DonorStateMachine
        donations = self.donations.filter(
            status__in=[
                DonorStateMachine.succeeded.value,
                DonorStateMachine.activity_refunded.value,
            ]
        )
        totals = [
            Money(data['amount__sum'], data['amount_currency']) for data in
            donations.values('amount_currency').annotate(Sum('amount')).order_by()
        ]
        totals = [convert(amount, self.amount.currency) for amount in totals]
        # sum() of an empty list is 0 (falsy), hence the zero-Money fallback.
        return sum(totals) or Money(0, self.amount.currency)
    class Meta(object):
        verbose_name = _('fundraiser')
        verbose_name_plural = _('fundraisers')
@python_2_unicode_compatible
class Payout(TriggerMixin, models.Model):
    """A batch of succeeded donations to be paid out for an activity,
    grouped per (currency, payment provider)."""
    activity = models.ForeignKey(
        'funding.Funding',
        verbose_name=_("activity"),
        related_name="payouts",
        on_delete=models.CASCADE
    )
    provider = models.CharField(max_length=100)
    currency = models.CharField(max_length=5)
    status = models.CharField(max_length=40)
    date_approved = models.DateTimeField(_('approved'), null=True, blank=True)
    date_started = models.DateTimeField(_('started'), null=True, blank=True)
    date_completed = models.DateTimeField(_('completed'), null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    @classmethod
    def generate(cls, activity):
        """(Re)build payouts for an activity.

        Deletes not-yet-started payouts, then groups succeeded donations that
        are not yet assigned to a payout by (payout currency, payment
        provider) and creates one payout per group, attaching the donations.
        """
        from .states import PayoutStateMachine
        for payout in cls.objects.filter(activity=activity):
            if payout.status == PayoutStateMachine.new.value:
                payout.delete()
            elif payout.donations.count() == 0:
                # A payout past the "new" state should always hold donations.
                raise AssertionError('Payout without donations already started!')
        ready_donations = activity.donations.filter(status='succeeded', donor__payout__isnull=True)
        groups = set([
            (don.payout_amount_currency, don.payment.provider) for don in
            ready_donations
        ])
        for currency, provider in groups:
            # Provider filtering happens in Python: it lives on the payment,
            # not on the donation row.
            donations = [
                don for don in
                ready_donations.filter(donor__payout_amount_currency=currency)
                if don.payment.provider == provider
            ]
            payout = cls.objects.create(
                activity=activity,
                provider=provider,
                currency=currency
            )
            for donation in donations:
                donation.payout = payout
                donation.save()
    @property
    def total_amount(self):
        # Prefer summing payout_amount in the payout's currency when known.
        if self.currency:
            return Money(self.donations.aggregate(total=Sum('payout_amount'))['total'] or 0, self.currency)
        return self.donations.aggregate(total=Sum('amount'))['total']
    class Meta(object):
        verbose_name = _('payout')
        verbose_name_plural = _('payouts')
    def __str__(self):
        return '{} #{} {}'.format(_('Payout'), self.id, self.activity.title)
@python_2_unicode_compatible
class Donor(Contributor):
    """A donation to a funding activity, possibly anonymous or from a guest."""
    amount = MoneyField()
    payout_amount = MoneyField()  # defaults to `amount` on first save
    client_secret = models.CharField(max_length=32, blank=True, null=True)
    reward = models.ForeignKey(
        Reward, null=True, blank=True, related_name="donations", on_delete=models.CASCADE
    )
    fundraiser = models.ForeignKey(
        Fundraiser, null=True, blank=True, related_name="donations", on_delete=models.CASCADE
    )
    name = models.CharField(max_length=200, null=True, blank=True,
                            verbose_name=_('Fake name'),
                            help_text=_('Override donor name / Name for guest donation'))
    anonymous = models.BooleanField(_('anonymous'), default=False)
    payout = models.ForeignKey(
        'funding.Payout', null=True, blank=True, on_delete=SET_NULL, related_name='donations'
    )

    def save(self, *args, **kwargs):
        if not self.user and not self.client_secret:
            # The client secret is the only credential a guest donor has for
            # accessing their donation, so draw it from a CSPRNG instead of
            # the predictable default Mersenne Twister generator.
            rng = random.SystemRandom()
            self.client_secret = ''.join(rng.choice(string.ascii_lowercase) for _ in range(32))
        if not self.payout_amount:
            self.payout_amount = self.amount
        super(Donor, self).save(*args, **kwargs)

    @property
    def date(self):
        return self.created

    @property
    def payment_method(self):
        # NOTE(review): `payment` is the reverse one-to-one from Payment;
        # assumes the attribute is falsy/available when no payment exists —
        # confirm upstream guards against RelatedObjectDoesNotExist.
        if not self.payment:
            return None
        return self.payment.type

    class Meta(object):
        verbose_name = _('Donation')
        verbose_name_plural = _('Donations')

    class JSONAPIMeta(object):
        resource_name = 'contributors/donations'
@python_2_unicode_compatible
class MoneyContribution(Contribution):
    # Monetary contribution value recorded against a contributor.
    value = MoneyField()

    class Meta(object):
        verbose_name = _('Donation')
        verbose_name_plural = _('Contributions')
@python_2_unicode_compatible
class Payment(TriggerMixin, PolymorphicModel):
    # Polymorphic base class for provider-specific payment implementations.
    status = models.CharField(max_length=40)
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField()  # refreshed in save()
    donation = models.OneToOneField(Donor, related_name='payment', on_delete=models.CASCADE)

    @property
    def can_update(self):
        # subclasses opt in by defining an `update` method
        return hasattr(self, 'update')

    @property
    def can_refund(self):
        # subclasses opt in by defining a `refund` method
        return hasattr(self, 'refund')

    def save(self, *args, **kwargs):
        # manual `updated` stamp instead of auto_now
        self.updated = timezone.now()
        super(Payment, self).save(*args, **kwargs)

    def __str__(self):
        return "{} - {}".format(self.polymorphic_ctype, self.id)

    class Meta(object):
        permissions = (
            ('refund_payment', 'Can refund payments'),
        )
class LegacyPayment(Payment):
    # Payment imported from the legacy platform; raw payload kept as text.
    method = models.CharField(max_length=100)
    data = models.TextField()

    provider = 'legacy'
class PaymentMethod(object):
    """Description of a payment method offered by a payment provider.

    Identified by ``provider`` + ``code``; optionally restricted to a set
    of currencies and countries.
    """
    # Class-level defaults, kept for backward compatibility with code that
    # reads these attributes on the class itself.
    code = ''
    provider = ''
    name = ''
    currencies = []
    countries = []

    def __init__(self, provider, code, name=None, currencies=None, countries=None):
        self.provider = provider
        self.code = code
        # display name falls back to the code
        self.name = name if name else code
        # Always assign fresh per-instance lists (copying the input) so
        # instances never share -- and accidentally mutate -- the class-level
        # default lists above.
        self.currencies = list(currencies) if currencies else []
        self.countries = list(countries) if countries else []

    @property
    def id(self):
        """Unique identifier, e.g. ``stripe-creditcard``."""
        return "{}-{}".format(self.provider, self.code)

    @property
    def pk(self):
        # mimic a Django model primary key for serializers
        return self.id

    class JSONAPIMeta(object):
        resource_name = 'payments/payment-methods'
@python_2_unicode_compatible
class PayoutAccount(TriggerMixin, ValidatedModelMixin, AnonymizationMixin, PolymorphicModel):
    # Polymorphic base class for KYC/payout account implementations.
    status = models.CharField(max_length=40)
    owner = models.ForeignKey(
        'members.Member',
        related_name='funding_payout_account',
        on_delete=models.CASCADE
    )

    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(auto_now=True)

    reviewed = models.BooleanField(default=False)

    @property
    def funding(self):
        # First funding activity reachable through any external (bank)
        # account; returns None implicitly when there is none.
        for account in self.external_accounts.all():
            for funding in account.funding_set.all():
                return funding

    def __str__(self):
        return "Payout account #{}".format(self.id)
class PlainPayoutAccount(PayoutAccount):
    # Payout account verified manually by staff reviewing an uploaded KYC document.
    document = PrivateDocumentField(
        blank=True, null=True, on_delete=models.deletion.SET_NULL, view_name='kyc-document'
    )

    ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True, default=None)

    @property
    def verified(self):
        # verified == manually reviewed
        return self.reviewed

    class Meta(object):
        verbose_name = _('Plain KYC account')
        verbose_name_plural = _('Plain KYC accounts')

    class JSONAPIMeta(object):
        resource_name = 'payout-accounts/plains'

    @property
    def required_fields(self):
        """Fields that still need to be supplied given the current status."""
        required = []
        if self.status == 'new':
            required.append('document')
        return required

    def __str__(self):
        return "KYC account for {}".format(self.owner.full_name)
@python_2_unicode_compatible
class BankAccount(TriggerMixin, PolymorphicModel):
    """Polymorphic base class for provider-specific external (bank) accounts.

    Concrete subclasses must set ``provider_class`` to their payment
    provider model.
    """
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    reviewed = models.BooleanField(default=False)

    connect_account = models.ForeignKey(
        'funding.PayoutAccount',
        null=True, blank=True,
        related_name='external_accounts',
        on_delete=models.CASCADE
    )

    status = models.CharField(max_length=40)

    @property
    def parent(self):
        return self.connect_account

    @property
    def ready(self):
        return True

    @property
    def owner(self):
        return self.connect_account.owner

    provider_class = None  # must be overridden by concrete subclasses

    @property
    def type(self):
        return self.provider_class().name

    @property
    def funding(self):
        # most recently created funding activity using this account
        return self.funding_set.order_by('-created').first()

    @property
    def payment_methods(self):
        """Payment methods of the first provider matching this account's currencies.

        Returns an empty list when no provider matches or data is missing.
        """
        if self.provider_class is None:
            # Bug fix guard: evaluating `self.provider_class.DoesNotExist`
            # in the except tuple below would itself raise AttributeError
            # when provider_class is unset, escaping the handler. Bail out
            # early instead.
            return []
        try:
            currencies = [f.target.currency for f in self.funding_set.all() if f.target]
            provider = self.provider_class.objects.filter(paymentcurrency__code__in=currencies).first()
            return provider.payment_methods
        except (AttributeError, self.provider_class.DoesNotExist) as e:
            logging.error(e)
            return []

    class JSONAPIMeta(object):
        resource_name = 'payout-accounts/external-accounts'

    # NOTE(review): class-level mutable default, shared by all instances;
    # subclasses are expected to override it.
    public_data = {}

    def __str__(self):
        return "Bank account #{}".format(self.id)

    class Meta:
        ordering = ('id',)
class FundingPlatformSettings(BasePlatformSettings):
    # Tenant-level configuration toggles for the funding app.
    anonymous_donations = models.BooleanField(
        _('Hide names from all donations'), default=False
    )
    allow_anonymous_rewards = models.BooleanField(
        _('Allow guests to donate rewards'), default=True
    )

    class Meta(object):
        verbose_name_plural = _('funding settings')
        verbose_name = _('funding settings')
from bluebottle.funding.periodic_tasks import * # noqa
| bsd-3-clause | dc6d0cff99e42282616c12a07af3aad8 | 30.989333 | 119 | 0.628168 | 4.15878 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/app_manager/migrations/0009_resourceoverride.py | 1 | 1077 | # Generated by Django 1.11.22 on 2019-11-01 22:02
from django.core.management import call_command
from django.db import migrations, models
from corehq.util.django_migrations import skip_on_fresh_install
class Migration(migrations.Migration):
    # Adds the ResourceOverride table, unique per
    # (domain, app_id, root_name, pre_id), mapping one resource id
    # (pre_id) to a replacement (post_id).

    dependencies = [
        ('app_manager', '0008_remove_uses_master_app_form_ids'),
    ]

    operations = [
        migrations.CreateModel(
            name='ResourceOverride',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=255)),
                ('app_id', models.CharField(max_length=255)),
                ('root_name', models.CharField(max_length=32)),
                ('pre_id', models.CharField(max_length=255)),
                ('post_id', models.CharField(max_length=255)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='resourceoverride',
            unique_together=set([('domain', 'app_id', 'root_name', 'pre_id')]),
        ),
    ]
| bsd-3-clause | d09e48cecb27bf7dddfd0b10b2390fc9 | 33.741935 | 114 | 0.588672 | 4.033708 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/data_interfaces/migrations/0006_case_rule_refactor.py | 1 | 4903 | # Generated by Django 1.10.6 on 2017-04-04 12:54
import django.db.models.deletion
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
    # Case-rule refactor: creates the CaseRuleCriteria / CaseRuleAction link
    # tables and the concrete criterion/action definition models, and adds a
    # `migrated` flag to AutomaticUpdateRule.

    dependencies = [
        ('data_interfaces', '0005_remove_match_type_choices'),
    ]

    operations = [
        migrations.CreateModel(
            name='CaseRuleAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='CaseRuleCriteria',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='ClosedParentDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('identifier', models.CharField(default='parent', max_length=126)),
                ('relationship_id', models.PositiveSmallIntegerField(default=1)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CustomActionDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=126)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CustomMatchDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=126)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MatchPropertyDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('property_name', models.CharField(max_length=126)),
                ('property_value', models.CharField(max_length=126, null=True)),
                ('match_type', models.CharField(max_length=15)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='UpdateCaseDefinition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('properties_to_update', jsonfield.fields.JSONField(default=list)),
                ('close_case', models.BooleanField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='automaticupdaterule',
            name='migrated',
            field=models.BooleanField(default=False),
        ),
        # Each criteria/action row points at exactly one of its optional
        # definition FKs below; the `rule` FK is PROTECTed so a rule cannot
        # be deleted while criteria/actions still reference it.
        migrations.AddField(
            model_name='caserulecriteria',
            name='closed_parent_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.ClosedParentDefinition'),
        ),
        migrations.AddField(
            model_name='caserulecriteria',
            name='custom_match_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.CustomMatchDefinition'),
        ),
        migrations.AddField(
            model_name='caserulecriteria',
            name='match_property_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.MatchPropertyDefinition'),
        ),
        migrations.AddField(
            model_name='caserulecriteria',
            name='rule',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule'),
        ),
        migrations.AddField(
            model_name='caseruleaction',
            name='custom_action_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.CustomActionDefinition'),
        ),
        migrations.AddField(
            model_name='caseruleaction',
            name='rule',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule'),
        ),
        migrations.AddField(
            model_name='caseruleaction',
            name='update_case_definition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='data_interfaces.UpdateCaseDefinition'),
        ),
    ]
| bsd-3-clause | 44a4661644ed84f64f39870214776b78 | 39.188525 | 138 | 0.567408 | 4.586529 | false | false | false | false |
dimagi/commcare-hq | corehq/ex-submodules/casexml/apps/phone/data_providers/case/livequery.py | 1 | 15756 | """Restore logic implementation aiming to minimize database queries
Example case graphs with outcomes:
a <--ext-- d(owned) >> a b d
<--ext-- b
e(owned) --ext--> a(closed) >> a b e
--ext--> b
b(closed) <--chi-- a(owned) >> a b c
<--ext-- c
a(closed) <--ext-- d(owned) >> []
a <--ext-- b <--ext-- c(owned) >> a b c
a(closed) <--ext-- b <--ext-- c(owned) >> []
a(closed) <--ext-- b <--ext-- c(owned) <--chi-- d >> []
a(closed) <--ext-- b <--chi-- c(owned) >> []
"""
import logging
from collections import defaultdict
from functools import partial, wraps
from itertools import chain, islice
from casexml.apps.case.const import CASE_INDEX_EXTENSION as EXTENSION
from casexml.apps.phone.const import ASYNC_RETRY_AFTER
from casexml.apps.phone.tasks import ASYNC_RESTORE_SENT
from corehq.form_processor.models import CommCareCase, CommCareCaseIndex
from corehq.sql_db.routers import read_from_plproxy_standbys
from corehq.toggles import LIVEQUERY_READ_FROM_STANDBYS, NAMESPACE_USER
from corehq.util.metrics import metrics_counter, metrics_histogram
from corehq.util.metrics.load_counters import case_load_counter
from corehq.util.timer import TimingContext
from .load_testing import get_xml_for_response
from .stock import get_stock_payload
from .utils import get_case_sync_updates
def livequery_read_from_standbys(func):
    """Decorator: route reads to plproxy standby databases for flagged users."""
    @wraps(func)
    def wrapper(timing_context, restore_state, response, async_task=None):
        user_id = restore_state.restore_user.user_id
        if not LIVEQUERY_READ_FROM_STANDBYS.enabled(user_id, NAMESPACE_USER):
            return func(timing_context, restore_state, response, async_task)
        with read_from_plproxy_standbys():
            return func(timing_context, restore_state, response, async_task)
    return wrapper
@livequery_read_from_standbys
def do_livequery(timing_context, restore_state, response, async_task=None):
    """Get case sync restore response

    This function makes no changes to external state other than updating
    the `restore_state.current_sync_log` and progress of `async_task`.

    Extends `response` with restore elements.
    """
    debug = logging.getLogger(__name__).debug
    domain = restore_state.domain
    owner_ids = list(restore_state.owner_ids)

    debug("sync %s for %r", restore_state.current_sync_log._id, owner_ids)
    with timing_context("livequery"):
        with timing_context("get_case_ids_by_owners"):
            # open cases owned by the restoring user's owner ids
            owned_ids = CommCareCase.objects.get_case_ids_in_domain_by_owners(
                domain, owner_ids, closed=False)
            debug("owned: %r", owned_ids)

        # expand the owned set to the full live case graph (see module docstring)
        live_ids, indices = get_live_case_ids_and_indices(domain, owned_ids, timing_context)

        if restore_state.last_sync_log:
            with timing_context("discard_already_synced_cases"):
                debug('last sync: %s', restore_state.last_sync_log._id)
                sync_ids = discard_already_synced_cases(live_ids, restore_state)
        else:
            sync_ids = live_ids

        # live cases the user does not own are "dependent" (kept for graph integrity)
        dependent_ids = live_ids - set(owned_ids)
        debug('updating synclog: live=%r dependent=%r', live_ids, dependent_ids)
        restore_state.current_sync_log.case_ids_on_phone = live_ids
        restore_state.current_sync_log.dependent_case_ids_on_phone = dependent_ids

        total_cases = len(sync_ids)
        with timing_context("compile_response(%s cases)" % total_cases):
            iaccessor = PrefetchIndexCaseAccessor(domain, indices)
            metrics_histogram(
                'commcare.restore.case_load',
                len(sync_ids),
                'cases',
                RESTORE_CASE_LOAD_BUCKETS,
                tags={
                    'domain': domain,
                    'restore_type': 'incremental' if restore_state.last_sync_log else 'fresh'
                }
            )
            metrics_counter('commcare.restore.case_load.count', total_cases, {'domain': domain})
            compile_response(
                timing_context,
                restore_state,
                response,
                batch_cases(iaccessor, sync_ids),
                init_progress(async_task, total_cases),
                total_cases,
            )
def get_case_hierarchy(domain, cases):
    """Return ``cases`` plus every related case in their combined hierarchy."""
    assert {case.domain for case in cases} == {domain}, \
        "All cases must belong to the same domain"
    input_ids = {case.case_id for case in cases}
    hierarchy_ids, indices = get_live_case_ids_and_indices(domain, input_ids, TimingContext())
    accessor = PrefetchIndexCaseAccessor(domain, indices)
    fetched = accessor.get_cases(list(hierarchy_ids - input_ids))
    return cases + fetched
def get_live_case_ids_and_indices(domain, owned_ids, timing_context):
    """Expand `owned_ids` into the full set of "live" case ids plus their indices.

    Implements the liveness rules illustrated in the module docstring by
    iteratively fetching related indices and classifying each edge.

    :returns: ``(live_ids, indices)`` where ``indices`` maps case_id to its
        list of CommCareCaseIndex-like objects.
    """
    def index_key(index):
        # unique key for one edge of the case graph
        return '{} {}'.format(index.case_id, index.identifier)

    def is_extension(case_id):
        """Determine if case_id is an extension case

        A case that is both a child and an extension is not an extension.
        """
        return case_id in hosts_by_extension and case_id not in parents_by_child

    def has_live_extension(case_id, cache={}):
        """Check if available case_id has a live extension case

        Do not check for live children because an available parent
        cannot cause its children to become live. This is unlike an
        available host, which can cause its available extension to
        become live through the recursive rules:

        - A case is available if
          - it is open and not an extension case (applies to host).
          - it is open and is the extension of an available case.
        - A case is live if it is owned and available.

        The result is cached to reduce recursion in subsequent calls
        and to prevent infinite recursion.

        NOTE the mutable default `cache` is intentional: this function is
        re-defined on every call of the enclosing function, so the cache
        is fresh per outer call.
        """
        try:
            return cache[case_id]
        except KeyError:
            # pre-seed False to break cycles during the recursive any() below
            cache[case_id] = False
        cache[case_id] = result = any(
            ext_id in live_ids  # has live extension
            or ext_id in owned_ids  # ext is owned and available, will be live
            or has_live_extension(ext_id)
            for ext_id in extensions_by_host[case_id]
        )
        return result

    def enliven(case_id):
        """Mark the given case, its extensions and their hosts as live

        This closure mutates `live_ids` from the enclosing function.
        """
        if case_id in live_ids:
            # already live
            return
        debug('enliven(%s)', case_id)
        live_ids.add(case_id)
        # case is open and is the extension of a live case
        ext_ids = extensions_by_host.get(case_id, [])
        # case has live extension
        host_ids = hosts_by_extension.get(case_id, [])
        # case has live child
        parent_ids = parents_by_child.get(case_id, [])
        for cid in chain(ext_ids, host_ids, parent_ids):
            enliven(cid)

    def classify(index, prev_ids):
        """Classify index as either live or extension with live status pending

        This closure mutates case graph data structures from the
        enclosing function.

        :returns: Case id for next related index fetch or IGNORE
        if the related case should be ignored.
        """
        sub_id = index.case_id
        ref_id = index.referenced_id  # aka parent/host/super
        relationship = index.relationship
        ix_key = index_key(index)
        if ix_key in seen_ix[sub_id]:
            return IGNORE  # unexpected, don't process duplicate index twice
        seen_ix[sub_id].add(ix_key)
        seen_ix[ref_id].add(ix_key)
        debug("%s --%s--> %s", sub_id, relationship, ref_id)
        if sub_id in live_ids:
            # ref has a live child or extension
            enliven(ref_id)
            # It does not matter that sub_id -> ref_id never makes it into
            # hosts_by_extension since both are live and therefore this index
            # will not need to be traversed in other liveness calculations.
        elif relationship == EXTENSION:
            if sub_id in open_ids:
                if ref_id in live_ids:
                    # sub is open and is the extension of a live case
                    enliven(sub_id)
                else:
                    # live status pending:
                    # if ref becomes live -> sub is open extension of live case
                    # if sub becomes live -> ref has a live extension
                    extensions_by_host[ref_id].add(sub_id)
                    hosts_by_extension[sub_id].add(ref_id)
            else:
                return IGNORE  # closed extension
        elif sub_id in owned_ids:
            # sub is owned and available (open and not an extension case)
            enliven(sub_id)
            # ref has a live child
            enliven(ref_id)
        else:
            # live status pending: if sub becomes live -> ref has a live child
            parents_by_child[sub_id].add(ref_id)
        next_id = ref_id if sub_id in prev_ids else sub_id
        if next_id not in all_ids:
            return next_id
        return IGNORE  # circular reference

    def update_open_and_deleted_ids(related):
        """Update open_ids and deleted_ids with related case_ids

        TODO store referenced case (parent) deleted and closed status in
        CommCareCaseIndex to reduce number of related indices fetched
        and avoid this extra query per related query.
        """
        case_ids = {case_id
            for index in related
            for case_id in [index.case_id, index.referenced_id]
            if case_id not in all_ids}
        # we know these are open since we filter by closed and deleted when fetching the indexes
        open_cases = {
            index.case_id for index in related
            if index.relationship == 'extension'
        }
        check_cases = list(set(case_ids) - open_cases)
        rows = CommCareCase.objects.get_closed_and_deleted_ids(domain, check_cases)
        for case_id, closed, deleted in rows:
            if deleted:
                deleted_ids.add(case_id)
            if closed or deleted:
                case_ids.remove(case_id)
        open_ids.update(case_ids)

    def filter_deleted_indices(related):
        # drop edges whose referenced case was deleted (referenced_id falsy)
        live_related = []
        for index in related:
            # add all indices to `indices` so that they are included in the restore
            indices[index.case_id].append(index)
            if index.referenced_id:
                live_related.append(index)
        return live_related

    IGNORE = object()  # sentinel returned by classify for edges to skip
    debug = logging.getLogger(__name__).debug

    # case graph data structures
    live_ids = set()
    deleted_ids = set()
    extensions_by_host = defaultdict(set)  # host_id -> (open) extension_ids
    hosts_by_extension = defaultdict(set)  # (open) extension_id -> host_ids
    parents_by_child = defaultdict(set)  # child_id -> parent_ids
    indices = defaultdict(list)  # case_id -> list of CommCareCaseIndex-like, used as a cache for later
    seen_ix = defaultdict(set)  # case_id -> set of '<index.case_id> <index.identifier>'
    next_ids = all_ids = set(owned_ids)
    owned_ids = set(owned_ids)  # owned, open case ids (may be extensions)
    open_ids = set(owned_ids)

    get_related_indices = partial(CommCareCaseIndex.objects.get_related_indices, domain)
    # breadth-first traversal of the case graph, one related-index query per level
    while next_ids:
        exclude = set(chain.from_iterable(seen_ix[id] for id in next_ids))
        with timing_context("get_related_indices({} cases, {} seen)".format(len(next_ids), len(exclude))):
            related = get_related_indices(list(next_ids), exclude)
        related_not_deleted = filter_deleted_indices(related)
        if not related:
            break
        update_open_and_deleted_ids(related_not_deleted)
        next_ids = {classify(index, next_ids)
            for index in related_not_deleted
            if index.referenced_id not in deleted_ids
            and index.case_id not in deleted_ids}
        next_ids.discard(IGNORE)
        all_ids.update(next_ids)
        debug('next: %r', next_ids)

    with timing_context("enliven open roots (%s cases)" % len(open_ids)):
        debug('open: %r', open_ids)
        # owned, open, not an extension -> live
        for case_id in owned_ids:
            if not is_extension(case_id):
                enliven(case_id)
        # available case with live extension -> live
        for case_id in open_ids:
            if (case_id not in live_ids
                    and not is_extension(case_id)
                    and has_live_extension(case_id)):
                enliven(case_id)
        debug('live: %r', live_ids)

    return live_ids, indices
def discard_already_synced_cases(live_ids, restore_state):
    """Drop cases already on the phone, keeping ones modified since last sync."""
    debug = logging.getLogger(__name__).debug
    last_sync = restore_state.last_sync_log
    phone_ids = last_sync.case_ids_on_phone
    debug("phone_ids: %r", phone_ids)
    if not phone_ids:
        sync_ids = live_ids
    else:
        # sync every live case not yet on the phone, plus any case already
        # on the phone that changed since the last sync
        sync_ids = live_ids - phone_ids
        sync_ids.update(CommCareCase.objects.get_modified_case_ids(
            restore_state.domain, list(phone_ids), last_sync))
    debug('sync_ids: %r', sync_ids)
    return sync_ids
class PrefetchIndexCaseAccessor:
    """Case accessor that supplies pre-fetched indices to ``get_cases``."""

    def __init__(self, domain, indices):
        self.domain = domain
        self.indices = indices  # case_id -> list of CommCareCaseIndex-like

    def get_cases(self, case_ids, **kw):
        assert 'prefetched_indices' not in kw
        prefetched = []
        for case_id in case_ids:
            prefetched.extend(self.indices[case_id])
        kw['prefetched_indices'] = prefetched
        return CommCareCase.objects.get_cases(case_ids, self.domain, **kw)
def batch_cases(accessor, case_ids):
    """Yield lists of cases fetched in batches of 1000 ids, tracking load."""
    track_load = case_load_counter("livequery_restore", accessor.domain)
    id_iter = iter(case_ids)
    while True:
        batch = list(islice(id_iter, 1000))
        if not batch:
            return
        track_load(len(batch))
        yield accessor.get_cases(batch)
def init_progress(async_task, total):
    """Return a callable(done) that reports restore progress on `async_task`.

    Returns a no-op callable when there is no async task; otherwise reports
    zero progress immediately.
    """
    def noop(done):
        return None

    if not async_task:
        return noop

    def update_progress(done):
        meta = {'done': done, 'total': total, 'retry-after': ASYNC_RETRY_AFTER}
        async_task.update_state(state=ASYNC_RESTORE_SENT, meta=meta)

    update_progress(0)
    return update_progress
def compile_response(
    timing_context,
    restore_state,
    response,
    batches,
    update_progress,
    total_cases,
):
    """Append stock and case XML elements to `response`, batch by batch,
    reporting progress after each batch."""
    done = 0
    for cases in batches:
        with timing_context("get_stock_payload"):
            response.extend(get_stock_payload(
                restore_state.project,
                restore_state.stock_settings,
                cases,
            ))

        with timing_context("get_case_sync_updates (%s cases)" % len(cases)):
            updates = get_case_sync_updates(
                restore_state.domain, cases, restore_state.last_sync_log)

        with timing_context("get_xml_for_response (%s updates)" % len(updates)):
            response.extend(
                item for update in updates
                for item in get_xml_for_response(
                    update, restore_state, total_cases
                )
            )

        done += len(cases)
        update_progress(done)
# Histogram bucket boundaries (number of cases) for the
# 'commcare.restore.case_load' metric emitted in do_livequery.
RESTORE_CASE_LOAD_BUCKETS = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000]
| bsd-3-clause | 3be06335ed13cab4346ea95f23145050 | 36.514286 | 115 | 0.603833 | 3.864606 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/events/migrations/0004_add_permissions.py | 1 | 1384 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-05-15 10:40
from __future__ import unicode_literals
from django.db import migrations, connection
from bluebottle.utils.utils import update_group_permissions
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def add_group_permissions(apps, schema_editor):
    """Grant event participant permissions to the standard auth groups.

    Runs inside the current tenant's schema; anonymous read access to
    events is withheld when the site is configured as closed.
    """
    tenant = Client.objects.get(schema_name=connection.tenant.schema_name)
    with LocalTenant(tenant):
        group_perms = {
            'Staff': {
                'perms': (
                    'add_participant', 'change_participant', 'delete_participant',
                )
            },
            'Anonymous': {
                'perms': ('api_read_event', ) if not properties.CLOSED_SITE else ()
            },
            'Authenticated': {
                'perms': (
                    'api_read_participant',
                    'api_add_participant',
                    'api_change_own_participant',
                )
            }
        }
        update_group_permissions('events', group_perms, apps)
class Migration(migrations.Migration):
    # Data migration: assign event/participant permissions to auth groups.

    dependencies = [
        ('events', '0003_auto_20190522_1329'),
    ]

    operations = [
        migrations.RunPython(
            add_group_permissions,
            # reverse is a no-op: permissions are not revoked on rollback
            migrations.RunPython.noop
        )
    ]
| bsd-3-clause | 7bf292540211eb13472b045520254377 | 27.244898 | 83 | 0.567919 | 4.393651 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/projects/migrations/0031_add_project_roles.py | 1 | 1127 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-10 20:09
from __future__ import unicode_literals
import bluebottle.utils.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds `promoter` and `task_manager` user roles to Project.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('projects', '0030_rename_account_bic_20170705_1221'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='promoter',
            field=models.ForeignKey(blank=True, help_text='Project Promoter', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='promoter', to=settings.AUTH_USER_MODEL, verbose_name='promoter'),
        ),
        migrations.AddField(
            model_name='project',
            name='task_manager',
            field=models.ForeignKey(blank=True, help_text='Project Task Manager', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_manager', to=settings.AUTH_USER_MODEL, verbose_name='task manager'),
        ),
    ]
| bsd-3-clause | 4089fd71c5834c53ada1642002f1cc16 | 37.862069 | 225 | 0.674357 | 3.820339 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/hqwebapp/session_details_endpoint/views.py | 1 | 3146 | import json
from django.http import Http404, HttpResponse, HttpResponseBadRequest, JsonResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from corehq import toggles
from corehq.apps.domain.auth import formplayer_auth
from corehq.apps.enterprise.models import EnterprisePermissions
from corehq.apps.hqadmin.utils import get_django_user_from_session, get_session
from corehq.apps.users.models import CouchUser
from corehq.feature_previews import previews_enabled_for_domain
from corehq.middleware import TimeoutMiddleware
from corehq.toggles import toggles_enabled_for_user, toggles_enabled_for_domain
@method_decorator(csrf_exempt, name='dispatch')
@method_decorator(formplayer_auth, name='dispatch')
class SessionDetailsView(View):
    """
    Internal API to allow formplayer to get the Django user ID
    from the session key.

    Authentication is done by HMAC signing of the request body:

        secret = settings.FORMPLAYER_INTERNAL_AUTH_KEY
        data = '{"session_id": "123"}'
        digest = base64.b64encode(hmac.new(secret, data, hashlib.sha256).digest())
        requests.post(url, data=data, headers={'X-MAC-DIGEST': digest})
    """
    urlname = 'session_details'
    http_method_names = ['post']

    def post(self, request, *args, **kwargs):
        """Resolve a session id to user/session details as JSON.

        Returns 400 for malformed bodies, 404 for unknown sessions/users,
        and 503 when web apps are disabled for the requested domain.
        """
        # body must be a JSON object containing at least `sessionId`
        try:
            data = json.loads(request.body.decode('utf-8'))
        except ValueError:
            return HttpResponseBadRequest()
        if not data or not isinstance(data, dict):
            return HttpResponseBadRequest()
        session_id = data.get('sessionId', None)
        if not session_id:
            return HttpResponseBadRequest()
        session = get_session(session_id)
        user = get_django_user_from_session(session)
        if user:
            couch_user = CouchUser.get_by_username(user.username)
            if not couch_user:
                raise Http404
        else:
            raise Http404
        domain = data.get('domain')
        if domain and toggles.DISABLE_WEB_APPS.enabled(domain):
            return HttpResponse('Service Temporarily Unavailable', content_type='text/plain', status=503)

        # reset the session's expiry if there's some formplayer activity
        secure_session = session.get('secure_session')
        TimeoutMiddleware.update_secure_session(session, secure_session, couch_user, domain=domain)
        session.save()

        # member domains plus any domains reachable via enterprise permissions
        domains = set()
        for member_domain in couch_user.domains:
            domains.add(member_domain)
            domains.update(EnterprisePermissions.get_domains(member_domain))

        enabled_toggles = toggles_enabled_for_user(user.username) | toggles_enabled_for_domain(domain)
        return JsonResponse({
            'username': user.username,
            'djangoUserId': user.pk,
            'superUser': user.is_superuser,
            'authToken': None,
            'domains': list(domains),
            'anonymous': False,
            'enabled_toggles': list(enabled_toggles),
            'enabled_previews': list(previews_enabled_for_domain(domain))
        })
| bsd-3-clause | c8982ad724a31cf55e005d67ec9fd817 | 37.839506 | 105 | 0.674507 | 4.038511 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/linked_domain/migrations/0018_auto_20210806_1526.py | 1 | 1341 | # Generated by Django 2.2.24 on 2021-08-06 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Updates the `model` choices on DomainLinkHistory.

    dependencies = [
        ('linked_domain', '0017_add_tableau_choice_20210720_1908'),
    ]

    operations = [
        migrations.AlterField(
            model_name='domainlinkhistory',
            name='model',
            field=models.CharField(choices=[
                ('app', 'Application'),
                ('fixture', 'Lookup Table'),
                ('report', 'Report'),
                ('keyword', 'Keyword'),
                ('custom_user_data', 'Custom User Data Fields'),
                ('custom_location_data', 'Custom Location Data Fields'),
                ('roles', 'User Roles'),
                ('previews', 'Feature Previews'),
                ('case_search_data', 'Case Search Settings'),
                ('data_dictionary', 'Data Dictionary'),
                ('dialer_settings', 'Dialer Settings'),
                ('otp_settings', 'OTP Pass-through Settings'),
                ('hmac_callout_settings', 'Signed Callout'),
                ('tableau_server_and_visualizations', 'Tableau Server and Visualizations'),
                ('custom_product_data', 'Custom Product Data Fields'),
                ('toggles', 'Feature Flags')
            ], max_length=128),
        ),
    ]
| bsd-3-clause | f3d884b2bdfc8e5e2a21a836c110af16 | 37.314286 | 91 | 0.525727 | 4.530405 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/userreports/forms.py | 1 | 1412 | from django import forms
from django.utils.translation import gettext_lazy as _
from crispy_forms import layout as crispy
from corehq.apps.hqwebapp.crispy import HQFormHelper
from corehq.apps.userreports.models import UCRExpression
from corehq.apps.userreports.ui.fields import JsonField
class UCRExpressionForm(forms.ModelForm):
    """Create/edit form for a UCR expression, rendered with crispy forms."""

    class Meta:
        model = UCRExpression
        fields = [
            "name",
            "expression_type",
            "description",
            "definition",
        ]

    def __init__(self, request, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.domain = request.domain
        self.fields['description'] = forms.CharField(required=False)
        # pre-populate the JSON editor with a minimal valid expression
        self.fields['definition'] = JsonField(initial={"type": "property_name", "property_name": "name"})
        self.helper = HQFormHelper()
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                _('Expression'),
                crispy.Field('name'),
                crispy.Field('expression_type'),
                crispy.Field('description'),
                crispy.Field('definition'),
            )
        )
        self.helper.add_input(
            crispy.Submit('submit', _('Save'))
        )
        self.helper.render_required_fields = True

    def save(self, commit=True):
        # stamp the expression with the requesting domain before saving
        self.instance.domain = self.domain
        return super().save(commit)
| bsd-3-clause | fed9bfd61d230142abb6e56717c731c3 | 31.837209 | 105 | 0.594901 | 4.069164 | false | false | false | false |
dimagi/commcare-hq | corehq/messaging/smsbackends/grapevine/models.py | 1 | 8527 | from xml.etree import cElementTree as ElementTree
from django.http import HttpResponse
from tastypie.authentication import Authentication
from tastypie.authorization import Authorization
from tastypie.resources import Resource
from tastypie.serializers import Serializer
from tastypie.throttle import CacheThrottle
from corehq.messaging.smsbackends.grapevine.forms import GrapevineBackendForm
from corehq.apps.sms.util import clean_phone_number
from corehq.apps.sms.models import SQLSMSBackend
from xml.sax.saxutils import escape, unescape
from django.conf import settings
from corehq.apps.sms.api import incoming as incoming_sms
import logging
import requests
import six
logger = logging.getLogger(__name__)
TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<gviSmsMessage>
<affiliateCode>{affiliate_code}</affiliateCode>
<authenticationCode>{auth_code}</authenticationCode>
<messageType>text</messageType>
<recipientList>
<message>{message}</message>
<recipient>
<msisdn>{msisdn}</msisdn>
</recipient>
</recipientList>
</gviSmsMessage>"""
class GrapevineException(Exception):
    """Raised when the Grapevine gateway replies with an error; triggers a retry."""
    pass
class SQLGrapevineBackend(SQLSMSBackend):
    """Outbound SMS backend for the Grapevine (GVI) gateway.

    Messages are posted as XML (see TEMPLATE) to the Applink upload URL; a
    non-200 status or a non-zero ``resultCode`` in the reply raises
    GrapevineException so the framework retries the send.
    """
    url = 'http://www.gvi.bms9.vine.co.za/httpInputhandler/ApplinkUpload'
    show_inbound_api_key_during_edit = False
    class Meta(object):
        app_label = 'sms'
        proxy = True
    @classmethod
    def get_available_extra_fields(cls):
        # Per-backend gateway credentials, configured in the admin UI.
        return [
            'affiliate_code',
            'authentication_code',
        ]
    @classmethod
    def get_opt_in_keywords(cls):
        return ['START']
    @classmethod
    def get_opt_out_keywords(cls):
        return ['STOP', 'END', 'CANCEL', 'UNSUBSCRIBE', 'QUIT']
    @classmethod
    def get_api_id(cls):
        return 'GVI'
    @classmethod
    def get_generic_name(cls):
        return "Grapevine"
    @classmethod
    def get_form_class(cls):
        return GrapevineBackendForm
    def handle_response(self, response):
        """
        Raising an exception makes the framework retry sending the message.
        """
        status_code = response.status_code
        response_text = response.text
        if status_code != 200:
            raise GrapevineException("Received status code %s" % status_code)
        try:
            root = ElementTree.fromstring(response_text)
        except (TypeError, ElementTree.ParseError):
            raise GrapevineException("Invalid XML returned from API")
        result_code = root.find('resultCode')
        if result_code is None:
            raise GrapevineException("resultCode tag not found in XML response")
        if result_code.text != '0':
            raise GrapevineException("Received non-zero result code: %s" % result_code.text)
    def send(self, msg, *args, **kwargs):
        # Render the outbound XML payload; every interpolated value is XML-escaped.
        phone_number = clean_phone_number(msg.phone_number)
        text = msg.text
        config = self.config
        data = TEMPLATE.format(
            affiliate_code=escape(config.affiliate_code),
            auth_code=escape(config.authentication_code),
            message=escape(text),
            msisdn=escape(phone_number)
        )
        response = requests.post(
            self.url,
            data=data.encode('utf-8'),
            headers={'content-type': 'text/xml'},
            timeout=settings.SMS_GATEWAY_TIMEOUT,
        )
        self.handle_response(response)
class SmsMessage(object):
    """Value object for an inbound SMS: a phone number plus XML-unescaped text."""
    # Class-level defaults, preserved for API compatibility.
    phonenumber = ''
    text = ''
    def __init__(self, phonenumber=None, text=None):
        self.phonenumber = phonenumber
        if text:
            self.text = unescape(text)
        else:
            self.text = ''
    @property
    def is_complete(self):
        """A message is usable only once a phone number is known."""
        has_number = bool(self.phonenumber)
        return has_number
class UrlencodedDeserializer(Serializer):
    """Tastypie serializer that additionally understands form-encoded bodies."""
    formats = ['json', 'jsonp', 'xml', 'yaml', 'html', 'plist', 'urlencode']
    content_types = {
        'json': 'application/json',
        'jsonp': 'text/javascript',
        'xml': 'application/xml',
        'yaml': 'text/yaml',
        'html': 'text/html',
        'plist': 'application/x-plist',
        'urlencode': 'application/x-www-form-urlencoded',
    }
    def from_urlencode(self, data, options=None):
        """Parse a form-encoded body; single-valued keys collapse to scalars."""
        parsed = six.moves.urllib.parse.parse_qs(data)
        return {
            key: values if len(values) > 1 else values[0]
            for key, values in parsed.items()
        }
    def to_urlencode(self, content):
        # Serializing back to urlencode is intentionally unsupported.
        pass
class SimpleApiAuthentication(Authentication):
    """Shared-secret API auth: checks the ``apiuser``/``apikey`` query params
    against the ``SIMPLE_API_KEYS`` mapping in Django settings.
    """
    def is_authenticated(self, request, **kwargs):
        user = self.get_identifier(request)
        key = request.GET.get('apikey')
        expected_key = getattr(settings, 'SIMPLE_API_KEYS', {}).get(user)
        if not expected_key:
            # Lazy %-args: the message is only formatted if the record is emitted.
            logger.warning("No apikey defined for user '%s'", user)
        # Coerce to a real boolean: the previous expression could return the
        # raw falsy expected_key (None / '') instead of False.
        return bool(expected_key) and key == expected_key
    def get_identifier(self, request):
        # Fall back to a sentinel user so lookups and log lines stay sane.
        return request.GET.get('apiuser', 'nouser')
class GrapevineResource(Resource):
    """
    Handles grapevine messaging requests
    * incoming SMS
        <gviSms>
            <smsDateTime>2013-10-29T12:55:58</smsDateTime>
            <gatewayIdentifier>vodacomPremMGA2Rx1</gatewayIdentifier>
            <cellNumber>27827891099</cellNumber>
            <smsLocation>30665</smsLocation>
            <content>Another test</content>
        </gviSms>
    * Replies to SMS
        <gviSmsResponse xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
            <responseDateTime>2013-10-29T13:19:07</responseDateTime>
            <recipient>
                <msisdn>27827891099</msisdn>
            </recipient>
            <responseType>reply</responseType>
            <response>Test reply</response>
        </gviSmsResponse>
    * SMS Status reports (not currently implemented)
        <gviSmsResponse xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
            <responseDateTime>2013-10-29T13:15:52</responseDateTime>
            <submitDateTime>2013-10-29T13:15:49</submitDateTime>
            <recipient>
                <msisdn>27827891099</msisdn>
            </recipient>
            <responseType>receipt</responseType>
            <status>
                <code>0</code>
                <reason>Message is delivered to destination. stat:DELIVRD</reason>
            </status>
        </gviSmsResponse>
    smsDateTime: The date and time when the original SMS arrived at GVI's SMS gateway.
    gatewayIdentifier: Identifies the network and the rate of the SMS.
    cellNumber: The number (in international MSISDN format) of the mobile phone that sent the SMS.
    smsLocation: The short code to which the SMS was sent.
    content: The message text of the SMS message.
    """
    class Meta(object):
        resource_name = 'sms'
        object_class = SmsMessage
        authorization = Authorization()
        allowed_methods = ['post']
        serializer = UrlencodedDeserializer()
        # Up to 600 requests per 10-second window, per identifier.
        throttle = CacheThrottle(throttle_at=600, timeframe=10, expiration=86400)
        authentication = SimpleApiAuthentication()
    def detail_uri_kwargs(self, bundle_or_obj):
        # No detail endpoint; nothing to contribute to URI construction.
        return {}
    def full_hydrate(self, bundle):
        """Parse the posted XML payload into an SmsMessage on bundle.obj."""
        if not bundle.data or not bundle.data.get('XML'):
            return bundle
        # http://bugs.python.org/issue11033
        xml = bundle.data['XML'].encode('utf-8')
        root = ElementTree.fromstring(xml)
        if root.tag == 'gviSms':
            date_string = root.find('smsDateTime').text  # NOTE(review): parsed but unused
            phone_number = root.find('cellNumber').text
            content_text = root.find('content').text
            bundle.obj = SmsMessage(phone_number, content_text)
        elif root.tag == 'gviSmsResponse':
            date_string = root.find('responseDateTime').text  # NOTE(review): parsed but unused
            phone_number = root.find('recipient/msisdn').text
            resp_type = root.find('responseType').text  # receipt, reply or error
            # Only 'reply' responses carry inbound text; receipts/errors set no obj here.
            if resp_type == 'reply':
                response_text = root.find('response').text
                bundle.obj = SmsMessage(phone_number, response_text)
        return bundle
    def obj_create(self, bundle, request=None, **kwargs):
        bundle = self.full_hydrate(bundle)
        if bundle.obj.is_complete:
            # Hand the message off to the generic inbound-SMS pipeline.
            incoming_sms(bundle.obj.phonenumber, bundle.obj.text, SQLGrapevineBackend.get_api_id())
        return bundle
    def post_list(self, request, **kwargs):
        super(GrapevineResource, self).post_list(request, **kwargs)
        # respond with 200 OK instead of 201 CREATED
        return HttpResponse()
onepercentclub/bluebottle | scripts/tne_campaign_ranking.py | 1 | 1881 | from datetime import date
import xlsxwriter
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
from bluebottle.funding.models import Funding, Donor
from bluebottle.geo.models import Location
OFFICE_NAME = 'Mogadishu'
TARGET = 500
DEADLINES = [date(2022, 8, 20), date(2022, 8, 21)]
def run(*args):
    """Report, per campaign, when the funding TARGET was first reached.

    Scans succeeded/partially funded campaigns for OFFICE_NAME with deadlines
    in DEADLINES on the 'nexteconomy' tenant, and writes an xlsx file with one
    row per campaign that crossed TARGET (id, title, status, timestamp of the
    donation that crossed the threshold).
    """
    tne = Client.objects.get(client_name='nexteconomy')
    with LocalTenant(tne, clear_tenant=True):
        result = []
        location = Location.objects.get(name=OFFICE_NAME)
        campaigns = Funding.objects.filter(
            initiative__location__name=OFFICE_NAME,
            deadline__date__in=DEADLINES,
            status__in=('succeeded', 'partially_funded')
        )
        print(len(campaigns))
        for activity in campaigns:
            print(activity.title, activity.amount_raised, activity.status)
        for campaign in campaigns:
            donors = campaign.contributors.instance_of(
                Donor
            ).filter(
                status='succeeded'
            ).order_by(
                'created'
            )
            total = 0
            for donor in donors:
                total += donor.amount.amount
                if total >= TARGET:
                    # Record the donation that pushed the campaign over TARGET.
                    result.append({
                        'id': campaign.id,
                        'title': campaign.title,
                        'status': campaign.status,
                        'target reached': str(donor.created),
                    })
                    break
        if not result:
            # Nothing reached the target: avoid IndexError on result[0] below
            # (and don't write an empty workbook).
            print('No campaigns reached the target; no report written.')
            return
        workbook = xlsxwriter.Workbook(f'TNE-{location.name}-{DEADLINES[0]}.xlsx', {'remove_timezone': True})
        worksheet = workbook.add_worksheet()
        worksheet.write_row(0, 0, result[0].keys())
        for (index, row) in enumerate(result):
            worksheet.write_row(index + 1, 0, row.values())
        workbook.close()
| bsd-3-clause | 5cf058f3114c781965c1ca3f5b0e1ff1 | 28.390625 | 109 | 0.56034 | 4.08913 | false | false | false | false |
dimagi/commcare-hq | corehq/blobs/management/commands/emergency_restore_blobs_for_domain.py | 1 | 3267 | # credit to Danny Roberts for the bulk of this code
from gzip import GzipFile
from io import BytesIO
from django.core.management import BaseCommand
from corehq.blobs import get_blob_db
from corehq.blobs.models import BlobMeta, DeletedBlobMeta
from corehq.form_processor.models.forms import XFormInstance
class Command(BaseCommand):
    """
    WARNING: intended to only be used in the rare circumstance blobs are unintentionally deleted from a domain
    If forms and cases have been hard deleted, you should restore the SQL form and case docs before running this
    Note you need the "s3:ListBucketVersions", "s3:GetObjectVersion" permissions added to the policy responsible
    for accessing S3 for this to work properly
    This command was only used to restore form xml and form attachments (BLOB_CODES 2 and 3). You may need to
    modify this to handle other types, specifically how the content is formatted in _get_stream_for_object_version
    """
    def handle(self, domain, **options):
        # Gather surviving metadata for every form in the domain, then
        # re-upload each blob's latest pre-deletion S3 version.
        blob_metas = get_blob_metas_for_domain(domain)
        db = get_blob_db()
        restore_blobs(db, blob_metas)
def get_blob_metas_for_domain(domain):
    """Collect live BlobMeta rows for every form in ``domain``.

    If no live metadata exists but deleted metadata does, the BlobMeta rows
    must be restored first, so the process terminates with exit status 1.
    """
    import sys
    form_ids = XFormInstance.objects.get_form_ids_in_domain(domain)
    blob_metas = []
    for form_id in form_ids:
        blob_metas.extend(_get_blob_metas(form_id))
    if not blob_metas:
        deleted_blobs = []
        for form_id in form_ids:
            deleted_blobs.extend(_get_deleted_blob_metas(form_id))
        if deleted_blobs:
            print("You should restore the BlobMeta objects before attempting to restore actual blobs.")
            # sys.exit instead of the site-module exit() builtin, which is not
            # guaranteed to exist (e.g. under `python -S`).
            sys.exit(1)
    return blob_metas
def restore_blobs(db, blob_metas):
    """Re-upload the latest pre-deletion content for each blob meta into ``db``."""
    for blob_meta in blob_metas:
        content = _get_stream_of_latest_version_before_deletion_for_object(blob_meta)
        db.put(content, meta=blob_meta)
def _get_blob_metas(parent_id):
    # Live blob metadata rows for one form, queried on the correct partition.
    return BlobMeta.objects.partitioned_query(parent_id).filter(parent_id=parent_id)
def _get_deleted_blob_metas(parent_id):
    # Soft-deleted blob metadata rows for one form, on the correct partition.
    return DeletedBlobMeta.objects.partitioned_query(parent_id).filter(parent_id=parent_id)
def _get_stream_of_latest_version_before_deletion_for_object(meta):
    """Return a readable stream of the newest surviving S3 version for ``meta``."""
    latest_version = _get_latest_version_id_before_deletion_for_object(meta.key)
    return _get_stream_for_object_version(meta, latest_version)
def _get_latest_version_id_before_deletion_for_object(key):
    """Return the VersionId of the newest non-current S3 version of ``key``.

    Sanity-checks that the object's current "version" is a delete marker,
    i.e. the object really was deleted.
    """
    versions_dict = _get_versions_dict_for_object(key)
    assert any(delete_marker['IsLatest'] for delete_marker in versions_dict['DeleteMarkers'])
    # max() is O(n) and clearer than sorting the whole list to take one entry.
    newest = max(versions_dict['Versions'], key=lambda version: version['LastModified'])
    return newest['VersionId']
def _get_stream_for_object_version(meta, version_id):
    """Fetch one S3 version of the blob and return it as a readable stream,
    transparently decompressing gzip-compressed blobs."""
    object_dict = _get_object_dict_for_version(meta.key, version_id)
    body = object_dict['Body']
    if not meta.is_compressed:
        return BytesIO(body)
    return GzipFile(meta.key, mode='rb', fileobj=body)
def _get_versions_dict_for_object(key):
    # Lists all S3 versions and delete markers whose key starts with `key`.
    blob_db = get_blob_db()
    return blob_db.db.meta.client.list_object_versions(Bucket=blob_db.s3_bucket_name, Prefix=key)
def _get_object_dict_for_version(key, version_id):
    # Fetches a specific (pre-deletion) S3 version of the object.
    blob_db = get_blob_db()
    return blob_db.db.meta.client.get_object(Bucket=blob_db.s3_bucket_name, Key=key, VersionId=version_id)
| bsd-3-clause | b5de3d0cf130187c4a9bc4b691a8190e | 36.551724 | 114 | 0.712886 | 3.41023 | false | true | false | false |
dimagi/commcare-hq | corehq/util/public_only_requests/public_only_requests.py | 1 | 1355 | import requests
from requests.adapters import HTTPAdapter
def get_public_only_session(domain_name, src):
    """Build a requests.Session that refuses hosts resolving to non-public IPs."""
    public_session = requests.Session()
    make_session_public_only(public_session, domain_name, src)
    return public_session
def make_session_public_only(session, domain_name, src):
    """
    Modifies `session` to validate urls before sending and accept only hosts resolving to public IPs
    Once this function has been called on a session, session.request, etc., will
    raise PossibleSSRFAttempt whenever called with a url host that resolves to a non-public IP.

    :param session: a requests.Session, modified in place
    :param domain_name: domain the requests are made on behalf of (passed to validation)
    :param src: short tag identifying the calling code path (passed to validation)
    """
    # the following two lines entirely replace the default adapters with our custom ones
    # by redefining the adapter to use for the two default prefixes
    session.mount('http://', PublicOnlyHttpAdapter(domain_name=domain_name, src=src))
    session.mount('https://', PublicOnlyHttpAdapter(domain_name=domain_name, src=src))
class PublicOnlyHttpAdapter(HTTPAdapter):
    """Transport adapter that validates the destination URL (SSRF guard)
    before a connection is handed out."""
    def __init__(self, domain_name, src):
        self.domain_name = domain_name  # passed through to the URL validator
        self.src = src  # caller tag identifying where the request originates
        super().__init__()
    def get_connection(self, url, proxies=None):
        # NOTE(review): requests >= 2.32 routes through
        # get_connection_with_tls_context instead of get_connection — confirm
        # the pinned requests version still invokes this hook.
        from corehq.motech.requests import validate_user_input_url_for_repeaters
        validate_user_input_url_for_repeaters(url, domain=self.domain_name, src=self.src)
        return super().get_connection(url, proxies=proxies)
onepercentclub/bluebottle | bluebottle/funding/tests/test_api.py | 1 | 67859 | import json
from datetime import timedelta
from io import BytesIO
import mock
import munch
import stripe
from django.contrib.auth.models import Group
from django.urls import reverse
from django.utils.timezone import now
from moneyed import Money
from openpyxl import load_workbook
from rest_framework import status
from rest_framework.authtoken.models import Token
from bluebottle.funding.models import Donor, FundingPlatformSettings, Funding
from bluebottle.funding.tests.factories import (
FundingFactory, RewardFactory, DonorFactory,
BudgetLineFactory
)
from bluebottle.funding_flutterwave.tests.factories import (
FlutterwaveBankAccountFactory, FlutterwavePaymentFactory, FlutterwavePaymentProviderFactory
)
from bluebottle.funding_lipisha.models import LipishaPaymentProvider
from bluebottle.funding_lipisha.tests.factories import (
LipishaBankAccountFactory, LipishaPaymentFactory, LipishaPaymentProviderFactory
)
from bluebottle.funding_pledge.tests.factories import (
PledgeBankAccountFactory, PledgePaymentProviderFactory
)
from bluebottle.funding_pledge.tests.factories import PledgePaymentFactory
from bluebottle.funding_stripe.models import StripePaymentProvider
from bluebottle.funding_stripe.tests.factories import ExternalAccountFactory, StripePaymentProviderFactory, \
StripePayoutAccountFactory, StripeSourcePaymentFactory
from bluebottle.funding_vitepay.models import VitepayPaymentProvider
from bluebottle.funding_vitepay.tests.factories import (
VitepayBankAccountFactory, VitepayPaymentFactory, VitepayPaymentProviderFactory
)
from bluebottle.initiatives.models import InitiativePlatformSettings
from bluebottle.initiatives.tests.factories import InitiativeFactory
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.geo import GeolocationFactory
from bluebottle.test.utils import BluebottleTestCase, JSONAPITestClient, APITestCase
from bluebottle.segments.tests.factories import SegmentTypeFactory
class BudgetLineListTestCase(BluebottleTestCase):
    """Permission and creation behaviour of the budget-line list endpoint."""
    def setUp(self):
        super(BudgetLineListTestCase, self).setUp()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.initiative = InitiativeFactory.create()
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        self.funding = FundingFactory.create(
            owner=self.user,
            initiative=self.initiative,
        )
        self.create_url = reverse('funding-budget-line-list')
        self.funding_url = reverse('funding-detail', args=(self.funding.pk,))
        # JSON:API payload for creating a budget line tied to self.funding.
        self.data = {
            'data': {
                'type': 'activities/budget-lines',
                'attributes': {
                    'description': 'test',
                    'amount': {'amount': 100, 'currency': 'EUR'},
                },
                'relationships': {
                    'activity': {
                        'data': {
                            'type': 'activities/fundings',
                            'id': self.funding.pk,
                        }
                    }
                }
            }
        }
    def test_create(self):
        # Owner can create; the new line shows up in the funding's relationships.
        response = self.client.post(self.create_url, data=json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        self.assertEqual(
            data['data']['attributes']['description'],
            self.data['data']['attributes']['description']
        )
        response = self.client.get(self.funding_url, user=self.user)
        funding_data = json.loads(response.content)
        self.assertEqual(
            len(funding_data['data']['relationships']['budget-lines']['data']), 1
        )
        self.assertEqual(
            funding_data['data']['relationships']['budget-lines']['data'][0]['id'],
            data['data']['id']
        )
    def test_create_wrong_currency(self):
        # Currency must match the funding's currency (EUR).
        self.data['data']['attributes']['amount']['currency'] = 'USD'
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_other_user(self):
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_create_no_user(self):
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class BudgetLineDetailTestCase(BluebottleTestCase):
    """Permission and update behaviour of the budget-line detail endpoint."""
    def setUp(self):
        super(BudgetLineDetailTestCase, self).setUp()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.initiative = InitiativeFactory.create()
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        self.funding = FundingFactory.create(
            owner=self.user,
            initiative=self.initiative
        )
        self.budget_line = BudgetLineFactory.create(activity=self.funding)
        self.update_url = reverse('funding-budget-line-detail', args=(self.budget_line.pk,))
        # Partial JSON:API payload: only the description changes.
        self.data = {
            'data': {
                'type': 'activities/budget-lines',
                'id': self.budget_line.pk,
                'attributes': {
                    'description': 'Some other title',
                },
            }
        }
    def test_update(self):
        response = self.client.patch(
            self.update_url,
            data=json.dumps(self.data),
            user=self.funding.owner
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.budget_line.refresh_from_db()
        self.assertEqual(
            self.budget_line.description,
            self.data['data']['attributes']['description']
        )
    def test_update_anonymous(self):
        response = self.client.patch(
            self.update_url,
            data=json.dumps(self.data)
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_update_other_user(self):
        response = self.client.patch(
            self.update_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_get_anonymous(self):
        # Budget lines are not publicly readable.
        response = self.client.get(
            self.update_url
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_get_other_user(self):
        response = self.client.get(
            self.update_url,
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class RewardListTestCase(BluebottleTestCase):
    """Permission and creation behaviour of the reward list endpoint."""
    def setUp(self):
        super(RewardListTestCase, self).setUp()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.initiative = InitiativeFactory.create()
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        self.funding = FundingFactory.create(
            owner=self.user,
            initiative=self.initiative
        )
        self.create_url = reverse('funding-reward-list')
        self.funding_url = reverse('funding-detail', args=(self.funding.pk,))
        # JSON:API payload for creating a reward tied to self.funding.
        self.data = {
            'data': {
                'type': 'activities/rewards',
                'attributes': {
                    'title': 'Test title',
                    'description': 'Test description',
                    'amount': {'amount': 100, 'currency': 'EUR'},
                    'limit': 10,
                },
                'relationships': {
                    'activity': {
                        'data': {
                            'type': 'activities/fundings',
                            'id': self.funding.pk,
                        }
                    }
                }
            }
        }
    def test_create(self):
        # Owner can create; the new reward shows up in the funding's relationships.
        response = self.client.post(self.create_url, data=json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        self.assertEqual(
            data['data']['attributes']['description'],
            self.data['data']['attributes']['description']
        )
        self.assertEqual(
            data['data']['attributes']['title'],
            self.data['data']['attributes']['title']
        )
        response = self.client.get(self.funding_url)
        funding_data = json.loads(response.content)
        self.assertEqual(
            len(funding_data['data']['relationships']['rewards']['data']), 1
        )
        self.assertEqual(
            funding_data['data']['relationships']['rewards']['data'][0]['id'], str(data['data']['id'])
        )
    def test_create_wrong_currency(self):
        # Currency must match the funding's currency (EUR).
        self.data['data']['attributes']['amount']['currency'] = 'USD'
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_other_user(self):
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_create_no_user(self):
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class RewardDetailTestCase(BluebottleTestCase):
    """Permission and update behaviour of the reward detail endpoint."""
    def setUp(self):
        super(RewardDetailTestCase, self).setUp()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.initiative = InitiativeFactory.create()
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        self.funding = FundingFactory.create(
            owner=self.user,
            initiative=self.initiative
        )
        self.reward = RewardFactory.create(activity=self.funding)
        self.update_url = reverse('funding-reward-detail', args=(self.reward.pk,))
        # Partial JSON:API payload: only the title changes.
        self.data = {
            'data': {
                'type': 'activities/rewards',
                'id': self.reward.pk,
                'attributes': {
                    'title': 'Some other title',
                },
            }
        }
    def test_update(self):
        response = self.client.patch(
            self.update_url,
            data=json.dumps(self.data),
            user=self.funding.owner
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.reward.refresh_from_db()
        self.assertEqual(
            self.reward.title,
            self.data['data']['attributes']['title']
        )
    def test_update_anonymous(self):
        response = self.client.patch(
            self.update_url,
            data=json.dumps(self.data)
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_update_other_user(self):
        response = self.client.patch(
            self.update_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_get_anonymous(self):
        # Rewards are not publicly readable on the detail endpoint.
        response = self.client.get(
            self.update_url
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_get_other_user(self):
        response = self.client.get(
            self.update_url,
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class FundingDetailTestCase(BluebottleTestCase):
    """Behaviour of the funding detail endpoint: amounts, exports, bank-account
    visibility per role, and updates. Stripe interactions are mocked."""
    def setUp(self):
        super(FundingDetailTestCase, self).setUp()
        # Reset to a single known Stripe payment provider.
        StripePaymentProvider.objects.all().delete()
        StripePaymentProviderFactory.create()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.geolocation = GeolocationFactory.create(locality='Barranquilla')
        self.initiative = InitiativeFactory.create(
            owner=self.user,
            place=self.geolocation
        )
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        self.funding = FundingFactory.create(
            initiative=self.initiative,
            owner=self.user,
            target=Money(5000, 'EUR'),
            deadline=now() + timedelta(days=15)
        )
        BudgetLineFactory.create(activity=self.funding)
        # A verified bank account is required before the funding can be approved.
        self.funding.bank_account = ExternalAccountFactory.create(
            account_id='some-external-account-id',
            status='verified'
        )
        self.funding.save()
        self.funding.states.submit()
        self.funding.states.approve(save=True)
        self.funding_url = reverse('funding-detail', args=(self.funding.pk,))
        self.data = {
            'data': {
                'id': self.funding.pk,
                'type': 'activities/fundings',
                'attributes': {
                    'title': 'New title',
                }
            }
        }
    def test_view_funding_owner(self):
        # Owner sees amounts, co-financers, geolocation and the export url.
        initiative_settings = InitiativePlatformSettings.load()
        initiative_settings.enable_participant_exports = True
        initiative_settings.save()
        co_financer = BlueBottleUserFactory.create(is_co_financer=True)
        DonorFactory.create(
            user=co_financer,
            amount=Money(200, 'EUR'),
            activity=self.funding,
            status='succeeded')
        DonorFactory.create_batch(
            4,
            amount=Money(200, 'EUR'),
            activity=self.funding,
            status='succeeded')
        # 'new' donations must not count towards the totals below.
        DonorFactory.create_batch(
            2,
            amount=Money(100, 'EUR'),
            activity=self.funding,
            status='new')
        self.funding.amount_matching = Money(500, 'EUR')
        self.funding.save()
        response = self.client.get(self.funding_url, user=self.user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = json.loads(response.content)
        self.assertEqual(
            data['data']['attributes']['description'],
            self.funding.description
        )
        self.assertEqual(
            data['data']['attributes']['title'],
            self.funding.title
        )
        self.assertEqual(
            data['data']['attributes']['target'],
            {u'currency': u'EUR', u'amount': 5000.0}
        )
        self.assertEqual(
            data['data']['attributes']['amount-donated'],
            {u'currency': u'EUR', u'amount': 1000.0}
        )
        self.assertEqual(
            data['data']['attributes']['amount-matching'],
            {u'currency': u'EUR', u'amount': 500.0}
        )
        # raised = donated + matching
        self.assertEqual(
            data['data']['attributes']['amount-raised'],
            {u'currency': u'EUR', u'amount': 1500.0}
        )
        self.assertEqual(
            response.json()['data']['meta']['contributor-count'],
            5
        )
        co_financers = response.json()['data']['relationships']['co-financers']
        self.assertEqual(len(co_financers), 1)
        # Test that geolocation is included too
        geolocation = self.included_by_type(response, 'geolocations')[0]
        self.assertEqual(geolocation['attributes']['locality'], 'Barranquilla')
        self.assertIsNotNone(data['data']['attributes']['supporters-export-url'])
    def test_get_owner_export_disabled(self):
        # With exports disabled platform-wide, even the owner gets no url.
        initiative_settings = InitiativePlatformSettings.load()
        initiative_settings.enable_participant_exports = False
        initiative_settings.save()
        DonorFactory.create_batch(
            4,
            amount=Money(200, 'EUR'),
            activity=self.funding,
            status='succeeded')
        DonorFactory.create_batch(
            2,
            amount=Money(100, 'EUR'),
            activity=self.funding,
            status='new')
        response = self.client.get(self.funding_url, user=self.funding.owner)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.json()['data']
        export_url = data['attributes']['supporters-export-url']
        self.assertIsNone(export_url)
    def test_get_owner_export_enabled(self):
        # The signed export url yields an xlsx with only succeeded donations.
        SegmentTypeFactory.create()
        initiative_settings = InitiativePlatformSettings.load()
        initiative_settings.enable_participant_exports = True
        initiative_settings.save()
        DonorFactory.create(activity=self.funding, amount=Money(20, 'EUR'), status='new')
        DonorFactory.create(activity=self.funding, user=None, amount=Money(35, 'EUR'), status='succeeded')
        response = self.client.get(self.funding_url, user=self.funding.owner)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.json()['data']
        export_url = data['attributes']['supporters-export-url']['url']
        export_response = self.client.get(export_url)
        # NOTE(review): get_active_sheet is deprecated in newer openpyxl in
        # favour of the `active` property — confirm the pinned version.
        sheet = load_workbook(filename=BytesIO(export_response.content)).get_active_sheet()
        self.assertEqual(sheet['A1'].value, 'Email')
        self.assertEqual(sheet['B1'].value, 'Name')
        self.assertEqual(sheet['C1'].value, 'Date')
        self.assertEqual(sheet['D1'].value, 'Amount')
        self.assertEqual(sheet['D2'].value, '35.00 €')
        self.assertEqual(sheet['D3'].value, None)
        # Tampering with the signature must 404.
        wrong_signature_response = self.client.get(export_url + '111')
        self.assertEqual(
            wrong_signature_response.status_code, 404
        )
    def test_get_bank_account(self):
        # The owner can see the linked bank account (Stripe calls mocked out).
        self.funding.bank_account = ExternalAccountFactory.create(
            account_id='some-external-account-id',
            status='verified'
        )
        self.funding.save()
        connect_account = stripe.Account('some-connect-id')
        connect_account.update({
            'country': 'NL',
            'external_accounts': stripe.ListObject({
                'data': [connect_account]
            })
        })
        with mock.patch(
            'stripe.Account.retrieve', return_value=connect_account
        ):
            with mock.patch(
                'stripe.ListObject.retrieve', return_value=connect_account
            ):
                response = self.client.get(self.funding_url, user=self.user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        bank_account = response.json()['data']['relationships']['bank-account']['data']
        self.assertEqual(
            bank_account['id'], str(self.funding.bank_account.pk)
        )
    def test_other_user(self):
        # Non-owners must not see the bank account or the export url.
        DonorFactory.create_batch(5, amount=Money(200, 'EUR'), activity=self.funding, status='succeeded')
        DonorFactory.create_batch(2, amount=Money(100, 'EUR'), activity=self.funding, status='new')
        self.funding.bank_account = ExternalAccountFactory.create(
            account_id='some-external-account-id',
            status='verified'
        )
        self.funding.save()
        connect_account = stripe.Account('some-connect-id')
        connect_account.update({
            'country': 'NL',
            'external_accounts': stripe.ListObject({
                'data': [connect_account]
            })
        })
        with mock.patch(
            'stripe.Account.retrieve', return_value=connect_account
        ):
            with mock.patch(
                'stripe.ListObject.retrieve', return_value=connect_account
            ):
                response = self.client.get(self.funding_url, user=BlueBottleUserFactory.create())
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue('bank_account' not in response.json()['data']['relationships'])
        self.assertIsNone(response.json()['data']['attributes']['supporters-export-url'])
    def test_update(self):
        response = self.client.patch(
            self.funding_url,
            data=json.dumps(self.data),
            user=self.user
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json()['data']['attributes']['title'],
            'New title'
        )
    def test_recalculate_refund(self):
        # NOTE(review): the name suggests refund recalculation, but the body
        # only asserts that a succeeded funding exposes no transitions to
        # either anonymous users or the owner — confirm intent.
        self.funding.status = 'succeeded'
        self.funding.save()
        response = self.client.get(self.funding_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            len(response.json()['data']['meta']['transitions']),
            0
        )
        response = self.client.get(self.funding_url, user=self.user)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            len(response.json()['data']['meta']['transitions']),
            0
        )
    def test_update_bank_account(self):
        # Owner can re-link a different verified external account via PATCH.
        external_account = ExternalAccountFactory.create(
            account_id='some-external-account-id',
            status='verified'
        )
        connect_account = stripe.Account('some-connect-id')
        connect_account.update({
            'country': 'NL',
            'external_accounts': stripe.ListObject({
                'data': [connect_account]
            })
        })
        with mock.patch(
            'stripe.Account.retrieve', return_value=connect_account
        ):
            with mock.patch(
                'stripe.ListObject.retrieve', return_value=connect_account
            ):
                response = self.client.patch(
                    self.funding_url,
                    data=json.dumps({
                        'data': {
                            'id': self.funding.pk,
                            'type': 'activities/fundings',
                            'relationships': {
                                'bank_account': {
                                    'data': {
                                        'id': external_account.pk,
                                        'type': 'payout-accounts/stripe-external-accounts'
                                    }
                                }
                            }
                        }
                    }),
                    user=self.user
                )
        self.assertEqual(response.status_code, 200)
        bank_account = response.json()['data']['relationships']['bank-account']['data']
        self.assertEqual(
            bank_account['id'], str(external_account.pk)
        )
        self.assertEqual(
            bank_account['type'], 'payout-accounts/stripe-external-accounts'
        )
    def test_update_unauthenticated(self):
        response = self.client.put(self.funding_url, json.dumps(self.data))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_update_wrong_user(self):
        response = self.client.put(
            self.funding_url, json.dumps(self.data), user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_update_deleted(self):
        # Deleted fundings reject updates even from their owner.
        self.funding = FundingFactory.create()
        self.funding.states.delete(save=True)
        response = self.client.put(self.funding_url, json.dumps(self.data), user=self.funding.owner)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_update_rejected(self):
        # Rejected fundings reject updates even from their owner.
        self.funding = FundingFactory.create()
        self.funding.states.reject(save=True)
        response = self.client.put(self.funding_url, json.dumps(self.data), user=self.funding.owner)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class FundingTestCase(BluebottleTestCase):
    """API tests for creating and validating funding activities."""
    def setUp(self):
        super(FundingTestCase, self).setUp()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.initiative = InitiativeFactory.create(owner=self.user)
        self.bank_account = PledgeBankAccountFactory.create(status='verified')
        self.create_url = reverse('funding-list')
        # Minimal valid JSON:API payload for creating a funding activity.
        self.data = {
            'data': {
                'type': 'activities/fundings',
                'attributes': {
                    'title': 'test',
                    'description': 'Yeah',
                    'target': {'currency': 'EUR', 'amount': 3500},
                    'deadline': str(now() + timedelta(days=30))
                },
                'relationships': {
                    'initiative': {
                        'data': {
                            'type': 'initiatives',
                            'id': self.initiative.pk,
                        },
                    },
                }
            }
        }
    def test_create(self):
        """The initiative owner can create a funding and gets PATCH permission."""
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        data = response.json()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertTrue(
            data['data']['meta']['permissions']['PATCH']
        )
        self.assertTrue(
            self.included_by_type(response, 'geolocations')[0]
        )
    def test_create_other_user(self):
        """A user who does not own the initiative may not add a funding."""
        response = self.client.post(self.create_url, json.dumps(self.data), user=BlueBottleUserFactory.create())
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_create_other_user_open(self):
        """Anyone may add a funding to an approved, open initiative."""
        self.initiative.is_open = True
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_create_other_user_open_not_approved(self):
        """An open but unapproved initiative does not accept third-party fundings."""
        self.initiative.is_open = True
        self.initiative.save()
        response = self.client.post(
            self.create_url,
            data=json.dumps(self.data),
            user=BlueBottleUserFactory.create()
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_create_without_errors(self):
        """A complete funding has no validation errors; a too-far deadline does."""
        self.initiative.status = 'approved'
        self.initiative.save()
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = response.json()
        update_url = reverse('funding-detail', args=(data['data']['id'],))
        # NOTE(review): a dict is passed here while other calls use
        # json.dumps(...) — presumably JSONAPITestClient encodes dicts; confirm.
        response = self.client.put(update_url, data, user=self.user)
        data = response.json()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        funding = Funding.objects.last()
        funding.bank_account = self.bank_account
        BudgetLineFactory.create_batch(2, activity=funding)
        funding.save()
        response = self.client.get(update_url, data, user=self.user)
        data = response.json()
        self.assertEqual(
            len(data['data']['meta']['errors']),
            0
        )
        self.assertEqual(
            len(data['data']['meta']['required']),
            0
        )
        funding.states.submit(save=True)
        funding.states.approve(save=True)
        data['data']['attributes'] = {
            'deadline': now() + timedelta(days=80),
        }
        response = self.client.put(update_url, data, user=self.user)
        data = response.json()
        self.assertEqual(
            data['data']['meta']['errors'][0]['title'],
            'The deadline should not be more then 60 days in the future'
        )
class DonationTestCase(BluebottleTestCase):
    """API tests for creating, updating and claiming donations."""
    def setUp(self):
        super(DonationTestCase, self).setUp()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.initiative = InitiativeFactory.create()
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        self.funding = FundingFactory.create(initiative=self.initiative)
        self.create_url = reverse('funding-donation-list')
        self.funding_url = reverse('funding-detail', args=(self.funding.pk,))
        # Minimal valid JSON:API payload: a EUR 100 donation to the funding.
        self.data = {
            'data': {
                'type': 'contributors/donations',
                'attributes': {
                    'amount': {'amount': 100, 'currency': 'EUR'},
                },
                'relationships': {
                    'activity': {
                        'data': {
                            'type': 'activities/fundings',
                            'id': self.funding.pk,
                        }
                    }
                }
            }
        }
def test_create(self):
response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data = json.loads(response.content)
self.assertEqual(data['data']['attributes']['status'], 'new')
self.assertEqual(data['data']['attributes']['amount'], {'amount': 100, 'currency': 'EUR'})
self.assertEqual(data['data']['relationships']['activity']['data']['id'], str(self.funding.pk))
self.assertEqual(data['data']['relationships']['user']['data']['id'], str(self.user.pk))
self.assertIsNone(data['data']['attributes']['client-secret'])
    def test_donate(self):
        """A succeeded donation makes the donor a follower and counts as contributor."""
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        donation = Donor.objects.get(pk=data['data']['id'])
        donation.states.succeed()
        donation.save()
        response = self.client.get(self.funding_url, user=self.user)
        self.assertTrue(response.json()['data']['attributes']['is-follower'])
        self.assertEqual(response.json()['data']['meta']['contributor-count'], 1)
def test_donate_anonymous(self):
self.data['data']['attributes']['anonymous'] = True
response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data = json.loads(response.content)
self.assertEqual(data['data']['attributes']['status'], 'new')
self.assertEqual(data['data']['attributes']['anonymous'], True)
donation = Donor.objects.get(pk=data['data']['id'])
self.assertTrue(donation.user, self.user)
donation.states.succeed()
donation.save()
response = self.client.get(self.funding_url, user=self.user)
self.assertEqual(response.json()['data']['meta']['contributor-count'], 1)
def test_update(self):
response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data = json.loads(response.content)
update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
patch_data = {
'data': {
'type': 'contributors/donations',
'id': data['data']['id'],
'attributes': {
'amount': {'amount': 200, 'currency': 'EUR'},
},
}
}
response = self.client.patch(update_url, json.dumps(patch_data), user=self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = json.loads(response.content)
self.assertEqual(data['data']['attributes']['amount'], {'amount': 200, 'currency': 'EUR'})
    def test_update_set_donor_name(self):
        """The donor can set a display name on their donation via PATCH."""
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'attributes': {
                    'amount': {'amount': 200, 'currency': 'EUR'},
                    'name': 'Pietje'
                },
            }
        }
        response = self.client.patch(update_url, json.dumps(patch_data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = json.loads(response.content)
        self.assertEqual(data['data']['attributes']['name'], 'Pietje')
    def test_update_change_user(self):
        """Re-assigning a donation to another user is a 400: user can only be set once."""
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'relationships': {
                    'user': {
                        'data': {
                            'id': BlueBottleUserFactory.create().pk,
                            'type': 'members',
                        }
                    }
                },
            }
        }
        response = self.client.patch(update_url, json.dumps(patch_data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        data = json.loads(response.content)
        self.assertEqual(
            data['errors'][0]['detail'],
            u'User can only be set, not changed.'
        )
    def test_update_wrong_user(self):
        """A user may not update another user's donation (403)."""
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'attributes': {
                    'amount': {'amount': 200, 'currency': 'EUR'},
                },
            }
        }
        response = self.client.patch(update_url, json.dumps(patch_data), user=BlueBottleUserFactory.create())
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_update_no_token(self):
        """Patching an authenticated user's donation anonymously yields 401."""
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'attributes': {
                    'amount': {'amount': 200, 'currency': 'EUR'},
                },
            }
        }
        # No user and no Donation token on this request.
        response = self.client.patch(update_url, json.dumps(patch_data))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_create_no_user(self):
        """A guest donation is created without a user and gets a 32-char client secret."""
        response = self.client.post(self.create_url, json.dumps(self.data))
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        self.assertEqual(data['data']['attributes']['status'], 'new')
        self.assertEqual(data['data']['attributes']['amount'], {'amount': 100, 'currency': 'EUR'})
        # Guest donations expose a secret so the creator can come back to it.
        self.assertEqual(len(data['data']['attributes']['client-secret']), 32)
        self.assertEqual(data['data']['relationships']['activity']['data']['id'], str(self.funding.pk))
        self.assertEqual(data['data']['relationships']['user']['data'], None)
    def test_claim(self):
        """A guest donation can be claimed with its client secret.

        After the claim the secret disappears from the payload and the new
        owner can update the donation with normal authentication.
        """
        response = self.client.post(self.create_url, json.dumps(self.data))
        data = response.json()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'relationships': {
                    'user': {
                        'data': {
                            'id': self.user.pk,
                            'type': 'members',
                        }
                    }
                },
            }
        }
        # Claim the donation using the client secret issued at creation.
        response = self.client.patch(
            update_url,
            json.dumps(patch_data),
            HTTP_AUTHORIZATION='Donation {}'.format(data['data']['attributes']['client-secret'])
        )
        data = response.json()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(data['data']['attributes']['status'], 'new')
        self.assertEqual(data['data']['attributes']['amount'], {'amount': 100, 'currency': 'EUR'})
        self.assertEqual(data['data']['relationships']['user']['data']['id'], str(self.user.pk))
        # Once claimed, the secret is no longer exposed.
        self.assertTrue('client-secret' not in data['data']['attributes'])
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'attributes': {
                    'amount': {'amount': 200, 'currency': 'EUR'},
                },
            }
        }
        # The claiming user can now update the donation with regular auth.
        response = self.client.patch(
            update_url,
            json.dumps(patch_data),
            user=self.user
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_claim_authorized(self):
response = self.client.post(self.create_url, json.dumps(self.data))
data = response.json()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
patch_data = {
'data': {
'type': 'contributors/donations',
'id': data['data']['id'],
'relationships': {
'user': {
'data': {
'id': self.user.pk,
'type': 'members',
}
}
},
}
}
response = self.client.patch(
update_url,
json.dumps(patch_data),
user=self.user
)
data = response.json()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_update_no_user(self):
        """A guest donation can be updated with its client-secret header."""
        response = self.client.post(self.create_url, json.dumps(self.data))
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'attributes': {
                    'amount': {'amount': 200, 'currency': 'EUR'},
                },
            }
        }
        response = self.client.patch(
            update_url,
            json.dumps(patch_data),
            HTTP_AUTHORIZATION='Donation {}'.format(data['data']['attributes']['client-secret'])
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = json.loads(response.content)
        self.assertEqual(data['data']['attributes']['amount'], {'amount': 200, 'currency': 'EUR'})
    def test_update_no_user_set_user(self):
        """With the client-secret header, a guest donation can set both amount and user."""
        response = self.client.post(self.create_url, json.dumps(self.data))
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'attributes': {
                    'amount': {'amount': 200, 'currency': 'EUR'},
                },
                'relationships': {
                    'user': {
                        'data': {
                            'id': self.user.pk,
                            'type': 'members',
                        }
                    }
                }
            }
        }
        response = self.client.patch(
            update_url,
            json.dumps(patch_data),
            HTTP_AUTHORIZATION='Donation {}'.format(data['data']['attributes']['client-secret'])
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = json.loads(response.content)
        self.assertEqual(data['data']['attributes']['amount'], {'amount': 200, 'currency': 'EUR'})
        self.assertEqual(data['data']['relationships']['user']['data']['id'], str(self.user.pk))
    def test_update_no_user_wrong_token(self):
        """An invalid Donation token may not update a guest donation (403)."""
        response = self.client.post(self.create_url, json.dumps(self.data))
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        update_url = reverse('funding-donation-detail', args=(data['data']['id'],))
        patch_data = {
            'data': {
                'type': 'contributors/donations',
                'id': data['data']['id'],
                'attributes': {
                    'amount': {'amount': 200, 'currency': 'EUR'},
                },
            }
        }
        response = self.client.patch(
            update_url,
            json.dumps(patch_data),
            HTTP_AUTHORIZATION='Donation wrong-token'
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_create_reward(self):
        """A donation exactly matching a reward's amount may select that reward."""
        reward = RewardFactory.create(amount=Money(100, 'EUR'), activity=self.funding)
        self.data['data']['relationships']['reward'] = {
            'data': {'id': reward.pk, 'type': 'activities/rewards'}
        }
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        self.assertEqual(data['data']['relationships']['reward']['data']['id'], str(reward.pk))
    def test_create_reward_higher_amount(self):
        """Donating *more* than the reward's amount (EUR 100 vs EUR 50) is accepted."""
        reward = RewardFactory.create(amount=Money(50, 'EUR'), activity=self.funding)
        self.data['data']['relationships']['reward'] = {
            'data': {'id': reward.pk, 'type': 'activities/rewards'}
        }
        response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = json.loads(response.content)
        self.assertEqual(data['data']['relationships']['reward']['data']['id'], str(reward.pk))
def test_create_reward_lower_amount(self):
reward = RewardFactory.create(amount=Money(150, 'EUR'), activity=self.funding)
self.data['data']['relationships']['reward'] = {
'data': {'id': reward.pk, 'type': 'activities/rewards'}
}
response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_reward_wrong_activity(self):
reward = RewardFactory.create(amount=Money(100, 'EUR'))
self.data['data']['relationships']['reward'] = {
'data': {'id': reward.pk, 'type': 'activities/rewards'}
}
response = self.client.post(self.create_url, json.dumps(self.data), user=self.user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class CurrencySettingsTestCase(BluebottleTestCase):
    """Tests that the settings endpoint exposes configured payment currencies."""
    def setUp(self):
        super(CurrencySettingsTestCase, self).setUp()
        self.settings_url = reverse('settings')
        # Fix: named ``stripe_provider`` (not ``stripe``) so the local does not
        # shadow the module-level ``stripe`` SDK import used elsewhere in this file.
        stripe_provider = StripePaymentProviderFactory.create()
        stripe_provider.paymentcurrency_set.filter(code__in=['AUD', 'GBP']).all().delete()
        flutterwave_provider = FlutterwavePaymentProviderFactory.create()
        # Customize the first Flutterwave currency so its amounts differ from defaults.
        cur = flutterwave_provider.paymentcurrency_set.first()
        cur.min_amount = 1000
        cur.default1 = 1000
        cur.default2 = 2000
        cur.default3 = 5000
        cur.default4 = 10000
        cur.save()
    def test_currency_settings(self):
        """The settings payload lists every provider currency with its amounts."""
        response = self.client.get(self.settings_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['platform']['currencies'], [{
                'code': 'EUR',
                'name': 'Euro',
                'maxAmount': None,
                'symbol': '€',
                'minAmount': 5.00,
                'defaultAmounts': [10.00, 20.00, 50.00, 100.00],
                'provider': 'stripe',
                'providerName': 'Stripe'
            }, {
                'code': 'USD',
                'name': 'US Dollar',
                'maxAmount': None,
                'symbol': '$',
                'minAmount': 5.00,
                'defaultAmounts': [10.00, 20.00, 50.00, 100.00],
                'provider': 'stripe',
                'providerName': 'Stripe'
            }, {
                'code': 'NGN',
                'name': 'Nigerian Naira',
                'maxAmount': None,
                'symbol': '₦',
                'minAmount': 1000.00,
                'defaultAmounts': [1000.00, 2000.00, 5000.00, 10000.00],
                'provider': 'flutterwave',
                'providerName': 'Flutterwave'
            }, {
                'code': 'KES',
                'name': 'Kenyan Shilling',
                'maxAmount': None,
                'symbol': 'KES',
                'minAmount': 5.00,
                'defaultAmounts': [10.00, 20.00, 50.00, 100.00],
                'provider': 'flutterwave',
                'providerName': 'Flutterwave'
            }, {
                'code': 'USD',
                'name': 'US Dollar',
                'maxAmount': None,
                'symbol': '$',
                'minAmount': 5.00,
                'defaultAmounts': [10.00, 20.00, 50.00, 100.00],
                'provider': 'flutterwave',
                'providerName': 'Flutterwave'
            }, {
                'code': 'XOF',
                'name': 'West African CFA Franc',
                'maxAmount': None,
                'symbol': 'CFA',
                'minAmount': 5.00,
                'defaultAmounts': [10.00, 20.00, 50.00, 100.00],
                'provider': 'flutterwave',
                'providerName': 'Flutterwave'
            }]
        )
class PayoutAccountTestCase(BluebottleTestCase):
    """Tests that a funding exposes the payment methods of its payout account."""
    def setUp(self):
        super(PayoutAccountTestCase, self).setUp()
        # Start from a clean provider set so only the factories below exist.
        StripePaymentProvider.objects.all().delete()
        self.stripe = StripePaymentProviderFactory.create()
        flutterwave_provider = FlutterwavePaymentProviderFactory.create()
        cur = flutterwave_provider.paymentcurrency_set.first()
        cur.min_amount = 1000
        cur.default1 = 1000
        cur.default2 = 2000
        cur.default3 = 5000
        cur.default4 = 10000
        cur.save()
        self.stripe_account = StripePayoutAccountFactory.create()
        self.stripe_bank = ExternalAccountFactory.create(connect_account=self.stripe_account, status='verified')
        self.funding = FundingFactory.create(
            bank_account=self.stripe_bank,
            target=Money(5000, 'EUR'),
            status='open'
        )
        self.funding_url = reverse('funding-detail', args=(self.funding.id,))
        # Fake Stripe connect account returned by the mocked retrieve calls;
        # it lists itself as its own external account.
        self.connect_account = stripe.Account('some-connect-id')
        self.connect_account.update({
            'country': 'NL',
            'external_accounts': stripe.ListObject({
                'data': [self.connect_account]
            })
        })
    def test_stripe_methods(self):
        """All enabled Stripe methods are included for the EUR funding."""
        self.stripe.paymentcurrency_set.filter(code__in=['AUD', 'GBP']).all().delete()
        with mock.patch(
            'stripe.Account.retrieve', return_value=self.connect_account
        ):
            with mock.patch(
                'stripe.ListObject.retrieve', return_value=self.connect_account
            ):
                response = self.client.get(self.funding_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        included = json.loads(response.content)['included']
        payment_methods = [method['attributes'] for method in included if method['type'] == u'payments/payment-methods']
        self.assertEqual(
            payment_methods,
            [
                {
                    u'code': u'bancontact',
                    u'name': u'Bancontact',
                    u'provider': u'stripe',
                    u'currencies': [u'EUR'],
                    u'countries': [u'BE']
                },
                {
                    u'code': u'credit-card',
                    u'name': u'Credit card',
                    u'provider': u'stripe',
                    u'currencies': [u'EUR', u'USD'],
                    u'countries': []
                },
                {
                    u'code': u'direct-debit',
                    u'name': u'Direct debit',
                    u'provider': u'stripe',
                    u'currencies': [u'EUR'],
                    u'countries': []
                },
                {
                    u'code': u'ideal',
                    u'name': u'iDEAL',
                    u'provider': u'stripe',
                    u'currencies': [u'EUR'],
                    u'countries': [u'NL']
                }
            ]
        )
    def test_stripe_just_credit_card(self):
        """With other Stripe methods disabled, only credit card is listed."""
        self.stripe.ideal = False
        self.stripe.direct_debit = False
        self.stripe.bancontact = False
        self.stripe.save()
        with mock.patch(
            'stripe.Account.retrieve', return_value=self.connect_account
        ):
            with mock.patch(
                'stripe.ListObject.retrieve', return_value=self.connect_account
            ):
                response = self.client.get(self.funding_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        included = json.loads(response.content)['included']
        payment_methods = [method['attributes'] for method in included if method['type'] == u'payments/payment-methods']
        self.assertEqual(
            payment_methods,
            [
                {
                    u'code': u'credit-card',
                    u'name': u'Credit card',
                    u'currencies': [u'USD', u'EUR', u'GBP', u'AUD'],
                    u'provider': u'stripe',
                    u'countries': []
                }
            ]
        )
class PayoutDetailTestCase(BluebottleTestCase):
    """Tests for the payout detail endpoint across the different PSP backends."""
    def setUp(self):
        super(PayoutDetailTestCase, self).setUp()
        StripePaymentProvider.objects.all().delete()
        StripePaymentProviderFactory.create()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory()
        self.token = Token.objects.create(user=self.user)
        # Payout details are restricted to users in the Financial group.
        self.user.groups.add(Group.objects.get(name='Financial'))
        self.geolocation = GeolocationFactory.create(locality='Barranquilla')
        self.initiative = InitiativeFactory.create(
            place=self.geolocation
        )
        self.initiative.states.submit()
        self.initiative.states.approve(save=True)
        self.funding = FundingFactory.create(
            initiative=self.initiative,
            target=Money(1000, 'EUR'),
            deadline=now() + timedelta(days=15)
        )
        BudgetLineFactory.create(activity=self.funding)
def get_payout_url(self, payout):
return reverse('payout-details', args=(payout.pk,))
    def test_get_stripe_payout(self):
        """A Stripe-backed payout lists only succeeded donations, summing to 1000 EUR.

        Five EUR 200 donations and five USD donations with a EUR 200 payout
        amount succeed; two donations fail and must be excluded.
        """
        self.funding.bank_account = ExternalAccountFactory.create(
            account_id='some-external-account-id',
            status='verified'
        )
        self.funding.save()
        with mock.patch(
            'bluebottle.funding_stripe.models.ExternalAccount.verified', new_callable=mock.PropertyMock
        ) as verified:
            verified.return_value = True
            self.funding.states.submit()
            self.funding.states.approve()
        for i in range(5):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding, status='succeeded',
                payment=PledgePaymentFactory.create()
            )
            PledgePaymentFactory.create(donation=donation)
        for i in range(5):
            donation = DonorFactory.create(
                amount=Money(300, 'USD'),
                payout_amount=Money(200, 'EUR'),
                activity=self.funding, status='succeeded',
            )
            with mock.patch('stripe.Source.modify'):
                StripeSourcePaymentFactory.create(donation=donation)
        # These two donations fail and must not appear in the payout.
        for i in range(2):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding,
                status='new',
            )
            with mock.patch('stripe.Source.modify'):
                StripeSourcePaymentFactory.create(donation=donation)
            donation.states.fail()
            donation.save()
        self.funding.states.succeed()
        self.funding.save()
        # Stub the Stripe bank account lookup with representative test data.
        with mock.patch(
            'bluebottle.funding_stripe.models.ExternalAccount.account', new_callable=mock.PropertyMock
        ) as account:
            external_account = stripe.BankAccount('some-bank-token')
            external_account.update(munch.munchify({
                'object': 'bank_account',
                'account_holder_name': 'Jane Austen',
                'account_holder_type': 'individual',
                'bank_name': 'STRIPE TEST BANK',
                'country': 'US',
                'currency': 'usd',
                'fingerprint': '1JWtPxqbdX5Gamtc',
                'last4': '6789',
                'metadata': {
                    'order_id': '6735'
                },
                'routing_number': '110000000',
                'status': 'new',
                'account': 'acct_1032D82eZvKYlo2C'
            }))
            account.return_value = external_account
            response = self.client.get(
                self.get_payout_url(self.funding.payouts.first()),
                HTTP_AUTHORIZATION='Token {}'.format(self.token.key)
            )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.json()
        self.assertEqual(data['data']['id'], str(self.funding.payouts.first().pk))
        self.assertEqual(len(data['data']['relationships']['donations']['data']), 5)
        self.assertEqual(
            sum(
                donation['attributes']['amount']['amount']
                for donation in data['included']
                if donation['type'] == 'contributors/donations'
            ),
            1000.0
        )
    def test_get_vitepay_payout(self):
        """A Vitepay-backed payout lists only the five succeeded donations."""
        VitepayPaymentProvider.objects.all().delete()
        VitepayPaymentProviderFactory.create()
        self.funding.bank_account = VitepayBankAccountFactory.create(
            account_name='Test Tester',
            mobile_number='12345',
            status='verified'
        )
        self.funding.states.submit()
        self.funding.states.approve(save=True)
        for i in range(5):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding, status='succeeded',
            )
            VitepayPaymentFactory.create(donation=donation)
        # Failed donations must not be included in the payout.
        for i in range(2):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding,
                status='new',
            )
            VitepayPaymentFactory.create(donation=donation)
            donation.states.fail()
            donation.save()
        self.funding.states.succeed()
        self.funding.save()
        response = self.client.get(
            self.get_payout_url(self.funding.payouts.first()),
            HTTP_AUTHORIZATION='Token {}'.format(self.token.key)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.json()
        self.assertEqual(data['data']['id'], str(self.funding.payouts.first().pk))
        self.assertEqual(len(data['data']['relationships']['donations']['data']), 5)
    def test_get_lipisha_payout(self):
        """A Lipisha-backed payout lists only the five succeeded donations."""
        LipishaPaymentProvider.objects.all().delete()
        LipishaPaymentProviderFactory.create()
        self.funding.bank_account = LipishaBankAccountFactory.create(
            status='verified'
        )
        self.funding.states.submit()
        self.funding.states.approve(save=True)
        for i in range(5):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding, status='succeeded',
            )
            LipishaPaymentFactory.create(donation=donation)
        # Failed donations must not be included in the payout.
        for i in range(2):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding,
                status='new',
            )
            LipishaPaymentFactory.create(donation=donation)
            donation.states.fail()
            donation.save()
        self.funding.states.succeed()
        self.funding.save()
        response = self.client.get(
            self.get_payout_url(self.funding.payouts.first()),
            HTTP_AUTHORIZATION='Token {}'.format(self.token.key)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.json()
        self.assertEqual(data['data']['id'], str(self.funding.payouts.first().pk))
        self.assertEqual(len(data['data']['relationships']['donations']['data']), 5)
    def test_get_flutterwave_payout(self):
        """A Flutterwave-backed payout lists only the five succeeded donations."""
        FlutterwavePaymentProviderFactory.create()
        self.funding.bank_account = FlutterwaveBankAccountFactory.create(
            status='verified'
        )
        self.funding.states.submit()
        self.funding.states.approve(save=True)
        for i in range(5):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding, status='succeeded',
            )
            FlutterwavePaymentFactory.create(donation=donation)
        # Failed donations must not be included in the payout.
        for i in range(2):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding,
                status='new',
            )
            FlutterwavePaymentFactory.create(donation=donation)
            donation.states.fail()
            donation.save()
        self.funding.states.succeed()
        self.funding.save()
        response = self.client.get(
            self.get_payout_url(self.funding.payouts.first()),
            HTTP_AUTHORIZATION='Token {}'.format(self.token.key)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.json()
        self.assertEqual(data['data']['id'], str(self.funding.payouts.first().pk))
        self.assertEqual(len(data['data']['relationships']['donations']['data']), 5)
    def test_get_pledge_payout(self):
        """A pledge-backed payout lists only the five succeeded donations."""
        PledgePaymentProviderFactory.create()
        self.funding.bank_account = PledgeBankAccountFactory.create(
            status='verified'
        )
        self.funding.states.submit()
        self.funding.states.approve(save=True)
        for i in range(5):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding, status='succeeded',
            )
            PledgePaymentFactory.create(donation=donation)
        # Failed donations must not be included in the payout.
        for i in range(2):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding,
                status='new',
            )
            PledgePaymentFactory.create(donation=donation)
            donation.states.fail()
            donation.save()
        self.funding.states.succeed()
        self.funding.save()
        response = self.client.get(
            self.get_payout_url(self.funding.payouts.first()),
            HTTP_AUTHORIZATION='Token {}'.format(self.token.key)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.json()
        self.assertEqual(data['data']['id'], str(self.funding.payouts.first().pk))
        self.assertEqual(len(data['data']['relationships']['donations']['data']), 5)
    def test_put(self):
        """A Financial-group user can move a payout to 'scheduled' via PUT."""
        PledgePaymentProviderFactory.create()
        self.funding.bank_account = PledgeBankAccountFactory.create(
            status='verified'
        )
        BudgetLineFactory.create(activity=self.funding)
        self.funding.states.submit()
        self.funding.states.approve(save=True)
        for i in range(5):
            donation = DonorFactory.create(
                amount=Money(200, 'EUR'),
                activity=self.funding, status='succeeded',
            )
            PledgePaymentFactory.create(donation=donation)
        self.funding.states.succeed()
        self.funding.save()
        payout = self.funding.payouts.first()
        response = self.client.put(
            self.get_payout_url(payout),
            data=json.dumps({
                'data': {
                    'id': payout.pk,
                    'type': 'funding/payouts',
                    'attributes': {
                        'status': 'scheduled'
                    }
                }
            }),
            HTTP_AUTHORIZATION='Token {}'.format(self.token.key)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        payout.refresh_from_db()
        self.assertEqual(payout.status, 'scheduled')
class FundingAPIPermissionsTestCase(BluebottleTestCase):
    """Permission checks for funding-related endpoints."""
    def setUp(self):
        super(FundingAPIPermissionsTestCase, self).setUp()
        self.client = JSONAPITestClient()
        self.user = BlueBottleUserFactory.create()
    def assertPostNotAllowed(self, url, user=None):
        """Assert that writing back the resource at ``url`` is forbidden.

        NOTE(review): despite the name, this performs a PATCH (echoing the GET
        body back), not a POST — consider renaming to assertPatchNotAllowed.
        """
        data = self.client.get(url, user=user)
        response = self.client.patch(url, data.json(), user=user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_funding_detail(self):
        """A non-owner may not write to the funding detail endpoint."""
        funding = FundingFactory.create()
        url = reverse('funding-detail', args=(funding.id,))
        self.assertPostNotAllowed(url, self.user)
    def test_funding_budgetline_list(self):
        """Listing budget lines is not allowed, authenticated or not."""
        BudgetLineFactory.create()
        url = reverse('funding-budget-line-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
        response = self.client.get(url, user=self.user)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_funding_budgetline_detail(self):
        """A non-owner may not write to a budget line."""
        budget_line = BudgetLineFactory.create()
        url = reverse('funding-budget-line-detail', args=(budget_line.id,))
        self.assertPostNotAllowed(url, self.user)
    def test_funding_reward_list(self):
        """Listing rewards is not allowed, authenticated or not."""
        url = reverse('funding-reward-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
        response = self.client.get(url, user=self.user)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_funding_reward_detail(self):
        """A non-owner may not write to a reward."""
        reward = RewardFactory.create()
        url = reverse('funding-reward-detail', args=(reward.id,))
        self.assertPostNotAllowed(url, self.user)
    def test_donation_list(self):
        """Listing donations is not allowed, authenticated or not."""
        DonorFactory.create(status='succeeded')
        url = reverse('funding-donation-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
        response = self.client.get(url, user=self.user)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class FundingAPITestCase(APITestCase):
    """Detail-endpoint tests for an approved funding with donors."""
    def setUp(self):
        super().setUp()
        owner = BlueBottleUserFactory.create(
            is_co_financer=True
        )
        self.initiative = InitiativeFactory.create(status='approved')
        payout_account = StripePayoutAccountFactory.create(status='verified')
        bank_account = ExternalAccountFactory.create(connect_account=payout_account, status='verified')
        self.activity = FundingFactory.create(
            owner=owner,
            initiative=self.initiative,
            target=Money(500, 'EUR'),
            deadline=now() + timedelta(weeks=2),
            bank_account=bank_account
        )
        BudgetLineFactory.create(activity=self.activity)
        # Mark the bank account reviewed so the submit/approve transitions pass.
        self.activity.bank_account.reviewed = True
        self.activity.states.submit()
        self.activity.states.approve(save=True)
        self.donors = DonorFactory.create_batch(
            5, activity=self.activity
        )
        self.url = reverse('funding-detail', args=(self.activity.pk, ))
    def test_get_owner(self):
        """The owner can retrieve their approved funding activity."""
        self.perform_get(user=self.activity.owner)
        self.assertStatus(status.HTTP_200_OK)
class FundingPlatformSettingsAPITestCase(APITestCase):
    """Tests that funding platform settings are exposed via the config endpoint."""
    def setUp(self):
        super(FundingPlatformSettingsAPITestCase, self).setUp()
        self.user = BlueBottleUserFactory.create()
    def test_anonymous_donations_setting(self):
        """Enabled anonymous-donation settings show up under ``platform.funding``.

        Fix: replaced the deprecated ``assertEquals`` alias (removed in
        Python 3.12) with ``assertEqual``.
        """
        funding_settings = FundingPlatformSettings.load()
        funding_settings.anonymous_donations = True
        funding_settings.allow_anonymous_rewards = True
        funding_settings.save()
        response = self.client.get('/api/config', user=self.user)
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(
            data['platform']['funding'],
            {
                'anonymous_donations': True,
                'allow_anonymous_rewards': True
            }
        )
class FundingAnonymousDonationsTestCase(APITestCase):
    """Tests that the anonymous-donations setting hides the user relationship."""
    def setUp(self):
        super(FundingAnonymousDonationsTestCase, self).setUp()
        self.user = BlueBottleUserFactory.create()
        # NOTE(review): the donation is created for a *different* user than
        # ``self.user``, yet test_donation asserts the 'user' relationship
        # against ``self.user`` — confirm assertRelationship semantics or
        # whether this should be ``user=self.user``.
        donation = DonorFactory.create(
            user=BlueBottleUserFactory.create(),
            status='succeeded'
        )
        self.url = reverse('funding-donation-detail', args=(donation.id,))
    def test_donation(self):
        """By default the donation exposes its user relationship."""
        self.perform_get()
        self.assertTrue('user' in self.response.json()['data']['relationships'])
        self.assertRelationship('user', self.user)
    def test_anonymous_donation(self):
        """With anonymous_donations enabled, the user relationship is hidden."""
        funding_settings = FundingPlatformSettings.load()
        funding_settings.anonymous_donations = True
        funding_settings.save()
        self.perform_get()
        self.assertFalse('user' in self.response.json()['data']['relationships'])
| bsd-3-clause | cd391a0fac53ed28d3f36d3cc97a38d7 | 35.207577 | 120 | 0.565531 | 4.075011 | false | true | false | false |
onepercentclub/bluebottle | bluebottle/cms/migrations/0034_auto_20171017_1549.py | 1 | 2175 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-17 13:49
from __future__ import unicode_literals
import bluebottle.utils.fields
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated migration: removes the translated SlideTranslation
    # model and moves its fields (images, link, tab text, video url)
    # onto the base Slide model directly.
    dependencies = [
        ('cms', '0033_auto_20171017_1353'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='slidetranslation',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='slidetranslation',
            name='master',
        ),
        migrations.AddField(
            model_name='slide',
            name='background_image',
            field=bluebottle.utils.fields.ImageField(blank=True, max_length=255, null=True, upload_to=b'banner_slides/', verbose_name='Background image'),
        ),
        migrations.AddField(
            model_name='slide',
            name='image',
            field=bluebottle.utils.fields.ImageField(blank=True, max_length=255, null=True, upload_to=b'banner_slides/', verbose_name='Image'),
        ),
        migrations.AddField(
            model_name='slide',
            name='link_text',
            field=models.CharField(blank=True, help_text='This is the text on the button inside the banner.', max_length=400, verbose_name='Link text'),
        ),
        migrations.AddField(
            model_name='slide',
            name='link_url',
            field=models.CharField(blank=True, help_text='This is the link for the button inside the banner.', max_length=400, verbose_name='Link url'),
        ),
        migrations.AddField(
            model_name='slide',
            name='tab_text',
            field=models.CharField(default='', help_text='This is shown on tabs beneath the banner.', max_length=100, verbose_name='Tab text'),
            # preserve_default=False: the default above only backfills
            # existing rows and is not kept on the model field.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='slide',
            name='video_url',
            field=models.URLField(blank=True, default=b'', max_length=100, verbose_name='Video url'),
        ),
        migrations.DeleteModel(
            name='SlideTranslation',
        ),
    ]
| bsd-3-clause | c77f8ac4b0540c3d45b7f67cc959f77b | 35.864407 | 154 | 0.590345 | 4.182692 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/userreports/indicators/factory.py | 1 | 6076 | from django.utils.translation import gettext as _
from jsonobject.exceptions import BadValueError
from corehq.apps.userreports.exceptions import BadSpecError
from corehq.apps.userreports.expressions.factory import ExpressionFactory
from corehq.apps.userreports.filters import (
CustomFilter,
SinglePropertyValueFilter,
)
from corehq.apps.userreports.filters.factory import FilterFactory
from corehq.apps.userreports.indicators import (
BooleanIndicator,
Column,
CompoundIndicator,
DueListDateIndicator,
LedgerBalancesIndicator,
RawIndicator,
SmallBooleanIndicator,
)
from corehq.apps.userreports.indicators.specs import (
BooleanIndicatorSpec,
ChoiceListIndicatorSpec,
DueListDateIndicatorSpec,
ExpressionIndicatorSpec,
IndicatorSpecBase,
LedgerBalancesIndicatorSpec,
RawIndicatorSpec,
SmallBooleanIndicatorSpec,
)
def _build_count_indicator(spec, factory_context):
    """Build an indicator that counts every document: its filter always passes."""
    wrapped_spec = IndicatorSpecBase.wrap(spec)
    match_everything = CustomFilter(lambda item, evaluation_context=None: True)
    return BooleanIndicator(
        wrapped_spec.display_name,
        wrapped_spec.column_id,
        match_everything,
        wrapped_spec,
    )
def _build_raw_indicator(spec, factory_context):
    """Build a RawIndicator whose getter comes straight from the wrapped spec."""
    raw_spec = RawIndicatorSpec.wrap(spec)
    return RawIndicator(
        raw_spec.display_name,
        Column(
            id=raw_spec.column_id,
            datatype=raw_spec.datatype,
            is_nullable=raw_spec.is_nullable,
            is_primary_key=raw_spec.is_primary_key,
            create_index=raw_spec.create_index,
        ),
        getter=raw_spec.getter,
        wrapped_spec=raw_spec,
    )
def _build_expression_indicator(spec, factory_context):
    """Build a RawIndicator whose getter is the spec's parsed expression."""
    expr_spec = ExpressionIndicatorSpec.wrap(spec)
    return RawIndicator(
        expr_spec.display_name,
        Column(
            id=expr_spec.column_id,
            datatype=expr_spec.datatype,
            is_nullable=expr_spec.is_nullable,
            is_primary_key=expr_spec.is_primary_key,
            create_index=expr_spec.create_index,
        ),
        getter=expr_spec.parsed_expression(factory_context),
        wrapped_spec=expr_spec,
    )
def _build_small_boolean_indicator(spec, factory_context):
    """Build a SmallBooleanIndicator from a filter spec."""
    bool_spec = SmallBooleanIndicatorSpec.wrap(spec)
    doc_filter = FilterFactory.from_spec(bool_spec.filter, factory_context)
    return SmallBooleanIndicator(
        bool_spec.display_name,
        bool_spec.column_id,
        doc_filter,
        wrapped_spec=bool_spec,
    )
def _build_boolean_indicator(spec, factory_context):
    """Build a BooleanIndicator from a filter spec."""
    bool_spec = BooleanIndicatorSpec.wrap(spec)
    doc_filter = FilterFactory.from_spec(bool_spec.filter, factory_context)
    return BooleanIndicator(
        bool_spec.display_name,
        bool_spec.column_id,
        doc_filter,
        wrapped_spec=bool_spec,
    )
def _build_choice_list_indicator(spec, factory_context):
    """Expand a choice-list spec into one boolean indicator per choice,
    wrapped in a CompoundIndicator."""
    choice_spec = ChoiceListIndicatorSpec.wrap(spec)
    base_display_name = choice_spec.display_name
    choice_indicators = []
    for choice in spec['choices']:
        choice_filter = SinglePropertyValueFilter(
            expression=choice_spec.getter,
            operator=choice_spec.get_operator(),
            reference_expression=ExpressionFactory.from_spec(choice),
        )
        choice_indicators.append(BooleanIndicator(
            display_name='{base} ({choice})'.format(base=base_display_name, choice=choice),
            column_id='{col}_{choice}'.format(col=spec['column_id'], choice=choice),
            filter=choice_filter,
            wrapped_spec=None,
        ))
    return CompoundIndicator(base_display_name, choice_indicators, choice_spec)
def _build_ledger_balances_indicator(spec, factory_context):
    """Build an indicator exposing ledger balances."""
    return LedgerBalancesIndicator(LedgerBalancesIndicatorSpec.wrap(spec))
def _build_due_list_date_indicator(spec, factory_context):
    """Build an indicator exposing due-list dates."""
    return DueListDateIndicator(DueListDateIndicatorSpec.wrap(spec))
def _build_repeat_iteration_indicator(spec, factory_context):
    """Build the primary-key column recording the repeat-iteration number,
    pulled from the evaluation context rather than the document."""
    iteration_column = Column(
        id="repeat_iteration",
        datatype="integer",
        is_nullable=False,
        is_primary_key=True,
    )
    return RawIndicator(
        "base document iteration",
        iteration_column,
        getter=lambda doc, ctx: ctx.iteration,
        wrapped_spec=None,
    )
def _build_inserted_at(spec, factory_context):
    """Build the column recording when the row was inserted, pulled from
    the evaluation context rather than the document."""
    inserted_at_column = Column(
        id="inserted_at",
        datatype="datetime",
        is_nullable=False,
        is_primary_key=False,
    )
    return RawIndicator(
        "inserted at",
        inserted_at_column,
        getter=lambda doc, ctx: ctx.inserted_timestamp,
        wrapped_spec=None,
    )
class IndicatorFactory(object):
    """Builds indicator objects from their JSON spec dicts, dispatching
    on the spec's ``type`` field."""
    constructor_map = {
        'small_boolean': _build_small_boolean_indicator,
        'boolean': _build_boolean_indicator,
        'choice_list': _build_choice_list_indicator,
        'due_list_date': _build_due_list_date_indicator,
        'count': _build_count_indicator,
        'expression': _build_expression_indicator,
        'inserted_at': _build_inserted_at,
        'ledger_balances': _build_ledger_balances_indicator,
        'raw': _build_raw_indicator,
        'repeat_iteration': _build_repeat_iteration_indicator,
    }
    @classmethod
    def from_spec(cls, spec, factory_context=None):
        """Validate ``spec`` and delegate to the matching builder.

        Raises BadSpecError for a missing/unknown type or a malformed spec.
        """
        cls.validate_spec(spec)
        try:
            return cls.constructor_map[spec['type']](spec, factory_context)
        except BadValueError as e:
            # for now reraise jsonobject exceptions as BadSpecErrors
            raise BadSpecError(str(e))
    @classmethod
    def validate_spec(cls, spec):
        # classmethod: first parameter renamed from the misleading
        # ``self`` to the conventional ``cls``.
        if 'type' not in spec:
            raise BadSpecError(_('Indicator specification must include a root level type field.'))
        elif spec['type'] not in cls.constructor_map:
            raise BadSpecError(
                _('Illegal indicator type: "{0}", must be one of the following choice: ({1})'.format(
                    spec['type'],
                    ', '.join(cls.constructor_map)
                ))
            )
| bsd-3-clause | 38112e7bd19fcddabe421083b8b7cab0 | 30.481865 | 101 | 0.658986 | 4.086079 | false | false | false | false |
dimagi/commcare-hq | corehq/pillows/mappings/xform_mapping.py | 1 | 7616 | from corehq.apps.es.forms import form_adapter
from corehq.pillows.core import DATE_FORMATS_STRING, DATE_FORMATS_ARR
from corehq.pillows.mappings.const import NULL_VALUE
from corehq.util.elastic import prefix_for_tests
from pillowtop.es_utils import ElasticsearchIndexInfo, XFORM_HQ_INDEX_NAME
# Identifiers for the XForm Elasticsearch index, derived from the
# form adapter; the alias is prefixed when running under tests.
XFORM_INDEX = form_adapter.index_name
XFORM_ES_TYPE = form_adapter.type
XFORM_ALIAS = prefix_for_tests("xforms")
# Elasticsearch mapping for submitted XForm documents. Dynamic mapping
# and date detection are disabled, so only the fields listed here are
# indexed; dates must match DATE_FORMATS_ARR.
XFORM_MAPPING = {
    "_meta": {
        # date this mapping was first created
        "created": "2013-08-13"
    },
    "date_detection": False,
    "date_formats": DATE_FORMATS_ARR,  # for parsing the explicitly defined dates
    "dynamic": False,
    "properties": {
        "#export_tag": {
            "index": "not_analyzed",
            "type": "string"
        },
        "@uiVersion": {
            "type": "string"
        },
        "@version": {
            "type": "string"
        },
        "__retrieved_case_ids": {
            "index": "not_analyzed",
            "type": "string"
        },
        "_attachments": {
            "dynamic": False,
            "type": "object"
        },
        "app_id": {
            "index": "not_analyzed",
            "type": "string"
        },
        "backend_id": {
            "index": "not_analyzed",
            "type": "string"
        },
        "build_id": {
            "index": "not_analyzed",
            "type": "string"
        },
        "doc_type": {
            "type": "string"
        },
        "domain": {
            # indexed both analyzed and verbatim (domain.exact)
            "fields": {
                "domain": {
                    "index": "analyzed",
                    "type": "string"
                },
                "exact": {
                    # exact is full text string match - hyphens get parsed in standard
                    # analyzer
                    # in queries you can access by domain.exact
                    "index": "not_analyzed",
                    "type": "string"
                }
            },
            "type": "multi_field"
        },
        "external_blobs": {
            "dynamic": False,
            "type": "object"
        },
        "form": {
            # the form body itself; only the listed sub-fields are indexed
            "dynamic": False,
            "properties": {
                "#type": {
                    "index": "not_analyzed",
                    "type": "string"
                },
                "@name": {
                    "index": "not_analyzed",
                    "type": "string"
                },
                "case": {
                    "dynamic": False,
                    "properties": {
                        # Note, the case_id method here assumes single case
                        # properties within a form.
                        # In order to support multi case properties, a dynamic
                        # template needs to be added along with fundamentally
                        # altering case queries
                        "@case_id": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "@date_modified": {
                            "format": DATE_FORMATS_STRING,
                            "type": "date"
                        },
                        "@user_id": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "@xmlns": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "case_id": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "date_modified": {
                            "format": DATE_FORMATS_STRING,
                            "type": "date"
                        },
                        "user_id": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "xmlns": {
                            "index": "not_analyzed",
                            "type": "string"
                        }
                    }
                },
                "meta": {
                    # device/session metadata supplied by the client
                    "dynamic": False,
                    "properties": {
                        "appVersion": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "app_build_version": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "commcare_version": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "deviceID": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "geo_point": {
                            "geohash": True,
                            "geohash_precision": "10m",
                            "geohash_prefix": True,
                            "lat_lon": True,
                            "type": "geo_point"
                        },
                        "instanceID": {
                            "index": "not_analyzed",
                            "type": "string"
                        },
                        "timeEnd": {
                            "format": DATE_FORMATS_STRING,
                            "type": "date"
                        },
                        "timeStart": {
                            "format": DATE_FORMATS_STRING,
                            "type": "date"
                        },
                        "userID": {
                            "index": "not_analyzed",
                            "null_value": NULL_VALUE,
                            "type": "string"
                        },
                        "username": {
                            "index": "not_analyzed",
                            "type": "string"
                        }
                    }
                }
            }
        },
        "initial_processing_complete": {
            "type": "boolean"
        },
        "inserted_at": {
            "format": DATE_FORMATS_STRING,
            "type": "date"
        },
        "partial_submission": {
            "type": "boolean"
        },
        "path": {
            "index": "not_analyzed",
            "type": "string"
        },
        "received_on": {
            "format": DATE_FORMATS_STRING,
            "type": "date"
        },
        "server_modified_on": {
            "format": DATE_FORMATS_STRING,
            "type": "date"
        },
        "submit_ip": {
            "type": "ip"
        },
        "user_type": {
            "index": "not_analyzed",
            "null_value": NULL_VALUE,
            "type": "string"
        },
        "xmlns": {
            "fields": {
                "exact": {
                    "index": "not_analyzed",
                    "type": "string"
                },
                "xmlns": {
                    "index": "analyzed",
                    "type": "string"
                }
            },
            "type": "multi_field"
        }
    }
}
# Optionally disable the legacy _all catch-all field via adapter settings.
if form_adapter.settings.get("DISABLE_ALL"):
    XFORM_MAPPING["_all"] = {"enabled": False}
# Bundle of index name/alias/type/mapping consumed by pillowtop.
XFORM_INDEX_INFO = ElasticsearchIndexInfo(
    index=XFORM_INDEX,
    alias=XFORM_ALIAS,
    type=XFORM_ES_TYPE,
    mapping=XFORM_MAPPING,
    hq_index_name=XFORM_HQ_INDEX_NAME,
)
| bsd-3-clause | 7bf657bccf8ad4ad943239a9b3f3cf89 | 31.969697 | 86 | 0.329438 | 4.977778 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/hqcase/api/get_bulk.py | 1 | 2768 | import dataclasses
from dataclasses import dataclass, field
from corehq.apps.es.case_search import CaseSearchES, case_search_adapter
from corehq.apps.hqcase.api.core import UserError, serialize_es_case
from corehq.apps.hqcase.api.get_list import MAX_PAGE_SIZE
@dataclass
class BulkFetchResults:
    """Accumulator for bulk case fetches: serialized cases plus counts of
    found and missing records."""
    cases: list = field(default_factory=list)
    matching_records: int = 0
    missing_records: int = 0
    def merge(self, results):
        """Fold another BulkFetchResults into this one in place."""
        self.matching_records += results.matching_records
        self.missing_records += results.missing_records
        self.cases.extend(results.cases)
def get_bulk(domain, case_ids=None, external_ids=None):
    """Get cases in bulk.
    This must return a result for each case ID passed in and the results must
    be in the same order as the original list of case IDs.
    If both case IDs and external IDs are passed then results will include
    cases loaded by ID first followed by cases loaded by external ID.
    If a case is not found or belongs to a different domain then
    an error stub is included in the result set.
    """
    case_ids = case_ids or []
    external_ids = external_ids or []
    requested = len(case_ids) + len(external_ids)
    if requested > MAX_PAGE_SIZE:
        raise UserError(f"You cannot request more than {MAX_PAGE_SIZE} cases per request.")
    results = BulkFetchResults()
    if case_ids:
        results.merge(_get_cases_by_id(domain, case_ids))
    if external_ids:
        results.merge(_get_cases_by_external_id(domain, external_ids))
    return dataclasses.asdict(results)
def _get_cases_by_id(domain, case_ids):
    """Fetch cases from Elasticsearch by document id."""
    hits = case_search_adapter.get_docs(case_ids)
    return _prepare_result(
        domain,
        hits,
        case_ids,
        es_id_field='_id',
        serialized_id_field='case_id',
    )
def _get_cases_by_external_id(domain, external_ids):
    """Fetch cases from Elasticsearch by external_id."""
    hits = CaseSearchES().domain(domain).external_id(external_ids).run().hits
    return _prepare_result(
        domain,
        hits,
        external_ids,
        es_id_field='external_id',
        serialized_id_field='external_id',
    )
def _prepare_result(domain, es_results, doc_ids, es_id_field, serialized_id_field):
    """Serialize ES hits in the order of ``doc_ids``; ids not found (or in
    another domain) get an error stub and count as missing."""
    hits_by_id = {hit[es_id_field]: hit for hit in es_results if hit['domain'] == domain}
    missing_ids = []
    ordered_results = []
    for doc_id in doc_ids:
        hit = hits_by_id.get(doc_id)
        if hit:
            ordered_results.append(serialize_es_case(hit))
        else:
            missing_ids.append(doc_id)
            ordered_results.append({serialized_id_field: doc_id, 'error': 'not found'})
    requested = len(doc_ids)
    not_found = len(missing_ids)
    return BulkFetchResults(ordered_results, requested - not_found, not_found)
| bsd-3-clause | 0373d962fcd9326759fbd779165b146b | 31.186047 | 92 | 0.677384 | 3.447073 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/users/bulk_download.py | 1 | 17577 | import uuid
from django.conf import settings
from django.utils.translation import gettext
from corehq.apps.enterprise.models import EnterpriseMobileWorkerSettings
from couchexport.writers import Excel2007ExportWriter
from soil import DownloadBase
from soil.util import expose_download, get_download_file_path
from corehq import privileges
from corehq import toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.custom_data_fields.models import (
PROFILE_SLUG,
CustomDataFieldsDefinition,
CustomDataFieldsProfile,
)
from corehq.apps.groups.models import Group
from corehq.apps.locations.models import SQLLocation
from corehq.apps.user_importer.importer import BulkCacheBase, GroupMemoizer
from corehq.apps.users.dbaccessors import (
count_invitations_by_filters,
count_mobile_users_by_filters,
count_web_users_by_filters,
get_invitations_by_filters,
get_mobile_users_by_filters,
get_mobile_usernames_by_filters,
get_web_users_by_filters,
)
from corehq.apps.users.models import UserRole, DeactivateMobileWorkerTrigger
from corehq.util.workbook_json.excel import (
alphanumeric_sort_key,
flatten_json,
json_to_headers,
)
from couchdbkit import ResourceNotFound
class LocationIdToSiteCodeCache(BulkCacheBase):
    """Caches location_id -> site_code lookups for one domain."""
    def lookup(self, location_id):
        location = SQLLocation.objects.get(
            domain=self.domain,  # this is only for safety
            location_id=location_id,
        )
        return location.site_code
def build_data_headers(keys, header_prefix='data'):
    """Build flattened spreadsheet headers (``<prefix>: <key>``) for keys."""
    return json_to_headers({header_prefix: dict.fromkeys(keys)})
def get_devices(user):
    """
    Returns a comma-separated list of IMEI numbers of the user's devices, sorted with most-recently-used first
    """
    recent_first = sorted(user.devices, key=lambda device: device.last_used, reverse=True)
    return ', '.join(device.device_id for device in recent_first)
def get_location_codes(location_cache, loc_id, assigned_loc_ids):
    """Return site codes for the primary location (first) followed by the
    other assigned locations, silently skipping deleted locations."""
    # primary first, then the remaining assigned ids in their original order
    ordered_ids = [loc_id] + [lid for lid in assigned_loc_ids if lid != loc_id]
    location_codes = []
    for location_id in ordered_ids:
        try:
            location_codes.append(location_cache.get(location_id))
        except SQLLocation.DoesNotExist:
            pass
    return location_codes
def get_phone_numbers(user_data):
    """Return a dict mapping ``phone-number N`` headers to the user's numbers.

    The primary number (if set) is always ``phone-number 1``; the remaining
    numbers are numbered from 2 in their original order. Unlike the previous
    implementation this does not mutate ``user_data.phone_numbers`` (which
    leaked a side effect onto the user object) and does not raise if the
    primary number is somehow absent from the list.
    """
    phone_numbers_dict = {}
    secondary_numbers = list(user_data.phone_numbers)  # work on a copy
    if user_data.phone_number:
        phone_numbers_dict['phone-number 1'] = user_data.phone_number
        # drop the first occurrence of the primary number so it is not
        # listed twice (remove-first preserves the old semantics)
        if user_data.phone_number in secondary_numbers:
            secondary_numbers.remove(user_data.phone_number)
    for n, mobile_number in enumerate(secondary_numbers):
        # Add 2 to n, so number index will start at 2
        # since phone-number 1 is reserved for the primary number
        phone_numbers_dict[f'phone-number {n + 2}'] = mobile_number
    return phone_numbers_dict
def make_mobile_user_dict(user, group_names, location_cache, domain, fields_definition, deactivation_triggers):
    """Build the spreadsheet row dict for one mobile worker.

    ``deactivation_triggers`` maps user_id -> 'MM-YYYY' deactivation date.
    (Removed two dead ``= {}`` assignments that were immediately overwritten
    by the unpacking below.)
    """
    model_data, uncategorized_data = (
        fields_definition.get_model_and_uncategorized(user.metadata)
    )
    role = user.get_role(domain)
    profile = None
    # only resolve the custom-data profile when the domain has the privilege
    if PROFILE_SLUG in user.metadata and domain_has_privilege(domain, privileges.APP_USER_PROFILES):
        try:
            profile = CustomDataFieldsProfile.objects.get(id=user.metadata[PROFILE_SLUG])
        except CustomDataFieldsProfile.DoesNotExist:
            profile = None
    activity = user.reporting_metadata
    location_codes = get_location_codes(location_cache, user.location_id, user.assigned_location_ids)
    def _format_date(date):
        # empty string for users who never submitted / registered date missing
        return date.strftime('%Y-%m-%d %H:%M:%S') if date else ''
    user_dict = {
        'data': model_data,
        'uncategorized_data': uncategorized_data,
        'group': group_names,
        'name': user.full_name,
        'password': "********",  # dummy display string for passwords
        'email': user.email,
        'username': user.raw_username,
        'language': user.language,
        'user_id': user._id,
        'is_active': str(user.is_active),
        'User IMEIs (read only)': get_devices(user),
        'location_code': location_codes,
        'role': role.name if role else '',
        'domain': domain,
        'user_profile': profile.name if profile else '',
        'registered_on (read only)': _format_date(user.created_on),
        'last_submission (read only)': _format_date(activity.last_submission_for_user.submission_date),
        # NOTE(review): unlike last_submission this is not run through
        # _format_date -- confirm the raw sync_date is intended here.
        'last_sync (read only)': activity.last_sync_for_user.sync_date,
        'deactivate_after': deactivation_triggers.get(user._id, ''),
    }
    user_dict.update(get_phone_numbers(user))
    return user_dict
def get_user_role_name(domain_membership):
    """Human-readable role name for a web user's domain membership."""
    if domain_membership.is_admin:
        return gettext('Admin')
    if not domain_membership.role_id:
        return ''
    try:
        return UserRole.objects.by_couch_id(domain_membership.role_id).name
    except UserRole.DoesNotExist:
        # role was deleted out from under the membership
        return gettext('Unknown Role')
def make_web_user_dict(user, location_cache, domain):
    """Build a spreadsheet row dict for an active web user."""
    membership = user.get_domain_membership(domain)
    location_codes = get_location_codes(
        location_cache,
        membership.location_id,
        membership.assigned_location_ids,
    )
    return {
        'username': user.username,
        'first_name': user.first_name,
        'last_name': user.last_name,
        'email': user.email,
        'role': get_user_role_name(membership),
        'location_code': location_codes,
        'status': gettext('Active User'),
        'last_access_date (read only)': membership.last_accessed,
        'last_login (read only)': user.last_login,
        'remove': '',
        'domain': domain,
    }
def make_invited_web_user_dict(invite, location_cache):
    """Build a spreadsheet row dict for a pending (not yet accepted) web
    user invitation; name/activity fields are 'N/A'."""
    try:
        location_codes = [location_cache.get(invite.supply_point)]
    except SQLLocation.DoesNotExist:
        location_codes = []
    return {
        'username': invite.email,
        'first_name': 'N/A',
        'last_name': 'N/A',
        'email': invite.email,
        'role': invite.get_role_name(),
        'location_code': location_codes,
        'status': gettext('Invited'),
        'last_access_date (read only)': 'N/A',
        'last_login (read only)': 'N/A',
        'remove': '',
        'domain': invite.domain,
    }
def get_user_rows(user_dicts, user_headers):
    """Yield one spreadsheet row per user dict, ordered by ``user_headers``;
    missing values become empty strings."""
    for user_dict in user_dicts:
        flat = dict(flatten_json(user_dict))
        yield [flat.get(header, '') for header in user_headers]
def parse_mobile_users(domain, user_filters, task=None, total_count=None):
    """Build (headers, row generator) for the mobile workers download.

    Iterates every domain named by the filters (cross-domain downloads),
    updating ``task`` progress per user. Header columns for phone numbers,
    groups and locations are sized to the widest user encountered.
    """
    from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
    fields_definition = CustomDataFieldsDefinition.get_or_create(
        domain,
        UserFieldsView.field_type
    )
    unrecognized_user_data_keys = set()
    user_groups_length = 0
    max_location_length = 0
    phone_numbers_length = 0
    user_dicts = []
    (is_cross_domain, domains_list) = get_domains_from_user_filters(domain, user_filters)
    current_user_downloaded_count = 0
    for current_domain in domains_list:
        location_cache = LocationIdToSiteCodeCache(current_domain)
        # Load groups once per domain. This call was previously inside the
        # per-user loop, re-fetching every group from the DB for every user.
        group_memoizer = load_memoizer(current_domain)
        # NOTE(review): deactivation triggers are looked up for the request
        # ``domain``, not ``current_domain`` -- confirm this is intended
        # for cross-domain downloads.
        if EnterpriseMobileWorkerSettings.is_domain_using_custom_deactivation(domain):
            deactivation_triggers = {
                f.user_id: f.deactivate_after.strftime('%m-%Y')
                for f in DeactivateMobileWorkerTrigger.objects.filter(domain=domain)
            }
        else:
            deactivation_triggers = {}
        # (dropped the unused enumerate() index)
        for user in get_mobile_users_by_filters(current_domain, user_filters):
            group_names = sorted([
                group_memoizer.get(id).name for id in Group.by_user_id(user.user_id, wrap=False)
            ], key=alphanumeric_sort_key)
            user_dict = make_mobile_user_dict(
                user,
                group_names,
                location_cache,
                current_domain,
                fields_definition,
                deactivation_triggers,
            )
            user_dicts.append(user_dict)
            unrecognized_user_data_keys.update(user_dict['uncategorized_data'])
            user_groups_length = max(user_groups_length, len(group_names))
            max_location_length = max(max_location_length, len(user_dict["location_code"]))
            user_phone_numbers = [k for k in user_dict.keys() if 'phone-number' in k]
            phone_numbers_length = max(phone_numbers_length, len(user_phone_numbers))
            current_user_downloaded_count += 1
            DownloadBase.set_progress(task, current_user_downloaded_count, total_count)
    # Fixed columns first, then dynamically sized ones.
    user_headers = [
        'username', 'password', 'name', 'email', 'language', 'role',
        'user_id', 'is_active', 'User IMEIs (read only)', 'registered_on (read only)',
        'last_submission (read only)', 'last_sync (read only)',
    ]
    user_headers.extend(json_to_headers(
        {'phone-number': list(range(1, phone_numbers_length + 1))}
    ))
    if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
        user_headers += ['user_profile']
    if EnterpriseMobileWorkerSettings.is_domain_using_custom_deactivation(domain):
        user_headers += ['deactivate_after']
    user_data_fields = [f.slug for f in fields_definition.get_fields(include_system=False)]
    user_headers.extend(build_data_headers(user_data_fields))
    user_headers.extend(build_data_headers(
        unrecognized_user_data_keys,
        header_prefix='uncategorized_data'
    ))
    user_headers.extend(json_to_headers(
        {'group': list(range(1, user_groups_length + 1))}
    ))
    if domain_has_privilege(domain, privileges.LOCATIONS):
        user_headers.extend(json_to_headers(
            {'location_code': list(range(1, max_location_length + 1))}
        ))
    if is_cross_domain:
        user_headers += ['domain']
    return user_headers, get_user_rows(user_dicts, user_headers)
def parse_web_users(domain, user_filters, task=None, total_count=None):
    """Build (headers, row generator) for the web users download,
    including both active users and pending invitations."""
    user_dicts = []
    max_location_length = 0
    (is_cross_domain, domains_list) = get_domains_from_user_filters(domain, user_filters)
    progress = 0
    for current_domain in domains_list:
        location_cache = LocationIdToSiteCodeCache(current_domain)
        # active web users first ...
        for user in get_web_users_by_filters(current_domain, user_filters):
            user_dict = make_web_user_dict(user, location_cache, current_domain)
            user_dicts.append(user_dict)
            max_location_length = max(max_location_length, len(user_dict["location_code"]))
            progress += 1
            DownloadBase.set_progress(task, progress, total_count)
        # ... then pending invitations
        for invite in get_invitations_by_filters(current_domain, user_filters):
            user_dict = make_invited_web_user_dict(invite, location_cache)
            user_dicts.append(user_dict)
            progress += 1
            DownloadBase.set_progress(task, progress, total_count)
    user_headers = [
        'username', 'first_name', 'last_name', 'email', 'role', 'last_access_date (read only)',
        'last_login (read only)', 'status', 'remove'
    ]
    # location columns sized to the widest user encountered
    if domain_has_privilege(domain, privileges.LOCATIONS):
        user_headers.extend(json_to_headers(
            {'location_code': list(range(1, max_location_length + 1))}
        ))
    if is_cross_domain:
        user_headers += ['domain']
    return user_headers, get_user_rows(user_dicts, user_headers)
def get_domains_from_user_filters(domain, user_filters):
    """Return ``(is_cross_domain, domains_list)`` for a download request.

    ``is_cross_domain`` is True only when the filters name a domain list
    different from ``[domain]``.
    """
    domains_list = user_filters.get('domains', [domain])
    is_cross_domain = 'domains' in user_filters and domains_list != [domain]
    return (is_cross_domain, domains_list)
def parse_groups(groups):
    """Build (headers, row generator) for the groups sheet, sorted by
    group name."""
    def _group_to_dict(group):
        return {
            'id': group.get_id,
            'name': group.name,
            'case-sharing': group.case_sharing,
            'reporting': group.reporting,
            'data': group.metadata,
        }
    group_data_keys = set()
    group_dicts = []
    for group in sorted(groups, key=lambda g: alphanumeric_sort_key(g.name)):
        group_dicts.append(_group_to_dict(group))
        if group.metadata:
            group_data_keys.update(group.metadata)
    group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
    group_headers.extend(build_data_headers(group_data_keys))
    def _iter_rows():
        for group_dict in group_dicts:
            flat = dict(flatten_json(group_dict))
            yield [flat.get(header, '') for header in group_headers]
    return group_headers, _iter_rows()
def count_users_and_groups(domain, user_filters, group_memoizer):
    """Total row count for progress tracking: matching mobile users plus
    all loaded groups."""
    return count_mobile_users_by_filters(domain, user_filters) + len(group_memoizer.groups)
def dump_usernames(domain, download_id, user_filters, task, owner_id):
    """Dump the usernames of matching mobile workers to an xlsx download."""
    (is_cross_domain, domains_list) = get_domains_from_user_filters(domain, user_filters)
    users_count = 0
    for download_domain in domains_list:
        # This download lists *mobile* usernames, so count mobile users for
        # the progress total (previously counted web users, giving a wrong
        # total for the progress bar).
        users_count += count_mobile_users_by_filters(download_domain, user_filters)
    DownloadBase.set_progress(task, 0, users_count)
    usernames = []
    for download_domain in domains_list:
        usernames += get_mobile_usernames_by_filters(download_domain, user_filters)
    headers = [('users', [['username']])]
    rows = [('users', [[username] for username in usernames])]
    # include the location name in the filename when filtering by location
    location_id = user_filters.get('location_id')
    location_name = ""
    if location_id:
        location = SQLLocation.active_objects.get_or_None(location_id=location_id)
        location_name = location.name if location else ""
    filename_prefix = "_".join([a for a in [domain, location_name] if bool(a)])
    filename = "{}_users.xlsx".format(filename_prefix)
    _dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, users_count, owner_id)
def _dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, total_count, owner_id):
    """Write an xlsx file and register it as a downloadable artifact,
    then mark the task's progress complete."""
    writer = Excel2007ExportWriter(format_as_text=True)
    # write to the shared drive when transfer is enabled so the web
    # workers can serve the file
    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    file_path = get_download_file_path(use_transfer, filename)
    writer.open(
        header_table=headers,
        file=file_path,
    )
    writer.write(rows)
    writer.close()
    expose_download(use_transfer, file_path, filename, download_id, 'xlsx', owner_ids=[owner_id])
    DownloadBase.set_progress(task, total_count, total_count)
def load_memoizer(domain):
    """Load all named groups for ``domain`` into a GroupMemoizer.

    Raises GroupNameError if any group has a blank name, since such
    groups cannot be represented in the download.
    """
    group_memoizer = GroupMemoizer(domain=domain)
    # load groups manually instead of calling group_memoizer.load_all()
    # so that we can detect blank groups
    blank_groups = set()
    for group in Group.by_domain(domain):
        if not group.name:
            blank_groups.add(group)
        else:
            group_memoizer.add_group(group)
    if blank_groups:
        raise GroupNameError(blank_groups=blank_groups)
    return group_memoizer
def dump_users_and_groups(domain, download_id, user_filters, task, owner_id):
    """Dump mobile workers and their groups to a two-sheet xlsx download."""
    (is_cross_domain, domains_list) = get_domains_from_user_filters(domain, user_filters)
    users_groups_count = 0
    groups = set()
    # pre-count users + groups across all requested domains for progress
    for current_domain in domains_list:
        group_memoizer = load_memoizer(current_domain)
        users_groups_count += count_users_and_groups(current_domain, user_filters, group_memoizer)
        groups.update(group_memoizer.groups)
    DownloadBase.set_progress(task, 0, users_groups_count)
    user_headers, user_rows = parse_mobile_users(
        domain,
        user_filters,
        task,
        users_groups_count,
    )
    group_headers, group_rows = parse_groups(groups)
    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    rows = [
        ('users', user_rows),
        ('groups', group_rows),
    ]
    # random suffix avoids filename collisions between downloads
    filename = "{}_users_{}.xlsx".format(domain, uuid.uuid4().hex)
    _dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, users_groups_count, owner_id)
def dump_web_users(domain, download_id, user_filters, task, owner_id):
    """Dump web users (active + invited) to a single-sheet xlsx download."""
    (is_cross_domain, domains_list) = get_domains_from_user_filters(domain, user_filters)
    total_count = 0
    # progress total covers both active users and pending invitations
    for current_domain in domains_list:
        total_count += count_web_users_by_filters(current_domain, user_filters)
        total_count += count_invitations_by_filters(current_domain, user_filters)
    DownloadBase.set_progress(task, 0, total_count)
    user_headers, user_rows = parse_web_users(domain, user_filters, task, total_count)
    headers = [('users', [user_headers])]
    rows = [('users', user_rows)]
    # random suffix avoids filename collisions between downloads
    filename = "{}_users_{}.xlsx".format(domain, uuid.uuid4().hex)
    _dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, total_count, owner_id)
class GroupNameError(Exception):
    """Raised when a domain has groups with blank names, which cannot be
    represented in a bulk download."""
    def __init__(self, blank_groups):
        self.blank_groups = blank_groups
        # pass the message to Exception so str(e) / logging is informative
        # (previously str(e) was an empty string)
        super().__init__(self.message)
    @property
    def message(self):
        return "The following group ids have a blank name: %s." % (
            ', '.join([group.get_id for group in self.blank_groups])
        )
| bsd-3-clause | 5bbb82a649950e4fbc1ed739d3e7283b | 36.082278 | 111 | 0.648063 | 3.642147 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/hqadmin/management/commands/clean_2fa_sessions.py | 1 | 3913 | from distutils.version import LooseVersion
from getpass import getpass
from importlib import import_module
from pkg_resources import DistributionNotFound, get_distribution
from django.conf import settings
from django.core.cache import caches
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Scan all active sessions and strip raw two-factor login step data
    left behind by django-two-factor-auth < 1.12."""
    help = (
        "Remove outdated/sensitive information from active Django sessions. "
        "See https://github.com/Bouke/django-two-factor-auth/security/advisories/GHSA-vhr6-pvjm-9qwf"
    )
    def add_arguments(self, parser):
        parser.add_argument(
            '--one-session',
            action='store_true',
            default=False,
            help='Lookup one session only (will prompt for a session key).',
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help='Count the number of sessions that would be affected, '
                 'but do not modify them.',
        )
    def handle(self, one_session=False, dry_run=False, **options):
        if dry_run:
            print("DRY RUN sessions will not be modified")
        # warn if the vulnerable library version is still installed;
        # otherwise sanitized sessions would be re-polluted on next login
        tf_ver = get_two_factor_version()
        if tf_ver and LooseVersion(tf_ver) < LooseVersion("1.12"):
            print(f"WARNING old/insecure django-two-factor-auth version detected: {tf_ver}")
            print("Please run this tool again after upgrading.")
        else:
            print(f"found django-two-factor-auth version {tf_ver}")
        print("scanning sessions...")
        # i also serves as the total-sessions count after the loop
        count = i = 0
        for i, session in enumerate(iter_sessions(one_session), start=1):
            if i % 10000 == 0:
                print(f"processed {i} sessions")
            if has_sensitive_info(session):
                count += 1
                if not dry_run:
                    sanitize(session)
        if dry_run:
            print(f"DRY RUN {count} of {i} sessions need to be sanitized")
        else:
            print(f"Sanitized {count} of {i} sessions")
def sanitize(session):
    """Strip raw two-factor step data from every login-wizard view stored
    in ``session`` and persist the change."""
    for view_data in iter_wizard_login_views(session):
        del view_data["step_data"]
        del view_data["validated_step_data"]
    session.save()
    # sanity check: nothing sensitive should remain after saving
    assert not has_sensitive_info(session)
def iter_sessions(one_session):
    """Iterate over one or all existing django sessions
    Assumes that redis is the default cache in which all sessions are stored.
    """
    assert settings.SESSION_ENGINE == "django.contrib.sessions.backends.cache", \
        f"unsupported session engine: {settings.SESSION_ENGINE}"
    engine = import_module(settings.SESSION_ENGINE)
    if one_session:
        # getpass so the session key is not echoed or left in shell history
        session_key = getpass(prompt="Session key: ")
        yield engine.SessionStore(session_key)
        return
    cache = caches[settings.SESSION_CACHE_ALIAS]
    # cache keys are "<prefix><session_key>"; strip the prefix to recover
    # the session key for each stored session
    prefix_length = len(engine.SessionStore.cache_key_prefix)
    for key in cache.iter_keys(engine.SessionStore.cache_key_prefix + "*"):
        session_key = key[prefix_length:]
        yield engine.SessionStore(session_key)
def has_sensitive_info(session):
    """Return True if any wizard login view stored in ``session`` still
    contains sensitive step data at one of the known credential paths."""
    def contains_path(mapping, path):
        # Walk the nested dicts along ``path``; stop as soon as it breaks.
        node = mapping
        for segment in path:
            if isinstance(node, dict) and segment in node:
                node = node[segment]
            else:
                return False
        return True
    for data in iter_wizard_login_views(session):
        if contains_path(data, STEP_DATA_PATH) or contains_path(data, VALIDATED_STEP_DATA_PATH):
            return True
    return False
def iter_wizard_login_views(session):
    """Yield the stored state of every two-factor wizard login view entry."""
    def _is_login_view(name):
        return name.startswith("wizard_") and name.endswith("_login_view")
    return (data for key, data in session.items() if _is_login_view(key))
# Nested-dict paths (within a wizard login view's stored state) that hold
# raw credential data; used by has_sensitive_info() above.
STEP_DATA_PATH = ["step_data", "auth", "auth-password"]
VALIDATED_STEP_DATA_PATH = ["validated_step_data", "auth", "password"]
def get_two_factor_version():
    """Return the installed django-two-factor-auth version string, or None
    if the package is not installed."""
    try:
        return get_distribution("django-two-factor-auth").version
    except DistributionNotFound:
        return None
| bsd-3-clause | c8673b58118d9ea397270f5af000d700 | 32.444444 | 101 | 0.626885 | 4.02572 | false | false | false | false |
dimagi/commcare-hq | custom/reports/mc/reports/models.py | 1 | 4466 | import json
from corehq.apps.fixtures.models import LookupTable, LookupTableRow
from corehq.apps.reports.filters.base import BaseReportFilter
from django.urls import reverse
class AsyncDrillableFilter(BaseReportFilter):
    # todo: add documentation
    # todo: cleanup template
    """
    Report filter presenting a drill-down hierarchy of lookup-table rows,
    loaded asynchronously level by level.

    example_hierarchy = [{"type": "state", "display": "name"},
    {"type": "district", "parent_ref": "state_id", "references": "id", "display": "name"},
    {"type": "block", "parent_ref": "district_id", "references": "id", "display": "name"},
    {"type": "village", "parent_ref": "block_id", "references": "id", "display": "name"}]
    """
    template = "custom/reports/mc/reports/templates/mc/reports/drillable_async.html"
    # a list of fixture data type names that representing different levels of the hierarchy. Starting with the root
    hierarchy = []
    def fdi_to_json(self, fdi):
        # Serialize a LookupTableRow ("fixture data item") for the client.
        return {
            'fixture_type': fdi.table_id.hex,
            'fields': fdi.fields_without_attributes,
            'id': fdi.id.hex,
            'children': getattr(fdi, '_children', None),
        }
    # cache of LookupTable objects, one per hierarchy level (lazily filled)
    fdts = {}
    def data_types(self, index=None):
        """Return all hierarchy LookupTables, or the one at ``index``."""
        if not self.fdts:
            self.fdts = [
                LookupTable.objects.by_domain_tag(self.domain, h["type"])
                for h in self.hierarchy
            ]
        return self.fdts if index is None else self.fdts[index]
    @property
    def api_root(self):
        # API endpoint the client uses to fetch children of a node
        return reverse('api_dispatch_list', kwargs={'domain': self.domain,
                                                    'resource_name': 'fixture_internal',
                                                    'api_name': 'v0.5'})
    @property
    def full_hierarchy(self):
        """Hierarchy definition with each level's LookupTable id filled in."""
        ret = []
        for i, h in enumerate(self.hierarchy):
            new_h = dict(h)
            new_h['id'] = self.data_types(i).id.hex
            ret.append(new_h)
        return ret
    def generate_lineage(self, leaf_type, leaf_item_id):
        """Return the chain of rows from the hierarchy root down to the leaf.

        Walks upward from the selected leaf, resolving each level's parent via
        the ``parent_ref``/``references`` field mapping.
        """
        leaf_fdi = LookupTableRow.objects.get(id=leaf_item_id)
        index = None
        for i, h in enumerate(self.hierarchy[::-1]):
            if h["type"] == leaf_type:
                index = i
        if index is None:
            raise Exception(
                "Could not generate lineage for AsyncDrillableFilter due to a nonexistent leaf_type (%s)"
                % leaf_type)
        lineage = [leaf_fdi]
        for i, h in enumerate(self.full_hierarchy[::-1]):
            # skip levels below the leaf and the root level (has no parent)
            if i < index or i >= len(self.hierarchy) - 1:
                continue
            real_index = len(self.hierarchy) - (i + 1)
            lineage.insert(
                0, LookupTableRow.objects.with_value(
                    self.domain,
                    self.data_types(real_index - 1).id,
                    h["references"],
                    lineage[0].fields_without_attributes[h["parent_ref"]]
                ).get())
        return lineage
    @property
    def filter_context(self):
        """Build the template context: root nodes, selection, and the
        pre-expanded children along the selected item's lineage."""
        root_fdis = [self.fdi_to_json(f) for f in LookupTableRow.objects.iter_sorted(
            self.domain, self.data_types(0).id)]
        # selection is passed as "<leaf_type>:<row_id>"
        f_id = self.request.GET.get('fixture_id', None)
        selected_fdi_type = f_id.split(':')[0] if f_id else None
        selected_fdi_id = f_id.split(':')[1] if f_id else None
        if selected_fdi_id:
            lineage = self.generate_lineage(selected_fdi_type, selected_fdi_id)
            parent = {'children': root_fdis}
            # expand each level along the lineage so the UI can render the
            # drill-down pre-opened at the current selection
            for i, fdi in enumerate(lineage[:-1]):
                this_fdi = [f for f in parent['children']
                            if f['id'] == fdi.get_id][0]
                next_h = self.hierarchy[i + 1]
                this_fdi['children'] = [self.fdi_to_json(f) for f in
                                        LookupTableRow.objects.with_value(
                                            self.domain,
                                            self.data_types(i + 1).id,
                                            next_h["parent_ref"],
                                            fdi.fields_without_attributes[next_h["references"]])]
                parent = this_fdi
        return {
            'api_root': self.api_root,
            'control_name': self.label,
            'control_slug': self.slug,
            'selected_fdi_id': selected_fdi_id,
            'fdis': json.dumps(root_fdis),
            'hierarchy': self.full_hierarchy
        }
| bsd-3-clause | 88e48294f10ae49fd46d84dace6935f0 | 38.875 | 115 | 0.512315 | 3.94523 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/ivr/migrations/0001_initial.py | 1 | 3352 | import django.db.models.deletion
from django.db import migrations, models
import dimagi.utils.couch.migration
class Migration(migrations.Migration):
    """Initial IVR migration: creates the SQL ``Call`` model.

    Note: migration files are historical records — do not edit the schema
    definitions below after they have been applied.
    """
    dependencies = [
        ('sms', '0012_add_lastreadmessage_expectedcallback'),
    ]
    operations = [
        migrations.CreateModel(
            name='Call',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('domain', models.CharField(max_length=126, null=True, db_index=True)),
                ('date', models.DateTimeField(null=True, db_index=True)),
                ('couch_recipient_doc_type', models.CharField(max_length=126, null=True, db_index=True)),
                ('couch_recipient', models.CharField(max_length=126, null=True, db_index=True)),
                ('phone_number', models.CharField(max_length=126, null=True, db_index=True)),
                ('direction', models.CharField(max_length=1, null=True)),
                ('error', models.BooleanField(null=True, default=False)),
                ('system_error_message', models.TextField(null=True)),
                ('system_phone_number', models.CharField(max_length=126, null=True)),
                ('backend_api', models.CharField(max_length=126, null=True)),
                ('backend_id', models.CharField(max_length=126, null=True)),
                ('billed', models.BooleanField(null=True, default=False)),
                ('workflow', models.CharField(max_length=126, null=True)),
                ('xforms_session_couch_id', models.CharField(max_length=126, null=True, db_index=True)),
                ('reminder_id', models.CharField(max_length=126, null=True)),
                ('location_id', models.CharField(max_length=126, null=True)),
                ('couch_id', models.CharField(max_length=126, null=True, db_index=True)),
                ('answered', models.BooleanField(null=True, default=False)),
                ('duration', models.IntegerField(null=True)),
                ('gateway_session_id', models.CharField(max_length=126, null=True, db_index=True)),
                ('submit_partial_form', models.BooleanField(null=True, default=False)),
                ('include_case_side_effects', models.BooleanField(null=True, default=False)),
                ('max_question_retries', models.IntegerField(null=True)),
                ('current_question_retry_count', models.IntegerField(default=0, null=True)),
                ('xforms_session_id', models.CharField(max_length=126, null=True)),
                ('error_message', models.TextField(null=True)),
                ('use_precached_first_response', models.BooleanField(null=True, default=False)),
                ('first_response', models.TextField(null=True)),
                ('case_id', models.CharField(max_length=126, null=True)),
                ('case_for_case_submission', models.BooleanField(null=True, default=False)),
                ('form_unique_id', models.CharField(max_length=126, null=True)),
                ('messaging_subevent', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='sms.MessagingSubEvent', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(dimagi.utils.couch.migration.SyncSQLToCouchMixin, models.Model),
        ),
    ]
| bsd-3-clause | b6cf8c7e9cb0470e225c159df84446eb | 58.857143 | 142 | 0.599344 | 4 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/api/domain_metadata.py | 1 | 6313 | import logging
from tastypie import fields
from tastypie.exceptions import NotFound
from tastypie.resources import ModelResource, Resource
from dimagi.utils.dates import force_to_datetime
from corehq.apps.accounting.models import Subscription
from corehq.apps.api.resources import CouchResourceMixin, HqBaseResource
from corehq.apps.api.resources.auth import AdminAuthentication
from corehq.apps.api.resources.meta import CustomResourceMeta
from corehq.apps.api.serializers import XFormInstanceSerializer
from corehq.apps.data_analytics.models import GIRRow, MALTRow
from corehq.apps.domain.models import Domain, DomainAuditRecordEntry
from corehq.apps.es.domains import DomainES
def _get_domain(bundle):
return bundle.obj
class DomainQuerySetAdapter(object):
    """Adapts a DomainES query to the minimal sequence interface tastypie's
    paginator expects: ``count()`` plus slice access returning Domain objects.
    """

    def __init__(self, es_query):
        self.es_query = es_query

    def count(self):
        """Total number of matching domains (ES hit count)."""
        return self.es_query.count()

    def __getitem__(self, item):
        """Fetch one page of wrapped ``Domain`` objects.

        Only slice access is supported. A missing slice start is treated as 0
        (previously ``adapter[:n]`` crashed with a TypeError on
        ``item.stop - item.start``); an open-ended stop is rejected explicitly
        because ES requires a concrete page size.
        """
        if isinstance(item, slice):
            start = item.start or 0
            if item.stop is None:
                raise ValueError('Open-ended slices are not supported; a stop index is required.')
            hits = self.es_query.start(start).size(item.stop - start).run().hits
            return list(map(Domain.wrap, hits))
        raise ValueError('Invalid type of argument. Item should be an instance of slice class.')
class DomainMetadataResource(CouchResourceMixin, HqBaseResource):
    """Internal (admin-only) API exposing billing, calculated and raw couch
    properties for project spaces."""
    billing_properties = fields.DictField()
    calculated_properties = fields.DictField()
    domain_properties = fields.DictField()
    # using the default resource dispatch function to bypass our authorization for internal use
    def dispatch(self, request_type, request, **kwargs):
        return Resource.dispatch(self, request_type, request, **kwargs)
    def dehydrate_billing_properties(self, bundle):
        """Current subscription dates and plan for the domain (None if no
        active subscription)."""
        domain_obj = _get_domain(bundle)
        subscription = Subscription.get_active_subscription_by_domain(domain_obj.name)
        return {
            "date_start": (subscription.date_start
                           if subscription is not None else None),
            "date_end": (subscription.date_end
                         if subscription is not None else None),
            "plan_version": (subscription.plan_version
                             if subscription is not None else None),
        }
    def dehydrate_calculated_properties(self, bundle):
        """Collect all 'cp_'-prefixed properties from ES plus audit-record
        counters; returns {} if the domain is missing from ES."""
        calc_prop_prefix = 'cp_'
        domain_obj = _get_domain(bundle)
        try:
            es_data = (DomainES()
                       .in_domains([domain_obj.name])
                       .size(1)
                       .run()
                       .hits[0])
            base_properties = {
                prop_name: es_data[prop_name]
                for prop_name in es_data
                if prop_name.startswith(calc_prop_prefix)
            }
            try:
                audit_record = DomainAuditRecordEntry.objects.get(domain=domain_obj.name)
            except DomainAuditRecordEntry.DoesNotExist:
                audit_record = None
            # getattr default of 0 covers the audit_record=None case too
            extra_properties = {
                field.name: getattr(audit_record, field.name, 0)
                for field in DomainAuditRecordEntry._meta.fields
                if field.name.startswith(calc_prop_prefix)
            }
            base_properties.update(extra_properties)
            return base_properties
        except IndexError:
            # hits[0] failed: the domain is not present in the ES index
            logging.exception('Problem getting calculated properties for {}'.format(domain_obj.name))
            return {}
    def dehydrate_domain_properties(self, bundle):
        # raw couch document for the domain
        return _get_domain(bundle)._doc
    def obj_get(self, bundle, **kwargs):
        domain_obj = Domain.get_by_name(kwargs.get('domain'))
        if domain_obj is None:
            raise NotFound
        return domain_obj
    def obj_get_list(self, bundle, **kwargs):
        """List domains, optionally filtered by last_modified bounds."""
        if kwargs.get('domain'):
            return [self.obj_get(bundle, **kwargs)]
        else:
            filters = {}
            if hasattr(bundle.request, 'GET'):
                filters = bundle.request.GET
            params = {}
            if 'last_modified__lte' in filters:
                params['lte'] = force_to_datetime(filters['last_modified__lte'])
            if 'last_modified__gte' in filters:
                params['gte'] = force_to_datetime(filters['last_modified__gte'])
            return DomainQuerySetAdapter(DomainES().last_modified(**params).sort('last_modified'))
    class Meta(CustomResourceMeta):
        authentication = AdminAuthentication()
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        object_class = Domain
        resource_name = 'project_space_metadata'
        serializer = XFormInstanceSerializer(formats=['json'])
class MaltResource(ModelResource):
    """Admin-only, read-only API over MALT (monthly active logged-in tables)
    rows, filterable by month range and domain."""
    class Meta(CustomResourceMeta):
        authentication = AdminAuthentication()
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        queryset = MALTRow.objects.all().order_by('pk')
        resource_name = 'malt_tables'
        fields = ['id', 'month', 'user_id', 'username', 'email', 'user_type',
                  'domain_name', 'num_of_forms', 'app_id', 'device_id',
                  'is_app_deleted', 'wam', 'pam', 'use_threshold', 'experienced_threshold']
        include_resource_uri = False
        filtering = {
            'month': ['gt', 'gte', 'lt', 'lte'],
            'domain_name': ['exact']
        }
class GIRResource(ModelResource):
    """Admin-only, read-only API over GIR (global impact report) rows,
    filterable by month range and domain."""
    class Meta(CustomResourceMeta):
        authentication = AdminAuthentication()
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        queryset = GIRRow.objects.all().order_by('pk')
        resource_name = 'gir_tables'
        fields = [
            'id', 'month', 'domain_name', 'country', 'sector', 'subsector', 'bu',
            'self_service', 'test_domain', 'start_date', 'device_id', 'pam',
            'wams_current', 'active_users', 'using_and_performing', 'not_performing',
            'inactive_experienced', 'inactive_not_experienced', 'not_experienced',
            'not_performing_not_experienced', 'active_ever', 'possibly_exp', 'ever_exp',
            'exp_and_active_ever', 'active_in_span', 'eligible_forms', 'performance_threshold',
            'experienced_threshold',
        ]
        include_resource_uri = False
        filtering = {
            'month': ['gt', 'gte', 'lt', 'lte'],
            'domain_name': ['exact']
        }
| bsd-3-clause | 63b45d6f228c8d94391869a370ec25f5 | 37.969136 | 114 | 0.609377 | 4.123449 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/funding/migrations/0053_auto_20200320_1457.py | 1 | 1092 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-03-20 13:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds ``Funding.started`` and clarifies deadline/duration help texts.

    Note: migration files are historical records — do not edit the schema
    definitions below after they have been applied.
    """
    dependencies = [
        ('funding', '0052_auto_20200205_1710'),
    ]
    operations = [
        migrations.AddField(
            model_name='funding',
            name='started',
            field=models.DateTimeField(blank=True, null=True, verbose_name='started'),
        ),
        migrations.AlterField(
            model_name='funding',
            name='deadline',
            field=models.DateTimeField(blank=True, help_text='If you enter a deadline, leave the duration field empty. This will override the duration.', null=True, verbose_name='deadline'),
        ),
        migrations.AlterField(
            model_name='funding',
            name='duration',
            field=models.PositiveIntegerField(blank=True, help_text='If you enter a duration, leave the deadline field empty for it to be automatically calculated.', null=True, verbose_name='duration'),
        ),
    ]
| bsd-3-clause | 0f7ac1f2cd3f22a638ffd87f0ff2ac22 | 35.4 | 202 | 0.627289 | 4.316206 | false | false | false | false |
dimagi/commcare-hq | corehq/ex-submodules/dimagi/utils/rate_limit.py | 1 | 1605 | from dimagi.utils.couch.cache.cache_core import get_redis_client
# todo: should this be replaced project_limits.RateLimiter?
def rate_limit(key, actions_allowed=60, how_often=60):
    """Redis-backed rate limiting helper.

    Allows at most ``actions_allowed`` actions per ``how_often`` seconds for
    the given ``key``. Returns True if the caller may proceed with the action,
    False if the limit has been reached and the action should be delayed.

    Example -- allow a project space to send at most 100 SMS per 30 seconds:

        if rate_limit('send-sms-for-projectname', actions_allowed=100, how_often=30):
            <perform action>
        else:
            <delay action>
    """
    # Use the raw redis client: calling incr on a django_redis RedisCache
    # object raises when the key doesn't exist, while raw redis initializes
    # a missing (or expired) key to 0 before incrementing.
    redis = get_redis_client().client.get_client()
    count = redis.incr(key)
    needs_expiry = count == 1 or redis.ttl(key) == -1
    if needs_expiry:
        # First action in the window -- start the countdown. As a precaution
        # we also re-set the expiration if the key somehow has none (ttl == -1),
        # e.g. because an earlier attempt to set it failed.
        redis.expire(key, how_often)
    return count <= actions_allowed
| bsd-3-clause | 46feee3d60d311dffcfdebfae0b1ab24 | 37.214286 | 81 | 0.685358 | 3.98263 | false | false | false | false |
dimagi/commcare-hq | corehq/messaging/smsbackends/mach/forms.py | 1 | 1186 | from django.forms.fields import *
from corehq.apps.sms.forms import BackendForm
from dimagi.utils.django.fields import TrimmedCharField
from django.core.exceptions import ValidationError
from crispy_forms import layout as crispy
from django.utils.translation import gettext_lazy as _
class MachBackendForm(BackendForm):
    """Gateway-specific settings form for the Syniverse (MACH) SMS backend."""
    account_id = TrimmedCharField(
        label=_("Account ID"),
    )
    password = TrimmedCharField(
        label=_("Password"),
    )
    sender_id = TrimmedCharField(
        label=_("Sender ID"),
    )
    max_sms_per_second = IntegerField(
        label=_("Max Outgoing SMS Per Second (as per account contract)"),
    )

    def clean_max_sms_per_second(self):
        """Validate that the throughput limit is a positive integer.

        The previous implementation used ``assert value > 0``, which is
        silently stripped when Python runs with ``-O`` (disabling the check);
        an explicit comparison is used instead.
        """
        value = self.cleaned_data["max_sms_per_second"]
        try:
            value = int(value)
        except (TypeError, ValueError):
            raise ValidationError(_("Please enter a positive number"))
        if value <= 0:
            raise ValidationError(_("Please enter a positive number"))
        return value

    @property
    def gateway_specific_fields(self):
        """Crispy-forms layout for the gateway-specific settings section."""
        return crispy.Fieldset(
            _("Syniverse Settings"),
            'account_id',
            'password',
            'sender_id',
            'max_sms_per_second',
        )
| bsd-3-clause | 3fd6c58bce88e62f266f669a185a5b4c | 28.65 | 73 | 0.623103 | 4.205674 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/time_based/migrations/0005_application.py | 1 | 1755 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-21 11:15
from __future__ import unicode_literals
import bluebottle.files.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates the ``Application`` contribution model for time-based activities.

    Note: migration files are historical records — do not edit the schema
    definitions below after they have been applied.
    """
    dependencies = [
        ('files', '0007_auto_20201021_1315'),
        ('time_based', '0004_auto_20201014_1444'),
    ]
    operations = [
        migrations.CreateModel(
            name='Application',
            fields=[
                ('contribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='activities.Contribution')),
                ('motivation', models.TextField(blank=True)),
                ('document', bluebottle.files.fields.PrivateDocumentField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='files.PrivateDocument')),
            ],
            options={
                'verbose_name': 'Application',
                'verbose_name_plural': 'Application',
                'permissions': (('api_read_application', 'Can view application through the API'), ('api_add_application', 'Can add application through the API'), ('api_change_application', 'Can change application through the API'), ('api_delete_application', 'Can delete application through the API'), ('api_read_own_application', 'Can view own application through the API'), ('api_add_own_application', 'Can add own application through the API'), ('api_change_own_application', 'Can change own application through the API'), ('api_delete_own_application', 'Can delete own application through the API')),
            },
            bases=('activities.contribution',),
        ),
    ]
| bsd-3-clause | 3979da9c41ed036cbf2d5e5bb93e2575 | 53.84375 | 604 | 0.65584 | 4.208633 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/activities/models.py | 1 | 10463 | import uuid
from builtins import str, object
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from djchoices.choices import DjangoChoices, ChoiceItem
from future.utils import python_2_unicode_compatible
from polymorphic.models import PolymorphicModel
from bluebottle.files.fields import ImageField
from bluebottle.follow.models import Follow
from bluebottle.fsm.triggers import TriggerMixin
from bluebottle.geo.models import Location
from bluebottle.initiatives.models import Initiative, InitiativePlatformSettings
from bluebottle.offices.models import OfficeRestrictionChoices
from bluebottle.utils.models import ValidatedModelMixin, AnonymizationMixin
from bluebottle.utils.utils import get_current_host, get_current_language, clean_html
@python_2_unicode_compatible
class Activity(TriggerMixin, AnonymizationMixin, ValidatedModelMixin, PolymorphicModel):
    """Polymorphic base model for all activity types belonging to an
    initiative (events, deeds, funding campaigns, ...)."""
    class TeamActivityChoices(DjangoChoices):
        teams = ChoiceItem('teams', label=_("Teams"))
        individuals = ChoiceItem('individuals', label=_("Individuals"))
    owner = models.ForeignKey(
        'members.Member',
        verbose_name=_('activity manager'),
        related_name='activities',
        on_delete=models.CASCADE
    )
    highlight = models.BooleanField(
        default=False,
        help_text=_('Highlight this activity to show it on homepage')
    )
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(auto_now=True)
    transition_date = models.DateTimeField(
        _('transition date'),
        help_text=_('Date of the last transition.'),
        null=True, blank=True
    )
    # current state machine status; managed via TriggerMixin transitions
    status = models.CharField(max_length=40)
    review_status = models.CharField(max_length=40, default='draft')
    initiative = models.ForeignKey(Initiative, related_name='activities', on_delete=models.CASCADE)
    office_location = models.ForeignKey(
        'geo.Location', verbose_name=_('Host office'),
        null=True, blank=True, on_delete=models.SET_NULL)
    office_restriction = models.CharField(
        _('Restrictions'),
        default=OfficeRestrictionChoices.all,
        choices=OfficeRestrictionChoices.choices,
        blank=True, null=True, max_length=100
    )
    title = models.CharField(_('Title'), max_length=255)
    # 'new' is a placeholder slug; save() derives the real slug from the title
    slug = models.SlugField(_('Slug'), max_length=100, default='new')
    description = models.TextField(
        _('Description'), blank=True
    )
    team_activity = models.CharField(
        _('participation'),
        max_length=100,
        default=TeamActivityChoices.individuals,
        choices=TeamActivityChoices.choices,
        blank=True,
        help_text=_("Is this activity open for individuals or can only teams sign up?")
    )
    image = ImageField(blank=True, null=True)
    video_url = models.URLField(
        _('video'),
        max_length=100,
        blank=True,
        null=True,
        default='',
        help_text=_(
            "Do you have a video pitch or a short movie that "
            "explains your activity? Cool! We can't wait to see it! "
            "You can paste the link to YouTube or Vimeo video here"
        )
    )
    segments = models.ManyToManyField(
        'segments.segment',
        verbose_name=_('Segment'),
        related_name='activities',
        blank=True
    )
    followers = GenericRelation('follow.Follow', object_id_field='instance_id')
    messages = GenericRelation('notifications.Message')
    follows = GenericRelation(Follow, object_id_field='instance_id')
    wallposts = GenericRelation('wallposts.Wallpost', related_query_name='activity_wallposts')
    # subclasses can set this False to require manual review before approval
    auto_approve = True
    @property
    def activity_date(self):
        # each concrete activity type defines its own notion of "the date"
        raise NotImplementedError
    @property
    def stats(self):
        # overridden by subclasses with type-specific statistics
        return {}
    @property
    def required_fields(self):
        """Fields that must be filled before the activity can be submitted;
        office fields are only required when the platform uses them."""
        fields = []
        if Location.objects.count():
            fields.append('office_location')
        if InitiativePlatformSettings.load().enable_office_regions:
            fields.append('office_restriction')
        return fields
    class Meta(object):
        verbose_name = _("Activity")
        verbose_name_plural = _("Activities")
        permissions = (
            ('api_read_activity', 'Can view activity through the API'),
            ('api_read_own_activity', 'Can view own activity through the API'),
        )
    def __str__(self):
        return self.title or str(_('-empty-'))
    def save(self, **kwargs):
        """Derive the slug, default the owner, sanitize the description and
        inherit the owner's segments before saving."""
        if self.slug in ['', 'new']:
            if self.title and slugify(self.title):
                self.slug = slugify(self.title)
            else:
                self.slug = 'new'
        if not self.owner_id:
            # default to the initiative's owner when no explicit manager is set
            self.owner = self.initiative.owner
        self.description = clean_html(self.description)
        super(Activity, self).save(**kwargs)
        if not self.segments.count():
            # M2M assignment requires a pk, hence after the super().save()
            for segment in self.owner.segments.filter(segment_type__inherit=True).all():
                self.segments.add(segment)
    def get_absolute_url(self):
        """Public front-end URL for this activity's detail page."""
        domain = get_current_host()
        language = get_current_language()
        link = u"{}/{}/initiatives/activities/details/{}/{}/{}".format(
            domain, language,
            self.get_real_instance().__class__.__name__.lower(),
            self.pk,
            self.slug
        )
        return link
    @property
    def organizer(self):
        # the Organizer contributor auto-created for the activity owner
        return self.contributors.instance_of(Organizer).first()
def NON_POLYMORPHIC_CASCADE(collector, field, sub_objs, using):
    """``on_delete`` handler that cascades over the non-polymorphic queryset.

    This fixing deleting related polymorphic objects through admin.
    """
    if hasattr(sub_objs, 'non_polymorphic'):
        sub_objs = sub_objs.non_polymorphic()
    return models.CASCADE(collector, field, sub_objs, using)
@python_2_unicode_compatible
class Contributor(TriggerMixin, AnonymizationMixin, PolymorphicModel):
    """Polymorphic base model for a person's participation in an activity."""
    status = models.CharField(max_length=40)
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(auto_now=True)
    transition_date = models.DateTimeField(null=True, blank=True)
    contributor_date = models.DateTimeField(null=True, blank=True)
    activity = models.ForeignKey(
        Activity, related_name='contributors', on_delete=NON_POLYMORPHIC_CASCADE
    )
    team = models.ForeignKey(
        'activities.Team', verbose_name=_('team'),
        null=True, blank=True, related_name='members', on_delete=models.CASCADE
    )
    # null user means an anonymous/guest contributor (see __str__)
    user = models.ForeignKey(
        'members.Member', verbose_name=_('user'),
        null=True, blank=True, on_delete=models.CASCADE
    )
    invite = models.OneToOneField(
        'activities.Invite', null=True, on_delete=models.SET_NULL, related_name="contributor"
    )
    accepted_invite = models.ForeignKey(
        'activities.Invite', null=True, on_delete=models.SET_NULL, related_name="accepted_contributors"
    )
    @property
    def owner(self):
        # the contributor "belongs" to the participating user
        return self.user
    @property
    def is_team_captain(self):
        return self.team and self.user == self.team.owner
    @property
    def date(self):
        return self.activity.contributor_date
    class Meta(object):
        ordering = ('-created',)
        verbose_name = _('Contribution')
        verbose_name_plural = _('Contributions')
    def __str__(self):
        if self.user:
            return str(self.user)
        return str(_('Guest'))
@python_2_unicode_compatible
class Organizer(Contributor):
    """Contributor representing the owner/organizer of an activity."""
    class Meta(object):
        verbose_name = _("Activity owner")
        verbose_name_plural = _("Activity owners")
    class JSONAPIMeta(object):
        resource_name = 'contributors/organizers'
class Contribution(TriggerMixin, PolymorphicModel):
    """Polymorphic base model for a measurable amount contributed
    (time, money, effort) by a Contributor."""
    status = models.CharField(max_length=40)
    created = models.DateTimeField(default=timezone.now)
    start = models.DateTimeField(_('start'), null=True, blank=True)
    end = models.DateTimeField(_('end'), null=True, blank=True)
    contributor = models.ForeignKey(
        Contributor, related_name='contributions', on_delete=NON_POLYMORPHIC_CASCADE
    )
    @property
    def owner(self):
        # ownership follows the contributor's user
        return self.contributor.user
    class Meta(object):
        ordering = ('-created',)
        verbose_name = _("Contribution amount")
        verbose_name_plural = _("Contribution amounts")
    def __str__(self):
        return str(_('Contribution amount'))
class EffortContribution(Contribution):
    """Contribution measured as effort (organizing, or deed participation)."""
    class ContributionTypeChoices(DjangoChoices):
        organizer = ChoiceItem('organizer', label=_("Activity Organizer"))
        # NOTE(review): "particpant" is a typo in this user-facing label;
        # fixing it would invalidate existing translations — handle separately.
        deed = ChoiceItem('deed', label=_("Deed particpant"))
    contribution_type = models.CharField(
        _('Contribution type'),
        max_length=20,
        choices=ContributionTypeChoices.choices,
    )
    class Meta(object):
        verbose_name = _("Effort")
        verbose_name_plural = _("Contributions")
class Invite(models.Model):
    """Opaque invitation token linking a contributor to the person who
    invited them; identified by a random UUID primary key."""
    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    class JSONAPIMeta(object):
        resource_name = 'activities/invites'
class Team(TriggerMixin, models.Model):
    """A group of contributors participating together in an activity."""
    status = models.CharField(max_length=40)
    activity = models.ForeignKey(
        Activity, related_name='teams', on_delete=NON_POLYMORPHIC_CASCADE
    )
    created = models.DateTimeField(default=timezone.now)
    owner = models.ForeignKey(
        'members.Member', related_name='teams', null=True, on_delete=models.SET_NULL
    )

    @property
    def accepted_participants(self):
        """Queryset of team members whose status is 'accepted'."""
        return self.members.filter(status='accepted')

    @property
    def accepted_participants_count(self):
        """Number of accepted members.

        Uses ``count()`` so the database does the counting, instead of
        materializing the whole queryset just to take ``len()`` of it.
        """
        return self.accepted_participants.count()

    class Meta(object):
        ordering = ('-created',)
        verbose_name = _("Team")
        permissions = (
            ('api_read_team', 'Can view team through the API'),
            ('api_change_team', 'Can change team through the API'),
            ('api_change_own_team', 'Can change own team through the API'),
        )

    @property
    def name(self):
        """Display name: "Team <owner full name>", or "Team Anonymous"."""
        return _("Team {name}").format(
            name=self.owner.full_name if self.owner_id else _("Anonymous")
        )

    def __str__(self):
        return self.name
from bluebottle.activities.signals import * # noqa
from bluebottle.activities.wallposts import * # noqa
from bluebottle.activities.states import * # noqa
| bsd-3-clause | 23c2c95c6954c60ce5a299d45f9ee42c | 30.89939 | 103 | 0.65507 | 4.160239 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/sso/models.py | 1 | 19475 | from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.urls import reverse
from corehq.apps.accounting.models import BillingAccount, Subscription
from corehq.apps.sso import certificates
from corehq.apps.sso.exceptions import ServiceProviderCertificateError
from corehq.apps.sso.utils.user_helpers import get_email_domain_from_username
from corehq.util.quickcache import quickcache
class IdentityProviderType:
    """Supported external identity provider products."""
    AZURE_AD = 'azure_ad'
    ONE_LOGIN = 'one_login'
    OKTA = 'okta'
    # (value, display name) pairs for model field choices
    CHOICES = (
        (AZURE_AD, "Azure AD"),
        (ONE_LOGIN, "One Login"),
        (OKTA, "Okta"),
    )
class IdentityProviderProtocol:
    """SSO protocols supported for talking to an identity provider."""
    SAML = 'saml'
    OIDC = 'oidc'
    CHOICES = (
        (SAML, "SAML 2.0"),
        (OIDC, "OpenID Connect (OIDC)"),
    )
    @classmethod
    def get_supported_types(cls):
        """Map each protocol to the IdP product choices it supports."""
        return {
            cls.SAML: (
                (IdentityProviderType.AZURE_AD, "Azure AD"),
            ),
            cls.OIDC: (
                (IdentityProviderType.ONE_LOGIN, "One Login"),
                (IdentityProviderType.OKTA, "Okta"),
            )
        }
class LoginEnforcementType:
    """How an active IdP's login policy is enforced on the login page."""
    GLOBAL = 'global'
    TEST = 'test'
    CHOICES = (
        (GLOBAL, "Global"),
        (TEST, "Test"),
    )
class ServiceProviderCertificate:
    """Freshly generated self-signed x509 key pair for SP/IdP exchanges."""
    def __init__(self):
        """
        To increase the security with SAML transactions, we will provide the IdP
        with our public key for an x509 certificate unique to our interactions with
        a particular IdP. This certificate will be regenerated automatically by
        a periodic task every year.
        """
        key_pair = certificates.create_key_pair()
        cert = certificates.create_self_signed_cert(key_pair)
        # PEM-encoded material plus the cert's expiry, consumed by
        # IdentityProvider.create_service_provider_certificate()
        self.public_key = certificates.get_public_key(cert)
        self.private_key = certificates.get_private_key(key_pair)
        self.date_expires = certificates.get_expiration_date(cert)
class IdentityProvider(models.Model):
    """
    This stores the information necessary to make a SAML request to an external
    IdP. Right now this process supports Azure AD and the plan is to add
    support for other identity provider types in the future.
    """
    # these three fields must only ever be editable by Accounting admins
    name = models.CharField(max_length=128)
    slug = models.CharField(max_length=256, db_index=True, unique=True)
    idp_type = models.CharField(
        max_length=50,
        default=IdentityProviderType.AZURE_AD,
        choices=IdentityProviderType.CHOICES,
    )
    protocol = models.CharField(
        max_length=5,
        default=IdentityProviderProtocol.SAML,
        choices=IdentityProviderProtocol.CHOICES,
    )
    # whether an IdP is editable by its BillingAccount owner
    # (it will always be editable by accounting admins)
    is_editable = models.BooleanField(default=False)
    # whether an IdP is actively in use as an authentication method on HQ
    is_active = models.BooleanField(default=False)
    # determines how the is_active behavior enforces the login policy on the homepage
    login_enforcement_type = models.CharField(
        max_length=10,
        default=LoginEnforcementType.GLOBAL,
        choices=LoginEnforcementType.CHOICES,
    )
    # the enterprise admins of this account will be able to edit the SAML
    # configuration fields
    owner = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
    entity_id = models.TextField(blank=True, null=True)
    # these are fields required by the external IdP to form a SAML request
    login_url = models.TextField(blank=True, null=True)
    logout_url = models.TextField(blank=True, null=True)
    idp_cert_public = models.TextField(blank=True, null=True)
    # needed for OIDC
    client_id = models.TextField(blank=True, null=True)
    client_secret = models.TextField(blank=True, null=True)
    # the date the IdP's SAML signing certificate expires.
    # this will be filled out by enterprise admins
    date_idp_cert_expiration = models.DateTimeField(blank=True, null=True)
    # Requires that <saml:Assertion> elements received by the SP are encrypted.
    # In Azure AD this requires that Token Encryption is enabled, a premium feature
    require_encrypted_assertions = models.BooleanField(default=False)
    # as the service provider, this will store our x509 certificates and
    # will be renewed automatically by a periodic task
    sp_cert_public = models.TextField(blank=True, null=True)
    sp_cert_private = models.TextField(blank=True, null=True)
    date_sp_cert_expiration = models.DateTimeField(blank=True, null=True)
    # as the x509 certificate expires, we need to provide the IdP with our next
    # "rollover" cert to prepare the IdP for the transfer
    sp_rollover_cert_public = models.TextField(blank=True, null=True)
    sp_rollover_cert_private = models.TextField(blank=True, null=True)
    date_sp_rollover_cert_expiration = models.DateTimeField(blank=True, null=True)
    # for auditing purposes
    # NOTE(review): created_by/last_modified_by appear to hold the email of the
    # acting admin — confirm against the views that save this model
    created_on = models.DateTimeField(auto_now_add=True)
    created_by = models.EmailField()
    last_modified = models.DateTimeField(auto_now=True)
    last_modified_by = models.EmailField()
    class Meta:
        # explicit app_label since this model lives in the sso app
        app_label = 'sso'
    def __str__(self):
        """Human-readable label, e.g. ``Dimagi IdP [azure_ad]``."""
        return f"{self.name} IdP [{self.idp_type}]"
    @property
    def service_name(self):
        """Display name for this IdP's product type (e.g. "Azure AD")."""
        return dict(IdentityProviderType.CHOICES)[self.idp_type]
def create_service_provider_certificate(self):
sp_cert = ServiceProviderCertificate()
self.sp_cert_public = sp_cert.public_key
self.sp_cert_private = sp_cert.private_key
self.date_sp_cert_expiration = sp_cert.date_expires
self.save()
def create_rollover_service_provider_certificate(self):
sp_cert = ServiceProviderCertificate()
self.sp_rollover_cert_public = sp_cert.public_key
self.sp_rollover_cert_private = sp_cert.private_key
self.date_sp_rollover_cert_expiration = sp_cert.date_expires
self.save()
def renew_service_provider_certificate(self):
    """Promote the rollover certificate to be the active SP certificate.

    Raises ServiceProviderCertificateError when no rollover certificate
    was ever generated.
    """
    if not self.sp_rollover_cert_public:
        raise ServiceProviderCertificateError(
            "A rollover certificate for the Service Provider was never "
            "generated. You should first create a rollover certificate and "
            "leave it active for a few days to give the IdP a heads up."
        )
    # Copy each rollover field over its active counterpart, then blank the
    # rollover slot so a new rollover cert can be generated later.
    for active_attr, rollover_attr in (
        ('sp_cert_public', 'sp_rollover_cert_public'),
        ('sp_cert_private', 'sp_rollover_cert_private'),
        ('date_sp_cert_expiration', 'date_sp_rollover_cert_expiration'),
    ):
        setattr(self, active_attr, getattr(self, rollover_attr))
        setattr(self, rollover_attr, None)
    self.save()
def get_email_domains(self):
    """Email domains authenticated by this IdP (flat values queryset)."""
    linked = AuthenticatedEmailDomain.objects.filter(identity_provider=self)
    return linked.values_list('email_domain', flat=True).all()

def get_sso_exempt_users(self):
    """Usernames exempt from SSO across all of this IdP's email domains."""
    exemptions = UserExemptFromSingleSignOn.objects.filter(
        email_domain__identity_provider=self,
    )
    return exemptions.values_list('username', flat=True)
def get_login_url(self, username=None):
    """
    Gets the login endpoint for the IdentityProvider based on the protocol
    being used.
    :param username: (string) username to pre-populate IdP login with
    :return: (string) identity provider login url
    """
    from urllib.parse import urlencode
    login_view_name = (
        'sso_saml_login' if self.protocol == IdentityProviderProtocol.SAML
        else 'sso_oidc_login'
    )
    url = reverse(login_view_name, args=(self.slug,))
    if username:
        # Previously the username was interpolated unescaped, and a missing
        # username produced a literal "?username=None". urlencode fixes both:
        # it percent-encodes the value and the parameter is omitted entirely
        # when no username is given.
        url = '{}?{}'.format(url, urlencode({'username': username}))
    return url
def get_active_projects(self):
    """
    Return the active project space (domain) names for this identity
    provider, i.e. domains with an active subscription under the owning
    billing account.
    :return: list of strings (domain names)
    """
    active_subscriptions = Subscription.visible_objects.filter(
        account=self.owner,
        is_active=True,
    )
    return list(active_subscriptions.values_list('subscriber__domain', flat=True))
@quickcache(['self.slug', 'domain'])
def is_domain_an_active_member(self, domain):
    """
    Checks whether the given Domain is an Active Member of the current
    Identity Provider.

    An "Active Member" is defined by having an active Subscription that
    belongs to the BillingAccount owner of this IdentityProvider.

    Cached by (idp slug, domain); invalidated via clear_domain_caches().
    :param domain: String (the Domain name)
    :return: Boolean (True if Domain is an Active Member)
    """
    return Subscription.visible_objects.filter(
        account=self.owner,
        is_active=True,
        subscriber__domain=domain,
    ).exists()
@quickcache(['self.slug', 'domain'])
def does_domain_trust_this_idp(self, domain):
    """
    Checks whether the given Domain trusts this Identity Provider.
    :param domain: String (the Domain name)
    :return: Boolean (True if Domain trusts this Identity Provider)
    """
    if self.is_domain_an_active_member(domain):
        return True
    # Not an active member: trust can still have been granted explicitly
    # by an administrator of the domain acknowledging this IdP.
    return TrustedIdentityProvider.objects.filter(
        domain=domain, identity_provider=self
    ).exists()
def clear_domain_caches(self, domain):
    """
    Clear all caches associated with a Domain and this IdentityProvider
    :param domain: String (the Domain name)
    """
    # quickcache's .clear() takes the same arguments as the cached call
    # (self + domain here).
    IdentityProvider.does_domain_trust_this_idp.clear(self, domain)
    IdentityProvider.is_domain_an_active_member.clear(self, domain)
    # imported locally, presumably to avoid a circular import -- confirm
    from corehq.apps.sso.utils.domain_helpers import is_domain_using_sso
    is_domain_using_sso.clear(domain)

@staticmethod
def clear_email_domain_caches(email_domain):
    """
    Clears all caches associated with a given email_domain
    :param email_domain: String (email domain)
    """
    # The cache key includes cls.__name__, so the class itself must be
    # passed as the first argument when clearing.
    IdentityProvider.get_active_identity_provider_by_email_domain.clear(
        IdentityProvider,
        email_domain
    )
def clear_all_email_domain_caches(self):
    """Invalidate email-domain caches for every email domain tied to this IdP."""
    linked_email_domains = AuthenticatedEmailDomain.objects.filter(
        identity_provider=self,
    ).values_list('email_domain', flat=True)
    for email_domain in linked_email_domains:
        self.clear_email_domain_caches(email_domain)

def clear_all_domain_subscriber_caches(self):
    """
    Invalidate the domain caches for every active project space tied to the
    BillingAccount owner of this IdentityProvider.
    """
    for project_domain in self.get_active_projects():
        self.clear_domain_caches(project_domain)
def save(self, *args, **kwargs):
    # Persist first, then invalidate: cached readers must not repopulate
    # from stale pre-save state.
    super().save(*args, **kwargs)
    self.clear_all_email_domain_caches()
    self.clear_all_domain_subscriber_caches()
def create_trust_with_domain(self, domain, username):
    """
    This creates a TrustedIdentityProvider relationship between the Domain
    and the current Identity Provider.
    :param domain: String (the Domain name)
    :param username: String (the username of the user creating this agreement)
    :return: Boolean (True if a new trust was created, False if it already exists)
    """
    # get_or_create collapses the previous check-then-insert into a single
    # helper, avoiding the race where two concurrent requests both pass the
    # existence check and insert duplicate trust rows.
    _, created = TrustedIdentityProvider.objects.get_or_create(
        domain=domain,
        identity_provider=self,
        defaults={'acknowledged_by': username},
    )
    return created
@classmethod
def domain_has_editable_identity_provider(cls, domain):
    """
    Check whether the given Domain is associated with an IdentityProvider
    that is marked editable.
    :param domain: (String) Domain name
    :return: Boolean (True if an editable IdentityProvider exists)
    """
    account = BillingAccount.get_account_by_domain(domain)
    return cls.objects.filter(owner=account, is_editable=True).exists()

@classmethod
@quickcache(['cls.__name__', 'email_domain'])
def get_active_identity_provider_by_email_domain(cls, email_domain):
    """
    Return the active Identity Provider tied to the given email domain,
    or None when there is no (active) provider for it.
    :param email_domain: (string)
    :return: IdentityProvider or None
    """
    try:
        authenticated = AuthenticatedEmailDomain.objects.get(
            email_domain=email_domain
        )
    except AuthenticatedEmailDomain.DoesNotExist:
        return None
    provider = authenticated.identity_provider
    return provider if provider.is_active else None

@classmethod
def get_active_identity_provider_by_username(cls, username):
    """
    Return the active Identity Provider for the user's email domain, or None.
    :param username: (string)
    :return: IdentityProvider or None
    """
    email_domain = get_email_domain_from_username(username)
    if not email_domain:
        # malformed username/email -- no provider can match
        return None
    return cls.get_active_identity_provider_by_email_domain(email_domain)
@classmethod
def does_domain_trust_user(cls, domain, username):
    """
    Determine whether the given domain trusts the user's IdentityProvider
    (if applicable) based on their email domain. Users without an
    IdentityProvider are always trusted.
    :param domain: (String) name of the domain
    :param username: (String) username of the user
    :return: Boolean (True if an IdP trust exists or is not applicable)
    """
    idp = cls.get_active_identity_provider_by_username(username)
    return idp is None or idp.does_domain_trust_this_idp(domain)

@classmethod
def get_required_identity_provider(cls, username):
    """
    Return the Identity Provider the given user MUST log in / sign up with,
    or None.

    A provider is required only when it exists, is active, and either:
      * enforces logins globally and the user is not explicitly exempt, or
      * is in test mode and the user is registered as an SSO test user.
    :param username: String
    :return: IdentityProvider or None
    """
    idp = cls.get_active_identity_provider_by_username(username)
    if not idp:
        return None
    enforcement = idp.login_enforcement_type
    if enforcement == LoginEnforcementType.GLOBAL:
        is_exempt = UserExemptFromSingleSignOn.objects.filter(
            username=username
        ).exists()
        return None if is_exempt else idp
    if enforcement == LoginEnforcementType.TEST:
        if SsoTestUser.objects.filter(username=username).exists():
            return idp
    return None
@receiver(post_save, sender=Subscription)
@receiver(post_delete, sender=Subscription)
def clear_caches_when_subscription_status_changes(sender, instance, **kwargs):
    """
    Catches the post-save and post-delete signals of Subscription to ensure
    that if the Subscription status for a domain changes, that the
    domain-related caches for IdentityProvider are all cleared.
    :param sender: The sender class (in this case Subscription)
    :param instance: Subscription - the instance being saved/deleted
    :param kwargs: unused signal kwargs
    """
    # Every IdP owned by the subscription's account may have cached results
    # for this subscriber's domain.
    for identity_provider in IdentityProvider.objects.filter(owner=instance.account):
        identity_provider.clear_domain_caches(instance.subscriber.domain)
class AuthenticatedEmailDomain(models.Model):
    """
    This specifies the email domains that are tied to an Identity Provider and
    a list of users that would be exempt from SSO.
    """
    email_domain = models.CharField(max_length=256, db_index=True, unique=True)
    # PROTECT: an IdP cannot be deleted while email domains still point at it
    identity_provider = models.ForeignKey(IdentityProvider, on_delete=models.PROTECT)

    class Meta:
        app_label = 'sso'

    def __str__(self):
        return f"{self.email_domain} authenticated by [{self.identity_provider.name}]"


@receiver(post_save, sender=AuthenticatedEmailDomain)
@receiver(post_delete, sender=AuthenticatedEmailDomain)
def clear_caches_for_email_domain(sender, instance, **kwargs):
    """
    Catches the post-save and post-delete signals of AuthenticatedEmailDomain
    to ensure that we immediately clear the related email-domain quickcaches
    for IdentityProvider.
    :param sender: The sender class (in this case AuthenticatedEmailDomain)
    :param instance: AuthenticatedEmailDomain - the instance being saved/deleted
    :param kwargs: unused signal kwargs
    """
    IdentityProvider.clear_email_domain_caches(instance.email_domain)
class UserExemptFromSingleSignOn(models.Model):
    """
    This specifies what users are exempt from SSO for a given
    AuthenticatedEmailDomain. Other users will be required to use SSO once
    an AuthenticatedEmailDomain is specified for their email domain.
    """
    username = models.CharField(max_length=128, db_index=True)
    # CASCADE: exemptions disappear together with their email domain
    email_domain = models.ForeignKey(AuthenticatedEmailDomain, on_delete=models.CASCADE)

    class Meta:
        app_label = 'sso'

    def __str__(self):
        return f"{self.username} is exempt from SSO with {self.email_domain}"


class SsoTestUser(models.Model):
    """
    This specifies users who are able to log in with SSO from the homepage when testing mode is turned on
    for their Identity Provider.
    """
    username = models.CharField(max_length=128, db_index=True)
    # CASCADE: test-user registrations disappear together with their email domain
    email_domain = models.ForeignKey(AuthenticatedEmailDomain, on_delete=models.CASCADE)

    class Meta:
        app_label = 'sso'

    def __str__(self):
        return f"{self.username} is testing SSO with {self.email_domain}"
class TrustedIdentityProvider(models.Model):
    """
    This specifies the trust between domains (who are not associated with the
    IdP's BillingAccount owner) and an IdentityProvider
    """
    domain = models.CharField(max_length=256, db_index=True)
    # PROTECT: an IdP cannot be deleted while domains still trust it
    identity_provider = models.ForeignKey(IdentityProvider, on_delete=models.PROTECT)
    date_acknowledged = models.DateTimeField(auto_now_add=True)
    # email of the user who acknowledged the trust agreement
    acknowledged_by = models.EmailField()

    class Meta:
        app_label = 'sso'

    def __str__(self):
        return f"{self.domain} trusts [{self.identity_provider.name}]"


@receiver(post_save, sender=TrustedIdentityProvider)
@receiver(post_delete, sender=TrustedIdentityProvider)
def clear_caches_when_trust_is_established_or_removed(sender, instance, **kwargs):
    """
    Catches the post-save and post-delete signals of TrustedIdentityProvider
    to ensure that we immediately clear the related domain quickcaches
    for IdentityProvider.
    :param sender: The sender class (in this case TrustedIdentityProvider)
    :param instance: TrustedIdentityProvider - the instance being saved/deleted
    :param kwargs: unused signal kwargs
    """
    instance.identity_provider.clear_domain_caches(instance.domain)
| bsd-3-clause | cf84f31ccd66bac46fe1ca11c8aede5a | 37.640873 | 114 | 0.671784 | 4.173811 | false | false | false | false |
dimagi/commcare-hq | corehq/form_processor/track_related.py | 1 | 2581 | from collections import defaultdict
class TrackRelatedChanges(object):
    """Accumulate model instances pending create/update/delete, bucketed by
    model class. Subclasses can hook cache/cleanup logic into
    ``on_tracked_models_cleared``."""

    def __init__(self):
        self.create_models = defaultdict(list)
        self.update_models = defaultdict(list)
        self.delete_models = defaultdict(list)

    def _all_storages(self):
        """The three tracking buckets, in create/update/delete order."""
        return (self.create_models, self.update_models, self.delete_models)

    def has_tracked_models(self, model_class=None):
        """True if anything is tracked for create, update, or delete."""
        checks = (
            self.has_tracked_models_to_create,
            self.has_tracked_models_to_update,
            self.has_tracked_models_to_delete,
        )
        return any(check(model_class) for check in checks)

    def has_tracked_models_to_delete(self, model_class=None):
        return self._has_tracked_models(self.delete_models, model_class)

    def has_tracked_models_to_update(self, model_class=None):
        return self._has_tracked_models(self.update_models, model_class)

    def has_tracked_models_to_create(self, model_class=None):
        return self._has_tracked_models(self.create_models, model_class)

    def _has_tracked_models(self, storage, model_class=None):
        """True if ``storage`` holds anything (for ``model_class`` if given)."""
        if model_class is not None:
            return bool(storage[model_class])
        return any(storage.values())

    def clear_tracked_models(self, model_class=None):
        """Drop tracked models (all of them, or just ``model_class``)."""
        if model_class is None:
            for storage in self._all_storages():
                storage.clear()
        else:
            for storage in self._all_storages():
                storage[model_class] = []
        self.on_tracked_models_cleared(model_class)

    def on_tracked_models_cleared(self, model_class=None):
        """
        Override this to be notified when tracked models have been cleared.
        :param model_class: May be None which indicates that all types have been cleared.
        """
        pass

    def track_create(self, model):
        self.create_models[type(model)].append(model)

    def track_update(self, model):
        self.update_models[type(model)].append(model)

    def track_delete(self, model):
        self.delete_models[type(model)].append(model)

    def get_live_tracked_models(self, model_class):
        """Return tracked models that have not been deleted
        (updates first, then creates)."""
        return self.update_models[model_class] + self.create_models[model_class]

    def get_tracked_models_to_create(self, model_class):
        return self.create_models[model_class]

    def get_tracked_models_to_update(self, model_class):
        return self.update_models[model_class]

    def get_tracked_models_to_delete(self, model_class):
        return self.delete_models[model_class]
| bsd-3-clause | fc5e46da7665dbb84b3c2d4716f1cfec | 35.352113 | 89 | 0.649748 | 3.806785 | false | false | false | false |
onepercentclub/bluebottle | bluebottle/utils/admin.py | 1 | 6945 | import csv
import datetime
from builtins import str
import six
from django.contrib import admin
from django.contrib.admin.models import CHANGE, LogEntry
from django.contrib.admin.views.main import ChangeList
from django.contrib.contenttypes.models import ContentType
from django.db.models.aggregates import Sum
from django.db.models.fields.files import FieldFile
from django.db.models.query import QuerySet
from django.http import HttpResponse
from django.template import loader
from django.utils.encoding import smart_str
from djmoney.money import Money
from parler.admin import TranslatableAdmin
from solo.admin import SingletonModelAdmin
from bluebottle.activities.models import Contributor
from bluebottle.clients import properties
from bluebottle.members.models import Member
from bluebottle.utils.exchange_rates import convert
from .models import Language, TranslationPlatformSettings
from ..segments.models import SegmentType
class LanguageAdmin(admin.ModelAdmin):
    """Admin list view for Language: code plus English and native names."""
    model = Language
    list_display = ('code', 'language_name', 'native_name')


admin.site.register(Language, LanguageAdmin)
def prep_field(request, obj, field, manyToManySep=';'):
    """ Returns the field as a unicode string. If the field is a callable, it
    attempts to call it first, without arguments.

    :param request: request used to build absolute file URLs
    :param obj: model instance the value is read from
    :param field: attribute name; ``"a__b__c"`` traverses related objects
    :param manyToManySep: separator used to join list/queryset values
    """
    # Support "a__b__c" traversal through related objects; a broken link
    # anywhere along the path yields an empty string.
    if '__' in field:
        bits = field.split('__')
        field = bits.pop()
        for bit in bits:
            obj = getattr(obj, bit, None)
            if obj is None:
                return ""

    attr = getattr(obj, field)

    if isinstance(attr, FieldFile):
        attr = request.build_absolute_uri(attr.url)
    if isinstance(attr, Money):
        attr = str(attr)
    if isinstance(attr, datetime.datetime):
        attr = attr.strftime('%d-%m-%y %H:%M')
    if isinstance(attr, datetime.timedelta):
        # Express the duration in hours. The previous implementation used
        # `attr.seconds`, which silently drops the `days` component of a
        # timedelta; total_seconds() accounts for the full duration.
        attr = attr.total_seconds() / (60 * 60)

    output = attr() if callable(attr) else attr

    if isinstance(output, (list, tuple, QuerySet)):
        output = manyToManySep.join([str(item) for item in output])
    return output if output else ""
def escape_csv_formulas(item):
    """Neutralize CSV-injection payloads: prefix strings that would be
    interpreted as spreadsheet formulas with a single quote."""
    if not item or not isinstance(item, six.string_types):
        return item
    if item.startswith(('=', '+', '-', '@')):
        item = u"'" + item
    return smart_str(item)
def export_as_csv_action(description="Export as CSV", fields=None, exclude=None, header=True,
                         manyToManySep=';'):
    """ This function returns an export csv action.

    :param description: label shown in the admin actions dropdown
    :param fields: iterable of field names, or of (field, label) pairs
    :param exclude: field names to omit (takes precedence over ``fields``)
    :param header: whether to write a header row
    :param manyToManySep: separator used to join many-to-many values
    """
    def export_as_csv(modeladmin, request, queryset):
        """ Generic csv export admin action.
        Based on http://djangosnippets.org/snippets/2712/
        """
        opts = modeladmin.model._meta
        field_names = [field.name for field in opts.fields]
        labels = []

        if exclude:
            field_names = [f for f in field_names if f not in exclude]
        elif fields:
            # ``fields`` may be (field, label) pairs or plain field names;
            # the unpacking attempt distinguishes the two forms.
            try:
                field_names = [field for field, _ in fields]
                labels = [label for _, label in fields]
            except ValueError:
                field_names = [field for field in fields]
                labels = field_names

        response = HttpResponse(content_type='text/csv; charset=utf-8')
        response['Content-Disposition'] = 'attachment; filename="%s.csv"' % (
            str(opts).replace('.', '_')
        )
        writer = csv.writer(response, delimiter=';', dialect='excel')

        if header:
            row = labels if labels else field_names
            # Members and contributors get one extra header column per
            # segment type.
            # NOTE(review): the segment names are appended to ``labels``; when
            # ``labels`` is empty, ``row`` aliases ``field_names`` instead, so
            # the appended headers are silently dropped -- confirm intent.
            if queryset.model is Member or issubclass(queryset.model, Contributor):
                for segment_type in SegmentType.objects.all():
                    labels.append(segment_type.name)
            writer.writerow([escape_csv_formulas(item) for item in row])

        if queryset.model is Member:
            # prefetch relations read per-row below to avoid N+1 queries
            queryset = queryset.prefetch_related('place')
            queryset = queryset.prefetch_related('segments')
            queryset = queryset.prefetch_related('contributor_set')
        for obj in queryset:
            row = [prep_field(request, obj, field, manyToManySep) for field in field_names]
            # Write extra field data
            if queryset.model is Member:
                for segment_type in SegmentType.objects.all():
                    segments = " | ".join(obj.segments.filter(
                        segment_type=segment_type).values_list('name', flat=True))
                    row.append(segments)
            if issubclass(queryset.model, Contributor):
                for segment_type in SegmentType.objects.all():
                    if obj.user:
                        segments = " | ".join(obj.user.segments.filter(
                            segment_type=segment_type).values_list('name', flat=True))
                    else:
                        segments = ''
                    row.append(segments)
            escaped_row = [escape_csv_formulas(item) for item in row]
            writer.writerow(escaped_row)
        return response

    export_as_csv.short_description = description
    export_as_csv.acts_on_all = True
    return export_as_csv
class TotalAmountAdminChangeList(ChangeList):
    """ChangeList that exposes a grand total (converted to the platform's
    default currency) as ``self.total`` for the change-list template."""

    def get_results(self, *args, **kwargs):
        self.model_admin.change_list_template = 'utils/admin/total_amount_change_list.html'
        super(TotalAmountAdminChangeList, self).get_results(*args, **kwargs)
        # column holding the amount; admins may override via ``total_column``
        total_column = self.model_admin.total_column or 'amount'
        currency_column = '{}_currency'.format(total_column)
        # sum per currency; the bare order_by() clears default ordering so
        # the values() grouping is not split up by ordering columns
        totals = self.queryset.values(
            currency_column
        ).annotate(
            total=Sum(total_column)
        ).order_by()
        amounts = [Money(total['total'], total[currency_column]) for total in totals]
        amounts = [convert(amount, properties.DEFAULT_CURRENCY) for amount in amounts]
        self.total = sum(amounts) or Money(0, properties.DEFAULT_CURRENCY)


class BasePlatformSettingsAdmin(SingletonModelAdmin):
    """Base admin for singleton platform-settings models."""
    pass
def log_action(obj, user, change_message='Changed', action_flag=CHANGE):
    """Record a django-admin LogEntry for an action performed on ``obj`` by ``user``."""
    content_type = ContentType.objects.get_for_model(obj)
    LogEntry.objects.log_action(
        user_id=user.id,
        content_type_id=content_type.pk,
        object_id=obj.pk,
        object_repr=str(obj),
        action_flag=action_flag,
        change_message=change_message,
    )
@admin.register(TranslationPlatformSettings)
class TranslationPlatformSettingsAdmin(TranslatableAdmin, BasePlatformSettingsAdmin):
    """Singleton admin for translatable platform settings."""
    pass
class TranslatableAdminOrderingMixin(object):
    """Admin mixin ordering the queryset by a translated field in the
    currently active admin language."""

    translatable_ordering = 'translations__name'

    def get_queryset(self, request):
        language_code = self.get_queryset_language(request)
        base_queryset = super(TranslatableAdminOrderingMixin, self).get_queryset(request)
        return base_queryset.translated(language_code).order_by(self.translatable_ordering)
def admin_info_box(text):
    """Render the admin info-box template around the given text."""
    template = loader.get_template('admin/info_box.html')
    return template.render({'text': text})
| bsd-3-clause | cbcc028ffb98312d0de020d0771cb2cd | 33.552239 | 93 | 0.640605 | 4.186257 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/products/models.py | 1 | 10235 | from datetime import datetime
from decimal import Decimal
from django.db import models
from django.utils.translation import gettext as _
import jsonfield
from couchdbkit.exceptions import ResourceNotFound
from dimagi.ext.couchdbkit import (
BooleanProperty,
DateTimeProperty,
DecimalProperty,
DictProperty,
Document,
StringProperty,
)
from dimagi.utils.couch.database import iter_docs
from corehq.apps.commtrack.exceptions import (
DuplicateProductCodeException,
InvalidProductException,
)
class Product(Document):
    """
    A product, e.g. "coartem" or "tylenol"

    Couch-backed product definition. Every save is mirrored one-way into
    the SQLProduct model via sync_to_sql().
    """
    domain = StringProperty()
    name = StringProperty()
    unit = StringProperty()
    # raw product code; use the ``code`` property for normalized access
    code_ = StringProperty()
    description = StringProperty()
    category = StringProperty()
    program_id = StringProperty()
    cost = DecimalProperty()
    # free-form custom fields
    product_data = DictProperty()
    is_archived = BooleanProperty(default=False)
    last_modified = DateTimeProperty()

    @classmethod
    def wrap(cls, data):
        """Wrap a raw couch doc, repairing timestamps missing a "Z" suffix."""
        from corehq.apps.groups.models import dt_no_Z_re
        # If "Z" is missing because of the Aug 2014 migration, then add it.
        # cf. Group class
        last_modified = data.get('last_modified')
        if last_modified and dt_no_Z_re.match(last_modified):
            data['last_modified'] += 'Z'
        return super(Product, cls).wrap(data)

    @classmethod
    def save_docs(cls, docs, use_uuids=True, codes_by_domain=None):
        """Bulk-save products, generating a code for any doc that lacks one.

        :param codes_by_domain: optional per-domain cache of existing codes,
            reused across calls to avoid repeated queries.
        """
        from corehq.apps.commtrack.util import generate_code

        codes_by_domain = codes_by_domain or {}

        def get_codes(domain):
            # lazily fetch (and memoize) the existing codes for a domain
            if domain not in codes_by_domain:
                codes_by_domain[domain] = SQLProduct.objects.filter(domain=domain)\
                    .values_list('code', flat=True).distinct()
            return codes_by_domain[domain]

        for doc in docs:
            doc.last_modified = datetime.utcnow()
            if not doc['code_']:
                doc['code_'] = generate_code(
                    doc['name'],
                    get_codes(doc['domain'])
                )

        super(Product, cls).save_docs(docs, use_uuids)

        domains = {doc['domain'] for doc in docs}
        for domain in domains:
            cls.clear_caches(domain)

    bulk_save = save_docs

    def sync_to_sql(self):
        """Copy this doc's fields onto its SQLProduct counterpart (one-way)."""
        properties_to_sync = [
            # (sql_attr, couch_attr) pairs, or a single shared name
            ('product_id', '_id'),
            'domain',
            'name',
            'is_archived',
            ('code', 'code_'),
            'description',
            'category',
            'program_id',
            'cost',
            ('units', 'unit'),
            'product_data',
        ]

        # sync properties to SQL version
        sql_product, _ = SQLProduct.objects.get_or_create(
            product_id=self._id
        )

        for prop in properties_to_sync:
            if isinstance(prop, tuple):
                sql_prop, couch_prop = prop
            else:
                sql_prop = couch_prop = prop

            if hasattr(self, couch_prop):
                setattr(sql_product, sql_prop, getattr(self, couch_prop))

        sql_product.save()

    def save(self, *args, **kwargs):
        """
        Saving a couch version of Product will trigger
        one way syncing to the SQLProduct version of this
        product.
        """
        # mark modified time stamp for selective syncing
        self.last_modified = datetime.utcnow()

        # generate code if user didn't specify one
        if not self.code:
            from corehq.apps.commtrack.util import generate_code

            self.code = generate_code(
                self.name,
                SQLProduct.objects
                .filter(domain=self.domain)
                .values_list('code', flat=True)
                .distinct()
            )

        result = super(Product, self).save(*args, **kwargs)

        self.clear_caches(self.domain)
        self.sync_to_sql()

        return result

    @property
    def code(self):
        """Normalized (lowercased) product code."""
        return self.code_

    @code.setter
    def code(self, val):
        self.code_ = val.lower() if val else None

    @classmethod
    def clear_caches(cls, domain):
        """Invalidate all cached product fixtures for the domain."""
        from casexml.apps.phone.utils import clear_fixture_cache
        from corehq.apps.products.fixtures import ALL_CACHE_PREFIXES
        for prefix in ALL_CACHE_PREFIXES:
            clear_fixture_cache(domain, prefix)

    @classmethod
    def by_domain(cls, domain, wrap=True, include_archived=False):
        """List the domain's products (couch docs), optionally wrapped."""
        queryset = SQLProduct.objects.filter(domain=domain)
        if not include_archived:
            queryset = queryset.filter(is_archived=False)
        return list(queryset.couch_products(wrapped=wrap))

    @classmethod
    def _export_attrs(cls):
        # attrs exported to/imported from excel; tuples carry a converter
        return [
            ('name', str),
            ('unit', str),
            'description',
            'category',
            ('program_id', str),
            ('cost', lambda a: Decimal(a) if a else None),
        ]

    def to_dict(self):
        """Plain dict of exportable fields (for excel export)."""
        from corehq.apps.commtrack.util import encode_if_needed
        product_dict = {}

        product_dict['id'] = self._id
        product_dict['product_id'] = self.code_

        for attr in self._export_attrs():
            real_attr = attr[0] if isinstance(attr, tuple) else attr
            product_dict[real_attr] = encode_if_needed(
                getattr(self, real_attr)
            )

        return product_dict

    def custom_property_dict(self):
        """Custom product_data fields, keys prefixed with "data: "."""
        from corehq.apps.commtrack.util import encode_if_needed
        property_dict = {}

        for prop, val in self.product_data.items():
            property_dict['data: ' + prop] = encode_if_needed(val)

        return property_dict

    def archive(self):
        """
        Mark a product as archived. This will cause it (and its data)
        to not show up in default Couch and SQL views.
        """
        self.is_archived = True
        self.save()

    def unarchive(self):
        """
        Unarchive a product, causing it (and its data) to show
        up in Couch and SQL views again.

        Raises DuplicateProductCodeException if another active product in
        the domain already uses this product's code.
        """
        if self.code:
            if SQLProduct.objects.filter(domain=self.domain, code=self.code, is_archived=False).exists():
                raise DuplicateProductCodeException()
        self.is_archived = False
        self.save()

    @classmethod
    def from_excel(cls, row, custom_data_validator):
        """Build (or update) a Product from an excel import row.

        :param row: dict-like row; falsy rows yield None
        :param custom_data_validator: callable returning an error message
            (or falsy) for the row's custom data
        :raises InvalidProductException: on unknown id, missing required
            fields, or invalid custom data
        """
        if not row:
            return None

        id = row.get('id')
        if id:
            try:
                p = cls.get(id)
            except ResourceNotFound:
                raise InvalidProductException(
                    _("Product with ID '{product_id}' could not be found!").format(product_id=id)
                )
        else:
            p = cls()

        p.code = str(row.get('product_id') or '')

        for attr in cls._export_attrs():
            key = attr[0] if isinstance(attr, tuple) else attr
            if key in row:
                val = row[key]
                if val is None:
                    val = ''
                if isinstance(attr, tuple):
                    val = attr[1](val)
                setattr(p, key, val)
            else:
                # NOTE(review): stops at the first missing column, skipping
                # all later attrs -- presumably columns always appear as a
                # complete prefix; confirm against the import template.
                break

        if not p.code:
            raise InvalidProductException(_('Product ID is a required field and cannot be blank!'))
        if not p.name:
            raise InvalidProductException(_('Product name is a required field and cannot be blank!'))

        custom_data = row.get('data', {})
        error = custom_data_validator(custom_data)
        if error:
            raise InvalidProductException(error)

        p.product_data = custom_data
        p.product_data.update(row.get('uncategorized_data', {}))

        return p
class ProductQueriesMixin(object):
    """Query helpers shared by the product queryset and manager classes."""

    def product_ids(self):
        """Couch document ids of the products in this queryset."""
        return self.values_list('product_id', flat=True)

    def couch_products(self, wrapped=True):
        """
        Returns the couch products corresponding to this queryset.
        """
        docs = iter_docs(Product.get_db(), self.product_ids())
        return map(Product.wrap, docs) if wrapped else docs
class ProductQuerySet(ProductQueriesMixin, models.query.QuerySet):
    """QuerySet for SQLProduct with couch-product helpers."""
    pass


class ProductManager(ProductQueriesMixin, models.Manager):
    """Default manager: includes archived products."""

    def get_queryset(self):
        return ProductQuerySet(self.model, using=self._db)


class OnlyActiveProductManager(ProductManager):
    """Manager that excludes archived products."""

    def get_queryset(self):
        return super(OnlyActiveProductManager, self).get_queryset().filter(is_archived=False)
class SQLProduct(models.Model):
    """
    A SQL based clone of couch Products.

    This is used to efficiently filter LedgerValue and other
    SQL based queries to exclude data for archived products.

    Kept in sync one-way from Product.sync_to_sql(); do not edit directly.
    """
    domain = models.CharField(max_length=255, db_index=True)
    # couch _id of the source Product document
    product_id = models.CharField(max_length=100, db_index=True, unique=True)
    name = models.CharField(max_length=100, null=True)
    is_archived = models.BooleanField(default=False)
    code = models.CharField(max_length=100, default='', null=True)
    description = models.TextField(null=True, default='')
    category = models.CharField(max_length=100, null=True, default='')
    program_id = models.CharField(max_length=100, null=True, default='')
    cost = models.DecimalField(max_digits=20, decimal_places=5, null=True)
    units = models.CharField(max_length=100, null=True, default='')
    # mirror of Product.product_data (free-form custom fields)
    product_data = jsonfield.JSONField(
        default=dict,
    )
    created_at = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)

    # default manager includes archived rows; active_objects excludes them
    objects = ProductManager()
    active_objects = OnlyActiveProductManager()

    def __str__(self):
        return "{} ({})".format(self.name, self.domain)

    def __repr__(self):
        return "<SQLProduct(domain=%s, name=%s)>" % (
            self.domain,
            self.name
        )

    @classmethod
    def by_domain(cls, domain):
        """All products (including archived) for a domain."""
        return cls.objects.filter(domain=domain).all()

    @property
    def get_id(self):
        # For compatibility with the couch Product interface
        return self.product_id

    @property
    def unit(self):
        # For compatibility with Product
        return self.units

    class Meta(object):
        app_label = 'products'
| bsd-3-clause | f54aa29cc9b5363206d7fe514ceeaabc | 29.014663 | 105 | 0.58681 | 4.127016 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/oauth_integrations/tests/test_util.py | 1 | 2776 | from datetime import datetime
from django.test import SimpleTestCase, TestCase
from django.contrib.auth.models import User
from google.oauth2.credentials import Credentials
from corehq.apps.oauth_integrations.models import GoogleApiToken
from corehq.apps.oauth_integrations.utils import get_token, load_credentials, stringify_credentials
class TestUtils(TestCase):
    """Tests for get_token() against stored GoogleApiToken rows."""

    def setUp(self):
        super().setUp()
        self.user = User()
        self.user.username = 'test@user.com'
        self.user.save()
        self.credentials = Credentials(
            token="token",
            refresh_token="refresh_token",
            id_token="id_token",
            token_uri="token_uri",
            client_id="client_id",
            client_secret="client_secret",
            scopes="scopes",
            expiry=datetime(2020, 1, 1)
        )

    def tearDown(self):
        # Always remove any tokens created during the test before deleting
        # the user. The previous implementation relied on the test body
        # calling a cleanup helper manually, which leaked the token row
        # whenever an assertion failed before the helper ran.
        GoogleApiToken.objects.filter(user=self.user).delete()
        self.credentials = None
        self.user.delete()
        return super().tearDown()

    def test_get_token_with_created_token(self):
        GoogleApiToken.objects.create(
            user=self.user,
            token=stringify_credentials(self.credentials)
        )

        token = get_token(self.user)

        self.assertIsNotNone(token)

    def test_get_token_without_token(self):
        token = get_token(self.user)

        self.assertIsNone(token)
class TestCredentialsUtils(SimpleTestCase):
    """Round-trip tests for stringify_credentials()/load_credentials()."""

    def setUp(self):
        super().setUp()
        self.credentials = Credentials(
            token="token",
            refresh_token="refresh_token",
            id_token="id_token",
            token_uri="token_uri",
            client_id="client_id",
            client_secret="client_secret",
            scopes="scopes",
            expiry=datetime(2020, 1, 1)
        )

    def tearDown(self):
        self.credentials = None
        return super().tearDown()

    def test_stringify_credentials(self):
        expected = (
            '{"token": "token", "refresh_token": "refresh_token", "id_token": "id_token", '
            '"token_uri": "token_uri", "client_id": "client_id", "client_secret": "client_secret", '
            '"scopes": "scopes", "expiry": "2020-01-01 00:00:00"}'
        )

        self.assertEqual(expected, stringify_credentials(self.credentials))

    def test_load_credentials(self):
        stringified = stringify_credentials(self.credentials)

        restored = load_credentials(stringified)

        self.assertEqual(restored.token, self.credentials.token)
| bsd-3-clause | 83fe661adaa9b76ae15c42059b2c3706 | 30.191011 | 110 | 0.627882 | 4.118694 | false | true | false | false |
onepercentclub/bluebottle | bluebottle/collect/migrations/0006_auto_20210927_1047.py | 1 | 2243 | # Generated by Django 2.2.24 on 2021-09-27 08:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the collect app.

    Updates the API permission sets and verbose names on CollectActivity and
    CollectContributor, and adds an optional ``location`` foreign key to
    CollectActivity.
    """

    dependencies = [
        ('geo', '0027_auto_20210927_1047'),
        ('collect', '0005_auto_20210922_1502'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='collectactivity',
            options={'permissions': (('api_read_collect', 'Can view collect activity through the API'), ('api_add_collect', 'Can add collect activity through the API'), ('api_change_collect', 'Can change collect activity through the API'), ('api_delete_collect', 'Can delete collect activity through the API'), ('api_read_own_collect', 'Can view own collect activity through the API'), ('api_add_own_collect', 'Can add own collect activity through the API'), ('api_change_own_collect', 'Can change own collect activity through the API'), ('api_delete_own_collect', 'Can delete own collect activity through the API')), 'verbose_name': 'Collect Activity', 'verbose_name_plural': 'Collect Activities'},
        ),
        migrations.AlterModelOptions(
            name='collectcontributor',
            options={'permissions': (('api_read_collectcontributor', 'Can view collect contributor through the API'), ('api_add_collectcontributor', 'Can add collect contributor through the API'), ('api_change_collectcontributor', 'Can change collect contributor through the API'), ('api_delete_collectcontributor', 'Can delete collect contributor through the API'), ('api_read_own_collectcontributor', 'Can view own collect contributor through the API'), ('api_add_own_collectcontributor', 'Can add own collect contributor through the API'), ('api_change_own_collectcontributor', 'Can change own collect contributor through the API'), ('api_delete_own_collectcontributor', 'Can delete own collect contributor through the API')), 'verbose_name': 'Collect contributor', 'verbose_name_plural': 'Collect contributors'},
        ),
        # Nullable FK so existing rows need no backfill; SET_NULL keeps the
        # activity when its geolocation is deleted.
        migrations.AddField(
            model_name='collectactivity',
            name='location',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='geo.Geolocation'),
        ),
    ]
| bsd-3-clause | 037f777d211c82bc1d3c8ae95bf8d83d | 79.107143 | 819 | 0.700401 | 4.296935 | false | false | false | false |
dimagi/commcare-hq | corehq/ex-submodules/dimagi/utils/couch/debugdb/debugdatabase.py | 1 | 7563 | __author__ = 'dmyung'
from datetime import datetime
from couchdbkit import Database
from dimagi.utils.couch.debugdb import tidy_stacktrace, SQL_WARNING_THRESHOLD, process_key, ms_from_timedelta
#taken from the django debug toolbar sql panel
import traceback
import couchdbkit
from couchdbkit import resource, ResourceNotFound
from couchdbkit.client import ViewResults
class DebugDatabase(Database):
    """couchdbkit Database subclass that records every document GET.

    Each call to ``get``/``debug_open_doc`` appends a dict of timing,
    parameter and stack-trace information to the class-level ``_queries``
    list, which the debug panel reads.
    """

    # Class-level: shared by all instances. Presumably cleared by the debug
    # panel between requests -- TODO confirm, otherwise this grows unbounded.
    _queries = []

    def debug_open_doc(self, docid, **params):
        """Get document from database.

        Inlines couchdbkit's ``Database.open_doc`` so the request can be
        timed and recorded before the result (or error) is returned.

        Args:
            @param docid: str, document id to retrieve
            @param wrapper: callable. function that takes dict as a param.
                Used to wrap an object.
            @param **params: See doc api for parameters to use:
                http://wiki.apache.org/couchdb/HTTP_Document_API

        @return: dict, representation of CouchDB document as
            a dict (or the wrapped object when a wrapper/schema was given).
        """
        start = datetime.utcnow()
        ############################
        # Start of inlined Database.open_doc
        wrapper = None
        if "wrapper" in params:
            wrapper = params.pop("wrapper")
        elif "schema" in params:
            schema = params.pop("schema")
            if not hasattr(schema, "wrap"):
                raise TypeError("invalid schema")
            wrapper = schema.wrap
        docid = resource.escape_docid(docid)
        # Defer raising ResourceNotFound until after the query has been
        # recorded, so 404s still show up in the debug panel.
        error = None
        try:
            doc = self.res.get(docid, **params).json_body
        except ResourceNotFound as ex:
            error = ex
            doc = {}
        #############################

        #############################
        # Debug panel data collection
        stop = datetime.utcnow()
        duration = ms_from_timedelta(stop - start)
        stacktrace = tidy_stacktrace(traceback.extract_stack())

        # Label the query with the wrapped doc type when a wrapper is bound
        # to a document class, otherwise mark it as a raw GET.
        if wrapper is not None:
            view_path_display = "GET %s" % wrapper.__self__._doc_type
        else:
            view_path_display = "Raw GET"

        q = {
            'view_path': view_path_display,
            'duration': duration,
            'params': params,
            'stacktrace': stacktrace,
            'start_time': start,
            'stop_time': stop,
            'is_slow': (duration > SQL_WARNING_THRESHOLD),
            'total_rows': 1,
            'response': 200 if error is None else 404,
            'doc_type': doc.get('doc_type', '[unknown]'),
            'doc_id': docid,
        }
        self._queries.append(q)
        # end debug panel data collection
        ################################

        ##################################
        # Resume original Database.open_doc
        if error is not None:
            raise error
        if wrapper is not None:
            if not callable(wrapper):
                raise TypeError("wrapper isn't a callable")
            return wrapper(doc)
        return doc

    # Replace Database.get so all document fetches go through the
    # instrumented path.
    get = debug_open_doc
couchdbkit.client.Database = DebugDatabase
class DebugViewResults64(ViewResults):
    """Debug replacement for couchdbkit >= 0.6.0 ``ViewResults``.

    Records timing, parameters and a stack trace for every view fetch into
    the class-level ``_queries`` list, which the debug panel reads.
    """

    # Class-level: shared by all instances; consumed by the debug panel.
    _queries = []

    def debug_fetch(self):
        """Fetch results and cache them (overrides ViewResults fetching)."""
        # Reset dynamic keys left over from a previous fetch.
        for key in self._dynamic_keys:
            try:
                delattr(self, key)
            except AttributeError:
                # Attribute already gone -- nothing to remove. (Was a bare
                # `except:`, which also swallowed KeyboardInterrupt and
                # SystemExit.)
                pass
        self._dynamic_keys = []

        self._result_cache = self.fetch_raw().json_body
        self._total_rows = self._result_cache.get('total_rows')
        self._offset = self._result_cache.get('offset', 0)

        # Mirror extra keys that an external tool (e.g. couchdb-lucene)
        # may add to the view result.
        for key in self._result_cache:
            if key not in ["total_rows", "offset", "rows"]:
                self._dynamic_keys.append(key)
                setattr(self, key, self._result_cache[key])

    def _debug_fetch_if_needed(self):
        """Fetch (if not already cached) and record debug info for the query."""
        # Derive a readable view label: "design_doc/view_name" for a full
        # view path, otherwise the raw resource (e.g. _all_docs).
        view_args = self._arg.split('/')
        if len(view_args) == 4:
            design_doc = view_args[1]
            view_name = view_args[3]
            self.debug_view = '%s/%s' % (design_doc, view_name)
        else:
            self.debug_view = view_args[0]

        start = datetime.utcnow()
        if not self._result_cache:
            result_cached = False
            self.debug_fetch()
        else:
            result_cached = True
        stop = datetime.utcnow()
        duration = ms_from_timedelta(stop - start)
        stacktrace = tidy_stacktrace(traceback.extract_stack())

        self._queries.append({
            'view_path': self.debug_view,
            'duration': duration,
            'params': self.params,
            'stacktrace': stacktrace,
            'start_time': start,
            'stop_time': stop,
            'is_slow': (duration > SQL_WARNING_THRESHOLD),
            'total_rows': len(self._result_cache.get('rows', [])),
            'offset': self._result_cache.get('offset', 0),
            'rows': self._result_cache.get('total_rows', 0),
            'result_cached': result_cached,
            'include_docs': self.params.get('include_docs', False)
        })

    _fetch_if_needed = _debug_fetch_if_needed
class DebugViewResults57(ViewResults):
    """Debug replacement for couchdbkit < 0.6.0 ``ViewResults``.

    Records timing, parameters and a stack trace for every view fetch into
    the class-level ``_queries`` list, which the debug panel reads.
    """

    # Class-level: shared by all instances; consumed by the debug panel.
    _queries = []

    def debug_fetch(self):
        """Fetch results and cache them (overrides ViewResults fetching)."""
        # Reset dynamic keys left over from a previous fetch.
        for key in self._dynamic_keys:
            try:
                delattr(self, key)
            except AttributeError:
                # Attribute already gone -- nothing to remove. (Was a bare
                # `except:`, which also swallowed KeyboardInterrupt and
                # SystemExit.)
                pass
        self._dynamic_keys = []

        self._result_cache = self.fetch_raw().json_body
        self._total_rows = self._result_cache.get('total_rows')
        self._offset = self._result_cache.get('offset', 0)

        # Mirror extra keys that an external tool (e.g. couchdb-lucene)
        # may add to the view result.
        for key in self._result_cache:
            if key not in ["total_rows", "offset", "rows"]:
                self._dynamic_keys.append(key)
                setattr(self, key, self._result_cache[key])

    def _debug_fetch_if_needed(self):
        """Fetch (if not already cached) and record debug info for the query."""
        start = datetime.utcnow()
        # Determine the cache status *before* fetching. The original code
        # re-tested self._result_cache after the fetch, so result_cached was
        # reported True even for results fetched in this very call, and a
        # falsy result body triggered a redundant second fetch.
        result_cached = bool(self._result_cache)
        if not result_cached:
            self.debug_fetch()
        stop = datetime.utcnow()
        duration = ms_from_timedelta(stop - start)
        stacktrace = tidy_stacktrace(traceback.extract_stack())

        # Derive a readable view label: "design_doc/view_name" for a full
        # view path, otherwise the raw resource.
        view_path_arr = self.view.view_path.split('/')
        if len(view_path_arr) == 4:
            view_path_display = '%s/%s' % (view_path_arr[1], view_path_arr[3])
        else:
            view_path_display = view_path_arr[0]  # _all_docs

        self._queries.append({
            'view_path': view_path_display,
            'duration': duration,
            'params': self.params,
            'stacktrace': stacktrace,
            'start_time': start,
            'stop_time': stop,
            'is_slow': (duration > SQL_WARNING_THRESHOLD),
            'total_rows': len(self._result_cache.get('rows', [])),
            'offset': self._result_cache.get('offset', 0),
            'rows': self._result_cache.get('total_rows', 0),
            'result_cached': result_cached,
            'include_docs': self.params.get('include_docs', False)
        })

    _fetch_if_needed = _debug_fetch_if_needed
# Select the ViewResults implementation matching the installed couchdbkit
# version, then monkey-patch couchdbkit so every client uses it.
if couchdbkit.version_info < (0, 6, 0):
    DebugViewResults = DebugViewResults57
    couchdbkit.client.ViewResults = DebugViewResults57
else:
    DebugViewResults = DebugViewResults64
    couchdbkit.client.ViewResults = DebugViewResults
| bsd-3-clause | ae9bf09a250aa8675db22633265df4d8 | 31.182979 | 109 | 0.538146 | 4.169239 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/linked_domain/migrations/0013_auto_20201005_2215.py | 1 | 1435 | # Generated by Django 2.2.16 on 2020-10-05 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration updating the set of linked-data model types
    that DomainLinkHistory.model may record."""

    dependencies = [
        ('linked_domain', '0012_auto_20200929_0809'),
    ]

    operations = [
        migrations.AlterField(
            model_name='domainlinkhistory',
            name='model',
            field=models.CharField(choices=[('app', 'Application'),
                                            ('custom_user_data', 'Custom User Data Fields'),
                                            ('custom_product_data', 'Custom Product Data Fields'),
                                            ('custom_location_data', 'Custom Location Data Fields'),
                                            ('roles', 'User Roles'), ('toggles', 'Feature Flags and Previews'),
                                            ('fixture', 'Lookup Table'),
                                            ('case_search_data', 'Case Search Settings'), ('report', 'Report'),
                                            ('data_dictionary', 'Data Dictionary'),
                                            ('dialer_settings', 'Dialer Settings'),
                                            ('otp_settings', 'OTP Pass-through Settings'),
                                            ('hmac_callout_settings', 'Signed Callout'), ('keyword', 'Keyword')],
                                   max_length=128),
        ),
    ]
| bsd-3-clause | a083aed09a6404629dec3f5b930270b6 | 48.482759 | 113 | 0.432753 | 5.583658 | false | false | false | false |
dimagi/commcare-hq | corehq/apps/users/urls.py | 1 | 10514 | from django.conf.urls import include, re_path as url
from corehq.apps.domain.utils import grandfathered_domain_re
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from .views import (
DefaultProjectUserSettingsView,
EditWebUserView,
EnterpriseUsersView,
InviteWebUserView,
UploadWebUsers,
WebUserUploadStatusView,
ListRolesView,
ListWebUsersView,
add_domain_membership,
change_password,
delete_phone_number,
delete_request,
check_sso_trust,
delete_user_role,
domain_accounts,
make_phone_number_default,
paginate_enterprise_users,
paginate_web_users,
post_user_role,
register_fcm_device_token,
remove_web_user,
test_httpdigest,
undo_remove_web_user,
verify_phone_number,
download_web_users,
DownloadWebUsersStatusView,
WebUserUploadJobPollView,
)
from .views.web import (
accept_invitation,
delete_invitation,
DomainRequestView,
reinvite_web_user,
)
from .views.mobile.custom_data_fields import UserFieldsView
from .views.mobile.groups import (
BulkSMSVerificationView,
EditGroupMembersView,
GroupsListView,
)
from .views.mobile.users import (
CommCareUserConfirmAccountBySMSView,
CommCareUsersLookup,
ConfirmBillingAccountForExtraUsersView,
ConfirmTurnOffDemoModeView,
CreateCommCareUserModal,
DemoRestoreStatusView,
DeleteCommCareUsers,
DownloadUsersStatusView,
EditCommCareUserView,
FilteredCommCareUserDownload,
FilteredWebUserDownload,
MobileWorkerListView,
UploadCommCareUsers,
UserUploadStatusView,
activate_commcare_user,
count_commcare_users,
count_web_users,
deactivate_commcare_user,
delete_commcare_user,
demo_restore_job_poll,
download_commcare_users,
force_user_412,
paginate_mobile_workers,
reset_demo_user_restore,
restore_commcare_user,
toggle_demo_mode,
update_user_groups,
user_download_job_poll,
CommCareUserConfirmAccountView,
send_confirmation_email,
send_confirmation_sms,
CommcareUserUploadJobPollView)
from ..hqwebapp.decorators import waf_allow
# Report-dispatcher routes for user management; included under ^reports/ at
# the end of urlpatterns below.
user_management_urls = [
    UserManagementReportDispatcher.url_pattern(),
]
# Project user-management URL routes, built from four concatenated sections:
# web users / roles / invitations, mobile workers (CommCare users), groups,
# and user-management reports.
urlpatterns = [
    # --- Web users, phone numbers, invitations and roles ---
    url(r'^$', DefaultProjectUserSettingsView.as_view(), name=DefaultProjectUserSettingsView.urlname),
    url(r'^change_password/(?P<login_id>[ \w-]+)/$', change_password, name="change_password"),
    url(r'^domain_accounts/(?P<couch_user_id>[ \w-]+)/$', domain_accounts, name='domain_accounts'),
    url(r'^delete_phone_number/(?P<couch_user_id>[ \w-]+)/$', delete_phone_number, name='delete_phone_number'),
    url(
        r'^make_phone_number_default/(?P<couch_user_id>[ \w-]+)/$',
        make_phone_number_default,
        name='make_phone_number_default'
    ),
    url(r'^verify_phone_number/(?P<couch_user_id>[ \w-]+)/$', verify_phone_number, name='verify_phone_number'),
    url(
        r'^add_domain_membership/(?P<couch_user_id>[ \w-]+)/(?P<domain_name>%s)/$' % grandfathered_domain_re,
        add_domain_membership,
        name='add_domain_membership'
    ),
    url(r'^web/account/(?P<couch_user_id>[ \w-]+)/$', EditWebUserView.as_view(), name=EditWebUserView.urlname),
    url(r'^web/remove/(?P<couch_user_id>[ \w-]+)/$', remove_web_user, name='remove_web_user'),
    url(r'^web/undo_remove/(?P<record_id>[ \w-]+)/$', undo_remove_web_user, name='undo_remove_web_user'),
    url(r'^web/invite/$', InviteWebUserView.as_view(), name=InviteWebUserView.urlname),
    url(r'^web/reinvite/$', reinvite_web_user, name='reinvite_web_user'),
    url(r'^web/request/$', DomainRequestView.as_view(), name=DomainRequestView.urlname),
    url(r'^web/delete_invitation/$', delete_invitation, name='delete_invitation'),
    url(r'^web/delete_request/$', delete_request, name='delete_request'),
    url(r'^web/check_sso_trust/$', check_sso_trust, name='check_sso_trust'),
    url(r'^web/$', ListWebUsersView.as_view(), name=ListWebUsersView.urlname),
    url(r'^web/json/$', paginate_web_users, name='paginate_web_users'),
    url(r'^web/download/$', download_web_users, name='download_web_users'),
    url(
        r'^web/download/status/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        DownloadWebUsersStatusView.as_view(),
        name='download_web_users_status'
    ),
    url(r'^web/filter_and_download/$', FilteredWebUserDownload.as_view(), name=FilteredWebUserDownload.urlname),
    url(r'^web/count_users/$', count_web_users, name='count_web_users'),
    url(r'^web/upload/$', waf_allow('XSS_BODY')(UploadWebUsers.as_view()), name=UploadWebUsers.urlname),
    url(
        r'^web/upload/status/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        WebUserUploadStatusView.as_view(),
        name=WebUserUploadStatusView.urlname
    ),
    url(
        r'^web/upload/poll/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        WebUserUploadJobPollView.as_view(),
        name=WebUserUploadJobPollView.urlname
    ),
    url(r'^enterprise/$', EnterpriseUsersView.as_view(), name=EnterpriseUsersView.urlname),
    url(r'^enterprise/json/$', paginate_enterprise_users, name='paginate_enterprise_users'),
    url(r'^join/(?P<uuid>[ \w-]+)/$', accept_invitation, name='domain_accept_invitation'),
    url(r'^roles/$', ListRolesView.as_view(), name=ListRolesView.urlname),
    url(r'^roles/save/$', post_user_role, name='post_user_role'),
    url(r'^roles/delete/$', delete_user_role, name='delete_user_role'),
    url(
        r'^register_fcm_device_token/(?P<couch_user_id>[ \w-]+)/(?P<device_token>[ \w-]+)/$',
        register_fcm_device_token,
        name='register_fcm_device_token'
    ),
    url(r'^httpdigest/?$', test_httpdigest, name='test_httpdigest'),
] + [
    # --- Mobile workers (CommCare users) ---
    url(r'^commcare/$', MobileWorkerListView.as_view(), name=MobileWorkerListView.urlname),
    url(r'^commcare/json/$', paginate_mobile_workers, name='paginate_mobile_workers'),
    url(r'^commcare/fields/$', waf_allow('XSS_BODY')(UserFieldsView.as_view()), name=UserFieldsView.urlname),
    url(
        r'^commcare/account/(?P<couch_user_id>[ \w-]+)/$',
        EditCommCareUserView.as_view(),
        name=EditCommCareUserView.urlname
    ),
    url(r'^commcare/account/(?P<couch_user_id>[ \w-]+)/groups/$', update_user_groups, name='update_user_groups'),
    url(r'^commcare/activate/(?P<user_id>[ \w-]+)/$', activate_commcare_user, name='activate_commcare_user'),
    url(r'^commcare/deactivate/(?P<user_id>[ \w-]+)/$', deactivate_commcare_user, name='deactivate_commcare_user'),
    url(
        r'^commcare/send_confirmation_email/(?P<user_id>[ \w-]+)/$',
        send_confirmation_email,
        name='send_confirmation_email'
    ),
    url(r'^commcare/delete/(?P<user_id>[ \w-]+)/$', delete_commcare_user, name='delete_commcare_user'),
    url(r'^commcare/force_412/(?P<user_id>[ \w-]+)/$', force_user_412, name='force_user_412'),
    url(r'^commcare/restore/(?P<user_id>[ \w-]+)/$', restore_commcare_user, name='restore_commcare_user'),
    url(r'^commcare/toggle_demo_mode/(?P<user_id>[ \w-]+)/$', toggle_demo_mode, name='toggle_demo_mode'),
    url(
        r'^commcare/confirm_turn_off_demo_mode/(?P<couch_user_id>[ \w-]+)/$',
        ConfirmTurnOffDemoModeView.as_view(),
        name=ConfirmTurnOffDemoModeView.urlname
    ),
    url(r'^commcare/delete/$', DeleteCommCareUsers.as_view(), name=DeleteCommCareUsers.urlname),
    url(r'^commcare/lookup/$', CommCareUsersLookup.as_view(), name=CommCareUsersLookup.urlname),
    url(
        r'^commcare/reset_demo_user_restore/(?P<user_id>[ \w-]+)/$',
        reset_demo_user_restore,
        name='reset_demo_user_restore'
    ),
    url(
        r'^commcare/demo_restore/status/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/(?P<user_id>[ \w-]+)/$',
        DemoRestoreStatusView.as_view(),
        name=DemoRestoreStatusView.urlname
    ),
    url(
        r'^commcare/demo_restore/poll/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        demo_restore_job_poll,
        name='demo_restore_job_poll'
    ),
    url(
        r'^commcare/upload/$',
        waf_allow('XSS_BODY')(UploadCommCareUsers.as_view()),
        name=UploadCommCareUsers.urlname
    ),
    url(
        r'^commcare/upload/status/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        UserUploadStatusView.as_view(),
        name=UserUploadStatusView.urlname
    ),
    url(
        r'^commcare/upload/poll/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        CommcareUserUploadJobPollView.as_view(),
        name=CommcareUserUploadJobPollView.urlname
    ),
    url(r'^commcare/download/$', download_commcare_users, name='download_commcare_users'),
    url(
        r'^commcare/filter_and_download/$',
        FilteredCommCareUserDownload.as_view(),
        name=FilteredCommCareUserDownload.urlname
    ),
    url(r'^commcare/count_users/$', count_commcare_users, name='count_commcare_users'),
    url(
        r'^commcare/download/status/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        DownloadUsersStatusView.as_view(),
        name=DownloadUsersStatusView.urlname
    ),
    url(
        r'^commcare/download/poll/(?P<download_id>(?:dl-)?[0-9a-fA-Z]{25,32})/$',
        user_download_job_poll,
        name='user_download_job_poll'
    ),
    url(
        r'^commcare/new_mobile_worker_modal/$',
        CreateCommCareUserModal.as_view(),
        name=CreateCommCareUserModal.urlname
    ),
    url(
        r'^commcare/confirm_charges/$',
        ConfirmBillingAccountForExtraUsersView.as_view(),
        name=ConfirmBillingAccountForExtraUsersView.urlname
    ),
    url(
        r'^commcare/confirm_account/(?P<user_id>[\w-]+)/$',
        CommCareUserConfirmAccountView.as_view(),
        name=CommCareUserConfirmAccountView.urlname
    ),
    url(
        r'^commcare/send_confirmation_sms/(?P<user_id>[ \w-]+)/$',
        send_confirmation_sms,
        name='send_confirmation_sms'
    ),
    url(
        r'^commcare/confirm_account_sms/(?P<user_invite_hash>[\S-]+)/$',
        CommCareUserConfirmAccountBySMSView.as_view(),
        name=CommCareUserConfirmAccountBySMSView.urlname
    ),
] + [
    # --- Groups ---
    url(r'^groups/$', GroupsListView.as_view(), name=GroupsListView.urlname),
    url(r'^groups/(?P<group_id>[ \w-]+)/$', EditGroupMembersView.as_view(), name=EditGroupMembersView.urlname),
    url(
        r'^groups/sms_verification/(?P<group_id>[ \w-]+)$',
        BulkSMSVerificationView.as_view(),
        name=BulkSMSVerificationView.urlname
    ),
] + [
    # --- User management reports ---
    url(r'^reports/', include(user_management_urls)),
]
| bsd-3-clause | 5c02ea13832b37fe262389f205722d19 | 40.070313 | 115 | 0.653985 | 3.208422 | false | false | true | false |
strands-project/mongodb_store | mongodb_store/tests/test_replication.py | 1 | 4431 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: furushchev <furushchev@jsk.imi.i.u-tokyo.ac.jp>
import os
from bson import json_util
import pymongo
import rospy
import subprocess
import unittest
from mongodb_store.util import import_MongoClient, wait_for_mongo
from mongodb_store.message_store import MessageStoreProxy
from geometry_msgs.msg import Wrench, Pose
def get_script_path():
    """Return the absolute path to the replicator_client.py helper script.

    The script lives in ``<package>/scripts``, one level above this test
    module's directory.
    """
    package_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    return os.path.join(package_dir, "scripts", "replicator_client.py")
class TestReplication(unittest.TestCase):
    """Integration tests for replicator_client.py.

    Requires two mongod instances (source, and destination on port 49163)
    plus the message store node -- normally provided by the rostest launch
    file that runs this module.
    """

    def test_replication(self):
        """Entries are moved to the destination; --delete-after-move also
        removes them from the source."""
        replication_db = "replication_test"
        replication_col = "replication_test"
        # connect to destination for replication
        try:
            self.assertTrue(wait_for_mongo(ns="/datacentre2"), "wait for mongodb server")
            dst_client = import_MongoClient()("localhost", 49163)
            count = dst_client[replication_db][replication_col].count()
            self.assertEqual(count, 0, "No entry in destination")
        except pymongo.errors.ConnectionFailure:
            self.fail("Failed to connect to destination for replication")

        # insert an entry to move
        self.assertTrue(wait_for_mongo(), "wait for mongodb server")
        msg_store = MessageStoreProxy(
            database=replication_db, collection=replication_col)
        msg = Wrench()
        msg_name = "replication test message"
        self.assertIsNotNone(msg_store.insert_named(msg_name, msg), "inserted message")

        # move entries
        rospy.sleep(3)
        retcode = subprocess.check_call([
            get_script_path(),
            '--move-before', '0',
            replication_db, replication_col])
        self.assertEqual(retcode, 0, "replicator_client returns code 0")

        # check that replication succeeded
        rospy.sleep(3)
        count = dst_client[replication_db][replication_col].count()
        self.assertGreater(count, 0, "entry moved to the destination")

        # test deletion after move
        data, meta = msg_store.query_named(msg_name, Wrench._type)
        self.assertIsNotNone(data, "entry is still in source")
        retcode = subprocess.check_call([
            get_script_path(),
            '--move-before', '0',
            '--delete-after-move',
            replication_db, replication_col])
        self.assertEqual(retcode, 0, "replicator_client returns code 0")
        # Bug fix: query the name that was actually inserted (msg_name).
        # The original queried "replication test", a name that never
        # existed, so assertIsNone passed even when deletion was broken.
        data, meta = msg_store.query_named(msg_name, Wrench._type)
        self.assertIsNone(data, "moved entry is deleted from source")

    def test_replication_with_query(self):
        """Only entries matching the --query filter are moved."""
        replication_db = "replication_test_with_query"
        replication_col = "replication_test_with_query"
        # connect to destination for replication
        try:
            self.assertTrue(wait_for_mongo(ns="/datacentre2"), "wait for mongodb server")
            dst_client = import_MongoClient()("localhost", 49163)
            count = dst_client[replication_db][replication_col].count()
            self.assertEqual(count, 0, "No entry in destination")
        except pymongo.errors.ConnectionFailure:
            self.fail("Failed to connect to destination for replication")

        # insert five Wrench and five Pose entries
        self.assertTrue(wait_for_mongo(), "wait for mongodb server")
        msg_store = MessageStoreProxy(
            database=replication_db, collection=replication_col)
        for i in range(5):
            msg = Wrench()
            msg.force.x = i
            msg_store.insert(msg)
            msg = Pose()
            msg.position.x = i
            msg_store.insert(msg)

        # move only the Pose entries
        rospy.sleep(3)
        query = {'_meta.stored_type': Pose._type}
        retcode = subprocess.check_call([
            get_script_path(),
            '--move-before', '0',
            '--query', json_util.dumps(query),
            replication_db, replication_col])
        self.assertEqual(retcode, 0, "replicator_client returns code 0")

        # check that exactly the five Pose entries arrived
        rospy.sleep(3)
        count = dst_client[replication_db][replication_col].count()
        self.assertEqual(count, 5, "replicated entry exists in destination")
if __name__ == '__main__':
    import rostest
    # Run under rostest so the mongod instances and message store node from
    # the accompanying launch file are available.
    rospy.init_node("test_replication")
    rostest.rosrun("mongodb_store", "test_replication", TestReplication)
| bsd-3-clause | 71c52ba685108788870f1d3fe2f5a4ed | 38.918919 | 89 | 0.633717 | 4.050274 | false | true | false | false |
strands-project/mongodb_store | mongodb_store/src/mongodb_store/message_store.py | 1 | 12718 | from __future__ import absolute_import
import rospy
import mongodb_store_msgs.srv as dc_srv
import mongodb_store.util as dc_util
from mongodb_store_msgs.msg import StringPair, StringPairList, SerialisedMessage, Insert
from bson import json_util
from bson.objectid import ObjectId
import json
import copy
class MessageStoreProxy:
    """
    A class that provides functions for storage and retrieval of ROS Message
    objects in the mongodb_store. This is achieved by acting as a proxy to the
    services provided by the MessageStore ROS node, and therefore requires the
    message store node to be running in addition to the datacentre:

    `rosrun mongodb_store message_store_node.py`

    >>> from geometry_msgs.msg import Pose, Quaternion
    >>> msg_store = MessageStoreProxy()
    >>> p = Pose(Point(0, 1, 2), Quaternion(0, 0, 0 , 1))
    >>> msg_store.insert_named("my favourite pose", p)
    >>> retrieved = msg_store.query_named("my favourite pose", Pose._type)

    For usage examples, please see `example_message_store_client.py` within the
    scripts folder of mongodb_store.
    """

    def __init__(self, service_prefix='/message_store', database='message_store', collection='message_store', queue_size=100):
        """
        Args:
            | service_prefix (str): The prefix to the *insert*, *update*, *delete* and
                 *query_messages* ROS services.
            | database (str): The MongoDB database that this object works with.
            | collection (str): The MongoDB collection that this object works with.
            | queue_size (int): Size of the publisher queue used by non-blocking inserts.
        """
        self.database = database
        self.collection = collection
        insert_service = service_prefix + '/insert'
        update_service = service_prefix + '/update'
        delete_service = service_prefix + '/delete'
        query_service = service_prefix + '/query_messages'

        # Block until the message store services are available, warning on
        # each failed attempt so the user knows what is being waited for.
        found_services_first_try = True  # True if found straight away
        while not rospy.is_shutdown():
            try:
                rospy.wait_for_service(insert_service, 5)
                rospy.wait_for_service(update_service, 5)
                rospy.wait_for_service(query_service, 5)
                rospy.wait_for_service(delete_service, 5)
                break
            except rospy.ROSException:
                found_services_first_try = False
                rospy.logerr("Could not get message store services. Maybe the message "
                             "store has not been started? Retrying..")
        if not found_services_first_try:
            rospy.loginfo("Message store services found.")

        self.insert_srv = rospy.ServiceProxy(insert_service, dc_srv.MongoInsertMsg)
        self.update_srv = rospy.ServiceProxy(update_service, dc_srv.MongoUpdateMsg)
        self.query_srv = rospy.ServiceProxy(query_service, dc_srv.MongoQueryMsg)
        self.delete_srv = rospy.ServiceProxy(delete_service, dc_srv.MongoDeleteMsg)

        # Topic used for fire-and-forget inserts (insert(..., wait=False)).
        insert_topic = service_prefix + '/insert'
        self.pub_insert = rospy.Publisher(insert_topic, Insert, queue_size=queue_size)

    def insert_named(self, name, message, meta=None, wait=True):
        """
        Inserts a ROS message into the message storage, giving it a name for convenient
        later retrieval.

        .. note:: Multiple messages can be stored with the same name.

        :Args:
            | name (str): The name to refer to this message as.
            | message (ROS Message): An instance of a ROS message type to store
            | meta (dict): A dictionary of additional meta data to store in association
                              with this message.
            | wait (bool): If true, waits until database returns object id after insert
        :Returns:
            | (str) the ObjectId of the MongoDB document containing the stored message.
        """
        # Copy before adding the name so the caller's dict is never mutated.
        # (meta defaults to None rather than a shared mutable {} default.)
        meta_copy = copy.copy(meta) if meta is not None else {}
        meta_copy["name"] = name
        return self.insert(message, meta_copy, wait=wait)

    def insert(self, message, meta=None, wait=True):
        """
        Inserts a ROS message into the message storage.

        :Args:
            | message (ROS Message): An instance of a ROS message type to store
            | meta (dict): A dictionary of additional meta data to store in association
                              with this message.
            | wait (bool): If true, waits until database returns object id after insert
        :Returns:
            | (str) the ObjectId of the MongoDB document containing the stored message,
              or True when wait is False (the message is published asynchronously).
        """
        if meta is None:
            meta = {}
        # assume meta is a dict, convert k/v to tuple pairs
        meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta, default=json_util.default)),)
        serialised_msg = dc_util.serialise_message(message)
        if wait:
            return self.insert_srv(self.database, self.collection, serialised_msg, StringPairList(meta_tuple)).id
        else:
            msg = Insert(self.database, self.collection, serialised_msg, StringPairList(meta_tuple))
            self.pub_insert.publish(msg)
            return True

    def query_id(self, id, type):
        """
        Finds and returns the message with the given ID.

        :Parameters:
            | id (str): The ObjectID of the MongoDB document holding the message.
            | type (str): The ROS message type of the stored message to retrieve.
        :Returns:
            | message (ROS message), meta (dict): The retrieved message and associated metadata
              or *None* if the named message could not be found.
        """
        return self.query(type, {'_id': ObjectId(id)}, {}, True)

    def delete(self, message_id):
        """
        Delete the message with the given ID.

        :Parameters:
            | message_id (str) : The ObjectID of the MongoDB document holding the message.
        :Returns:
            | bool : was the object successfully deleted.
        """
        return self.delete_srv(self.database, self.collection, message_id)

    def query_named(self, name, type, single=True, meta=None, limit=0):
        """
        Finds and returns the message(s) with the given name.

        :Args:
            | name (str): The name of the stored messages to retrieve.
            | type (str): The type of the stored message.
            | single (bool): Should only one message be returned?
            | meta (dict): Extra queries on the meta data of the message.
            | limit (int): Limit number of return documents
        :Return:
            | message (ROS message), meta (dict): The retrieved message and associated metadata
              or *None* if the named message could not be found.
        """
        # Copy before adding the name so the caller's dict is never mutated.
        meta_copy = copy.copy(meta) if meta is not None else {}
        meta_copy["name"] = name
        # Pass limit by keyword. The previous positional call
        # `self.query(type, {}, meta_copy, single, [], limit)` bound limit to
        # the projection_query parameter, so the limit was silently ignored.
        return self.query(type, {}, meta_copy, single, limit=limit)

    def update_named(self, name, message, meta=None, upsert=False):
        """
        Updates a named message.

        :Args:
            | name (str): The name of the stored messages to update.
            | message (ROS Message): The updated ROS message
            | meta (dict): Updated meta data to store with the message.
            | upsert (bool): If True, insert the named message if it doesnt exist.
        :Return:
            | str, bool: The MongoDB ObjectID of the document, and whether it was altered by
              the update.
        """
        meta_query = {"name": name}
        # make sure the name stays in the meta info after the update
        meta_copy = copy.copy(meta) if meta is not None else {}
        meta_copy["name"] = name
        return self.update(message, meta_copy, {}, meta_query, upsert)

    def update_id(self, id, message, meta=None, upsert=False):
        """
        Updates a message by MongoDB ObjectId.

        :Args:
            | id (str): The MongoDB ObjectId of the document storing the message.
            | message (ROS Message): The updated ROS message
            | meta (dict): Updated meta data to store with the message.
            | upsert (bool): If True, insert the named message if it doesnt exist.
        :Return:
            | str, bool: The MongoDB ObjectID of the document, and whether it was altered by
              the update.
        """
        msg_query = {'_id': ObjectId(id)}
        return self.update(message, meta, msg_query, {}, upsert)

    def update(self, message, meta=None, message_query=None, meta_query=None, upsert=False):
        """
        Updates a message.

        :Args:
            | message (ROS Message): The updated ROS message
            | meta (dict): Updated meta data to store with the message.
            | message_query (dict): A query to match the ROS message that is to be updated.
            | meta_query (dict): A query to match against the meta data of the message to be updated
            | upsert (bool): If True, insert the named message if it doesnt exist.
        :Return:
            | str, bool: The MongoDB ObjectID of the document, and whether it was altered by
              the update.
        """
        if meta is None:
            meta = {}
        if message_query is None:
            message_query = {}
        if meta_query is None:
            meta_query = {}
        # serialise the json queries to strings using json_util.dumps
        message_query_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(message_query, default=json_util.default)),)
        meta_query_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta_query, default=json_util.default)),)
        meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta, default=json_util.default)),)
        return self.update_srv(self.database, self.collection, upsert, StringPairList(message_query_tuple), StringPairList(meta_query_tuple), dc_util.serialise_message(message), StringPairList(meta_tuple))

    def query(self, type, message_query=None, meta_query=None, single=False, sort_query=None, projection_query=None, limit=0):
        """
        Finds and returns message(s) matching the message and meta data queries.

        :Parameters:
            | type (str): The ROS message type of the stored message to retrieve.
            | message_query (dict): A query to match the actual ROS message
            | meta_query (dict): A query to match against the meta data of the message
            | sort_query (list of tuple): A query to request sorted list to mongodb module
            | projection_query (dict): A query to request desired fields to be returned or excluded
            | single (bool): Should only one message be returned?
            | limit (int): Limit number of return documents
        :Returns:
            | [message, meta] where message is the queried message and meta a dictionary of
              meta information. If single is false returns a list of these pairs.
        """
        message_query = message_query if message_query is not None else {}
        meta_query = meta_query if meta_query is not None else {}
        sort_query = sort_query if sort_query is not None else []
        projection_query = projection_query if projection_query is not None else {}

        # serialise the json queries to strings using json_util.dumps
        message_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(message_query, default=json_util.default)),)
        meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta_query, default=json_util.default)),)
        projection_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(projection_query, default=json_util.default)),)
        if len(sort_query) > 0:
            sort_tuple = [StringPair(str(k), str(v)) for k, v in sort_query]
        else:
            sort_tuple = []

        response = self.query_srv(
            self.database, self.collection, type, single, limit,
            StringPairList(message_tuple),
            StringPairList(meta_tuple),
            StringPairList(sort_tuple),
            StringPairList(projection_tuple))

        if response.messages is None:
            messages = []
            metas = []
        else:
            # Materialise as lists: under Python 3 map() returns a lazy
            # iterator, so the len() check and indexing below would fail.
            messages = list(map(dc_util.deserialise_message, response.messages))
            metas = list(map(dc_util.string_pair_list_to_dictionary, response.metas))

        if single:
            if len(messages) > 0:
                return [messages[0], metas[0]]
            else:
                return [None, None]
        else:
            # Return a list (not a one-shot zip iterator) so callers can
            # iterate the result more than once.
            return list(zip(messages, metas))
| bsd-3-clause | cac8749c759f779fd3fcf8134619501d | 45.586081 | 205 | 0.609844 | 4.267785 | false | false | false | false |
uwescience/myria-web | appengine/networkx/readwrite/json_graph/adjacency.py | 14 | 2989 | # Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
from itertools import count,repeat
import json
import networkx as nx
__author__ = """Aric Hagberg (hagberg@lanl.gov))"""
__all__ = ['adjacency_data', 'adjacency_graph']
def adjacency_data(G):
"""Return data in adjacency format that is suitable for JSON serialization
and use in Javascript documents.
Parameters
----------
G : NetworkX graph
Returns
-------
data : dict
A dictionary with node-link formatted data.
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.Graph([(1,2)])
>>> data = json_graph.adjacency_data(G)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Graph, node, and link attributes are stored in this format but keys
for attributes must be strings if you want to serialize with JSON.
See Also
--------
adjacency_graph, node_link_data, tree_data
"""
data = {}
data['directed'] = G.is_directed()
data['multigraph'] = G.is_multigraph()
data['graph'] = list(G.graph.items())
data['nodes'] = []
data['adjacency'] = []
for n,nbrdict in G.adjacency_iter():
data['nodes'].append(dict(id=n, **G.node[n]))
adj = []
for nbr,d in nbrdict.items():
adj.append(dict(id=nbr, **d))
data['adjacency'].append(adj)
return data
def adjacency_graph(data, directed=False, multigraph=True):
"""Return graph from adjacency data format.
Parameters
----------
data : dict
Adjacency list formatted graph data
Returns
-------
G : NetworkX graph
A NetworkX graph object
directed : bool
If True, and direction not specified in data, return a directed graph.
multigraph : bool
If True, and multigraph not specified in data, return a multigraph.
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.Graph([(1,2)])
>>> data = json_graph.adjacency_data(G)
>>> H = json_graph.adjacency_graph(data)
See Also
--------
adjacency_graph, node_link_data, tree_data
"""
multigraph = data.get('multigraph',multigraph)
directed = data.get('directed',directed)
if multigraph:
graph = nx.MultiGraph()
else:
graph = nx.Graph()
if directed:
graph = graph.to_directed()
graph.graph = dict(data.get('graph',[]))
mapping=[]
for d in data['nodes']:
node = d.pop('id')
mapping.append(node)
graph.add_node(node, attr_dict=d)
for i,d in enumerate(data['adjacency']):
source = mapping[i]
for tdata in d:
target=tdata.pop('id')
graph.add_edge(source,target,attr_dict=tdata)
return graph
| bsd-3-clause | f0dcd155f8fdb25a79cc71a0f069d14e | 25.927928 | 78 | 0.590498 | 3.793147 | false | false | false | false |
uwescience/myria-web | appengine/networkx/algorithms/isomorphism/matchhelpers.py | 35 | 12220 | """Functions which help end users define customize node_match and
edge_match functions to use during isomorphism checks.
"""
from itertools import permutations
import types
import networkx as nx
__all__ = ['categorical_node_match',
'categorical_edge_match',
'categorical_multiedge_match',
'numerical_node_match',
'numerical_edge_match',
'numerical_multiedge_match',
'generic_node_match',
'generic_edge_match',
'generic_multiedge_match',
]
def copyfunc(f, name=None):
"""Returns a deepcopy of a function."""
try:
return types.FunctionType(f.func_code, f.func_globals, name or f.name,
f.func_defaults, f.func_closure)
except AttributeError:
return types.FunctionType(f.__code__, f.__globals__, name or f.name,
f.__defaults__, f.__closure__)
def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close, elementwise.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
for xi, yi in zip(x,y):
if not ( abs(xi-yi) <= atol + rtol * abs(yi) ):
return False
return True
def close(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
return abs(x-y) <= atol + rtol * abs(y)
categorical_doc = """
Returns a comparison function for a categorical node attribute.
The value(s) of the attr(s) must be hashable and comparable via the ==
operator since they are placed into a set([]) object. If the sets from
G1 and G2 are the same, then the constructed function returns True.
Parameters
----------
attr : string | list
The categorical node attribute to compare, or a list of categorical
node attributes to compare.
default : value | list
The default value for the categorical node attribute, or a list of
default values for the categorical node attributes.
Returns
-------
match : function
The customized, categorical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.categorical_node_match('size', 1)
>>> nm = iso.categorical_node_match(['color', 'size'], ['red', 2])
"""
def categorical_node_match(attr, default):
if nx.utils.is_string_like(attr):
def match(data1, data2):
return data1.get(attr, default) == data2.get(attr, default)
else:
attrs = list(zip(attr, default)) # Python 3
def match(data1, data2):
values1 = set([data1.get(attr, d) for attr, d in attrs])
values2 = set([data2.get(attr, d) for attr, d in attrs])
return values1 == values2
return match
categorical_edge_match = copyfunc(categorical_node_match, 'categorical_edge_match')
def categorical_multiedge_match(attr, default):
if nx.utils.is_string_like(attr):
def match(datasets1, datasets2):
values1 = set([data.get(attr, default) for data in datasets1.values()])
values2 = set([data.get(attr, default) for data in datasets2.values()])
return values1 == values2
else:
attrs = list(zip(attr, default)) # Python 3
def match(datasets1, datasets2):
values1 = set([])
for data1 in datasets1.values():
x = tuple( data1.get(attr, d) for attr, d in attrs )
values1.add(x)
values2 = set([])
for data2 in datasets2.values():
x = tuple( data2.get(attr, d) for attr, d in attrs )
values2.add(x)
return values1 == values2
return match
# Docstrings for categorical functions.
categorical_node_match.__doc__ = categorical_doc
categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
tmpdoc = categorical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match')
categorical_multiedge_match.__doc__ = tmpdoc
numerical_doc = """
Returns a comparison function for a numerical node attribute.
The value(s) of the attr(s) must be numerical and sortable. If the
sorted list of values from G1 and G2 are the same within some
tolerance, then the constructed function returns True.
Parameters
----------
attr : string | list
The numerical node attribute to compare, or a list of numerical
node attributes to compare.
default : value | list
The default value for the numerical node attribute, or a list of
default values for the numerical node attributes.
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
Returns
-------
match : function
The customized, numerical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.numerical_node_match('weight', 1.0)
>>> nm = iso.numerical_node_match(['weight', 'linewidth'], [.25, .5])
"""
def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
if nx.utils.is_string_like(attr):
def match(data1, data2):
return close(data1.get(attr, default),
data2.get(attr, default),
rtol=rtol, atol=atol)
else:
attrs = list(zip(attr, default)) # Python 3
def match(data1, data2):
values1 = [data1.get(attr, d) for attr, d in attrs]
values2 = [data2.get(attr, d) for attr, d in attrs]
return allclose(values1, values2, rtol=rtol, atol=atol)
return match
numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
if nx.utils.is_string_like(attr):
def match(datasets1, datasets2):
values1 = sorted([data.get(attr, default) for data in datasets1.values()])
values2 = sorted([data.get(attr, default) for data in datasets2.values()])
return allclose(values1, values2, rtol=rtol, atol=atol)
else:
attrs = list(zip(attr, default)) # Python 3
def match(datasets1, datasets2):
values1 = []
for data1 in datasets1.values():
x = tuple( data1.get(attr, d) for attr, d in attrs )
values1.append(x)
values2 = []
for data2 in datasets2.values():
x = tuple( data2.get(attr, d) for attr, d in attrs )
values2.append(x)
values1.sort()
values2.sort()
for xi, yi in zip(values1, values2):
if not allclose(xi, yi, rtol=rtol, atol=atol):
return False
else:
return True
return match
# Docstrings for numerical functions.
numerical_node_match.__doc__ = numerical_doc
numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
tmpdoc = numerical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
numerical_multiedge_match.__doc__ = tmpdoc
generic_doc = """
Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True.
Parameters
----------
attr : string | list
The node attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the node attribute, or a list of
default values for the node attributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `node_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'], [1.0, 'red'], [close, eq])
"""
def generic_node_match(attr, default, op):
if nx.utils.is_string_like(attr):
def match(data1, data2):
return op(data1.get(attr, default), data2.get(attr, default))
else:
attrs = list(zip(attr, default, op)) # Python 3
def match(data1, data2):
for attr, d, operator in attrs:
if not operator(data1.get(attr, d), data2.get(attr, d)):
return False
else:
return True
return match
generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
def generic_multiedge_match(attr, default, op):
"""Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True. Potentially, the constructed edge_match
function can be slow since it must verify that no isomorphism
exists between the multiedges before it returns False.
Parameters
----------
attr : string | list
The edge attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the edge attribute, or a list of
default values for the dgeattributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `edge_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'],
... [1.0, 'red'],
... [close, eq])
...
"""
# This is slow, but generic.
# We must test every possible isomorphism between the edges.
if nx.utils.is_string_like(attr):
def match(datasets1, datasets2):
values1 = [data.get(attr, default) for data in datasets1.values()]
values2 = [data.get(attr, default) for data in datasets2.values()]
for vals2 in permutations(values2):
for xi, yi in zip(values1, vals2):
if not op(xi, yi):
# This is not an isomorphism, go to next permutation.
break
else:
# Then we found an isomorphism.
return True
else:
# Then there are no isomorphisms between the multiedges.
return False
else:
attrs = list(zip(attr, default)) # Python 3
def match(datasets1, datasets2):
values1 = []
for data1 in datasets1.values():
x = tuple( data1.get(attr, d) for attr, d in attrs )
values1.append(x)
values2 = []
for data2 in datasets2.values():
x = tuple( data2.get(attr, d) for attr, d in attrs )
values2.append(x)
for vals2 in permutations(values2):
for xi, yi, operator in zip(values1, vals2, op):
if not operator(xi, yi):
return False
else:
return True
return match
# Docstrings for numerical functions.
generic_node_match.__doc__ = generic_doc
generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
| bsd-3-clause | 49dc119310ff97a5a00c3f8977014a21 | 34.317919 | 86 | 0.617512 | 3.981753 | false | false | false | false |
uwescience/myria-web | appengine/networkx/utils/rcm.py | 15 | 4562 | """
Cuthill-McKee ordering of graph nodes to produce sparse matrices
"""
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
from operator import itemgetter
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['cuthill_mckee_ordering',
'reverse_cuthill_mckee_ordering']
def cuthill_mckee_ordering(G, start=None):
"""Generate an ordering (permutation) of the graph nodes to make
a sparse matrix.
Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_.
Parameters
----------
G : graph
A NetworkX graph
start : node, optional
Start algorithm and specified node. The node should be on the
periphery of the graph for best results.
Returns
-------
nodes : generator
Generator of nodes in Cuthill-McKee ordering.
Examples
--------
>>> from networkx.utils import cuthill_mckee_ordering
>>> G = nx.path_graph(4)
>>> rcm = list(cuthill_mckee_ordering(G))
>>> A = nx.adjacency_matrix(G, nodelist=rcm) # doctest: +SKIP
See Also
--------
reverse_cuthill_mckee_ordering
Notes
-----
The optimal solution the the bandwidth reduction is NP-complete [2]_.
References
----------
.. [1] E. Cuthill and J. McKee.
Reducing the bandwidth of sparse symmetric matrices,
In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
http://doi.acm.org/10.1145/800195.805928
.. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
Springer-Verlag New York, Inc., New York, NY, USA.
"""
for g in nx.connected_component_subgraphs(G):
for n in connected_cuthill_mckee_ordering(g, start):
yield n
def reverse_cuthill_mckee_ordering(G, start=None):
"""Generate an ordering (permutation) of the graph nodes to make
a sparse matrix.
Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
[1]_.
Parameters
----------
G : graph
A NetworkX graph
start : node, optional
Start algorithm and specified node. The node should be on the
periphery of the graph for best results.
Returns
-------
nodes : generator
Generator of nodes in reverse Cuthill-McKee ordering.
Examples
--------
>>> from networkx.utils import reverse_cuthill_mckee_ordering
>>> G = nx.path_graph(4)
>>> rcm = list(reverse_cuthill_mckee_ordering(G))
>>> A = nx.adjacency_matrix(G, nodelist=rcm) # doctest: +SKIP
See Also
--------
cuthill_mckee_ordering
Notes
-----
The optimal solution the the bandwidth reduction is NP-complete [2]_.
References
----------
.. [1] E. Cuthill and J. McKee.
Reducing the bandwidth of sparse symmetric matrices,
In Proc. 24th Nat. Conf. ACM, pages 157-72, 1969.
http://doi.acm.org/10.1145/800195.805928
.. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
Springer-Verlag New York, Inc., New York, NY, USA.
"""
return reversed(list(cuthill_mckee_ordering(G, start=start)))
def connected_cuthill_mckee_ordering(G, start=None):
# the cuthill mckee algorithm for connected graphs
if start is None:
(_, start) = find_pseudo_peripheral_node_pair(G)
yield start
visited = set([start])
stack = [(start, iter(G[start]))]
while stack:
parent,children = stack[0]
if parent not in visited:
yield parent
try:
child = next(children)
if child not in visited:
yield child
visited.add(child)
# add children to stack, sorted by degree (lowest first)
nd = sorted(G.degree(G[child]).items(), key=itemgetter(1))
children = (n for n,d in nd)
stack.append((child,children))
except StopIteration:
stack.pop(0)
def find_pseudo_peripheral_node_pair(G, start=None):
# helper for cuthill-mckee to find a "pseudo peripheral pair"
# to use as good starting node
if start is None:
u = next(G.nodes_iter())
else:
u = start
lp = 0
v = u
while True:
spl = nx.shortest_path_length(G, v)
l = max(spl.values())
if l <= lp:
break
lp = l
farthest = [n for n,dist in spl.items() if dist==l]
v, deg = sorted(G.degree(farthest).items(), key=itemgetter(1))[0]
return u, v
| bsd-3-clause | 7e7fe4c9c862763c00cb6a35bbaf1b0a | 29.413333 | 77 | 0.600833 | 3.511932 | false | false | false | false |
uwescience/myria-web | appengine/networkx/algorithms/centrality/tests/test_load_centrality.py | 3 | 7544 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestLoadCentrality:
def setUp(self):
G=nx.Graph();
G.add_edge(0,1,weight=3)
G.add_edge(0,2,weight=2)
G.add_edge(0,3,weight=6)
G.add_edge(0,4,weight=4)
G.add_edge(1,3,weight=5)
G.add_edge(1,5,weight=5)
G.add_edge(2,4,weight=1)
G.add_edge(3,4,weight=2)
G.add_edge(3,5,weight=1)
G.add_edge(4,5,weight=4)
self.G=G
self.exact_weighted={0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0}
self.K = nx.krackhardt_kite_graph()
self.P3 = nx.path_graph(3)
self.P4 = nx.path_graph(4)
self.K5 = nx.complete_graph(5)
self.C4=nx.cycle_graph(4)
self.T=nx.balanced_tree(r=2, h=2)
self.Gb = nx.Graph()
self.Gb.add_edges_from([(0,1), (0,2), (1,3), (2,3),
(2,4), (4,5), (3,5)])
F = nx.florentine_families_graph()
self.F = F
def test_weighted_load(self):
b=nx.load_centrality(self.G,weight='weight',normalized=False)
for n in sorted(self.G):
assert_equal(b[n],self.exact_weighted[n])
def test_k5_load(self):
G=self.K5
c=nx.load_centrality(G)
d={0: 0.000,
1: 0.000,
2: 0.000,
3: 0.000,
4: 0.000}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_p3_load(self):
G=self.P3
c=nx.load_centrality(G)
d={0: 0.000,
1: 1.000,
2: 0.000}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_krackhardt_load(self):
G=self.K
c=nx.load_centrality(G)
d={0: 0.023,
1: 0.023,
2: 0.000,
3: 0.102,
4: 0.000,
5: 0.231,
6: 0.231,
7: 0.389,
8: 0.222,
9: 0.000}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_florentine_families_load(self):
G=self.F
c=nx.load_centrality(G)
d={'Acciaiuoli': 0.000,
'Albizzi': 0.211,
'Barbadori': 0.093,
'Bischeri': 0.104,
'Castellani': 0.055,
'Ginori': 0.000,
'Guadagni': 0.251,
'Lamberteschi': 0.000,
'Medici': 0.522,
'Pazzi': 0.000,
'Peruzzi': 0.022,
'Ridolfi': 0.117,
'Salviati': 0.143,
'Strozzi': 0.106,
'Tornabuoni': 0.090}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_unnormalized_k5_load(self):
G=self.K5
c=nx.load_centrality(G,normalized=False)
d={0: 0.000,
1: 0.000,
2: 0.000,
3: 0.000,
4: 0.000}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_unnormalized_p3_load(self):
G=self.P3
c=nx.load_centrality(G,normalized=False)
d={0: 0.000,
1: 2.000,
2: 0.000}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_unnormalized_krackhardt_load(self):
G=self.K
c=nx.load_centrality(G,normalized=False)
d={0: 1.667,
1: 1.667,
2: 0.000,
3: 7.333,
4: 0.000,
5: 16.667,
6: 16.667,
7: 28.000,
8: 16.000,
9: 0.000}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_unnormalized_florentine_families_load(self):
G=self.F
c=nx.load_centrality(G,normalized=False)
d={'Acciaiuoli': 0.000,
'Albizzi': 38.333,
'Barbadori': 17.000,
'Bischeri': 19.000,
'Castellani': 10.000,
'Ginori': 0.000,
'Guadagni': 45.667,
'Lamberteschi': 0.000,
'Medici': 95.000,
'Pazzi': 0.000,
'Peruzzi': 4.000,
'Ridolfi': 21.333,
'Salviati': 26.000,
'Strozzi': 19.333,
'Tornabuoni': 16.333}
for n in sorted(G):
assert_almost_equal(c[n],d[n],places=3)
def test_load_betweenness_difference(self):
# Difference Between Load and Betweenness
# --------------------------------------- The smallest graph
# that shows the difference between load and betweenness is
# G=ladder_graph(3) (Graph B below)
# Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong
# Wang: Comment on ``Scientific collaboration
# networks. II. Shortest paths, weighted networks, and
# centrality". http://arxiv.org/pdf/physics/0511084
# Notice that unlike here, their calculation adds to 1 to the
# betweennes of every node i for every path from i to every
# other node. This is exactly what it should be, based on
# Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t,
# s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}, therefore,
# they allow v to be the target node.
# We follow Brandes 2001, who follows Freeman 1977 that make
# the sum for betweenness of v exclude paths where v is either
# the source or target node. To agree with their numbers, we
# must additionally, remove edge (4,8) from the graph, see AC
# example following (there is a mistake in the figure in their
# paper - personal communication).
# A = nx.Graph()
# A.add_edges_from([(0,1), (1,2), (1,3), (2,4),
# (3,5), (4,6), (4,7), (4,8),
# (5,8), (6,9), (7,9), (8,9)])
B = nx.Graph() # ladder_graph(3)
B.add_edges_from([(0,1), (0,2), (1,3), (2,3), (2,4), (4,5), (3,5)])
c = nx.load_centrality(B,normalized=False)
d={0: 1.750,
1: 1.750,
2: 6.500,
3: 6.500,
4: 1.750,
5: 1.750}
for n in sorted(B):
assert_almost_equal(c[n],d[n],places=3)
def test_c4_edge_load(self):
G=self.C4
c = nx.edge_load(G)
d={(0, 1): 6.000,
(0, 3): 6.000,
(1, 2): 6.000,
(2, 3): 6.000}
for n in G.edges():
assert_almost_equal(c[n],d[n],places=3)
def test_p4_edge_load(self):
G=self.P4
c = nx.edge_load(G)
d={(0, 1): 6.000,
(1, 2): 8.000,
(2, 3): 6.000}
for n in G.edges():
assert_almost_equal(c[n],d[n],places=3)
def test_k5_edge_load(self):
G=self.K5
c = nx.edge_load(G)
d={(0, 1): 5.000,
(0, 2): 5.000,
(0, 3): 5.000,
(0, 4): 5.000,
(1, 2): 5.000,
(1, 3): 5.000,
(1, 4): 5.000,
(2, 3): 5.000,
(2, 4): 5.000,
(3, 4): 5.000}
for n in G.edges():
assert_almost_equal(c[n],d[n],places=3)
def test_tree_edge_load(self):
G=self.T
c = nx.edge_load(G)
d={(0, 1): 24.000,
(0, 2): 24.000,
(1, 3): 12.000,
(1, 4): 12.000,
(2, 5): 12.000,
(2, 6): 12.000}
for n in G.edges():
assert_almost_equal(c[n],d[n],places=3)
| bsd-3-clause | 11b518c2d312d9683d084598eef486ea | 28.818182 | 76 | 0.462089 | 2.897081 | false | true | false | false |
uwescience/myria-web | appengine/networkx/tests/test.py | 96 | 1244 | #!/usr/bin/env python
import sys
from os import path,getcwd
def run(verbosity=1,doctest=False,numpy=True):
"""Run NetworkX tests.
Parameters
----------
verbosity: integer, optional
Level of detail in test reports. Higher numbers provide more detail.
doctest: bool, optional
True to run doctests in code modules
numpy: bool, optional
True to test modules dependent on numpy
"""
try:
import nose
except ImportError:
raise ImportError(\
"The nose package is needed to run the NetworkX tests.")
sys.stderr.write("Running NetworkX tests:")
nx_install_dir=path.join(path.dirname(__file__), path.pardir)
# stop if running from source directory
if getcwd() == path.abspath(path.join(nx_install_dir,path.pardir)):
raise RuntimeError("Can't run tests from source directory.\n"
"Run 'nosetests' from the command line.")
argv=[' ','--verbosity=%d'%verbosity,
'-w',nx_install_dir,
'-exe']
if doctest:
argv.extend(['--with-doctest','--doctest-extension=txt'])
if not numpy:
argv.extend(['-A not numpy'])
nose.run(argv=argv)
if __name__=="__main__":
run()
| bsd-3-clause | 7b82f5ff9670a8273b6ea08d1945b164 | 26.644444 | 78 | 0.611736 | 4.160535 | false | true | false | false |
uwescience/myria-web | appengine/networkx/algorithms/link_analysis/pagerank_alg.py | 3 | 12801 | """
PageRank analysis of graph structure.
"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# NetworkX:http://networkx.lanl.gov/.
import networkx as nx
from networkx.exception import NetworkXError
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['pagerank','pagerank_numpy','pagerank_scipy','google_matrix']
def pagerank(G,alpha=0.85,personalization=None,
max_iter=100,tol=1.0e-8,nstart=None,weight='weight'):
"""Return the PageRank of the nodes in the graph.
PageRank computes a ranking of the nodes in the graph G based on
the structure of the incoming links. It was originally designed as
an algorithm to rank web pages.
Parameters
-----------
G : graph
A NetworkX graph
alpha : float, optional
Damping parameter for PageRank, default=0.85
personalization: dict, optional
The "personalization vector" consisting of a dictionary with a
key for every graph node and nonzero personalization value for each node.
max_iter : integer, optional
Maximum number of iterations in power method eigenvalue solver.
tol : float, optional
Error tolerance used to check convergence in power method solver.
nstart : dictionary, optional
Starting value of PageRank iteration for each node.
weight : key, optional
Edge data key to use as weight. If None weights are set to 1.
Returns
-------
pagerank : dictionary
Dictionary of nodes with PageRank as value
Examples
--------
>>> G=nx.DiGraph(nx.path_graph(4))
>>> pr=nx.pagerank(G,alpha=0.9)
Notes
-----
The eigenvector calculation is done by the power iteration method
and has no guarantee of convergence. The iteration will stop
after max_iter iterations or an error tolerance of
number_of_nodes(G)*tol has been reached.
The PageRank algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs by converting each oriented edge in the
directed graph to two edges.
See Also
--------
pagerank_numpy, pagerank_scipy, google_matrix
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
The PageRank citation ranking: Bringing order to the Web. 1999
http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
"""
if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
raise Exception("pagerank() not defined for graphs with multiedges.")
if not G.is_directed():
D=G.to_directed()
else:
D=G
# create a copy in (right) stochastic form
W=nx.stochastic_graph(D, weight=weight)
scale=1.0/W.number_of_nodes()
# choose fixed starting vector if not given
if nstart is None:
x=dict.fromkeys(W,scale)
else:
x=nstart
# normalize starting vector to 1
s=1.0/sum(x.values())
for k in x: x[k]*=s
# assign uniform personalization/teleportation vector if not given
if personalization is None:
p=dict.fromkeys(W,scale)
else:
p=personalization
# normalize starting vector to 1
s=1.0/sum(p.values())
for k in p:
p[k]*=s
if set(p)!=set(G):
raise NetworkXError('Personalization vector '
'must have a value for every node')
# "dangling" nodes, no links out from them
out_degree=W.out_degree()
dangle=[n for n in W if out_degree[n]==0.0]
i=0
while True: # power iteration: make up to max_iter iterations
xlast=x
x=dict.fromkeys(xlast.keys(),0)
danglesum=alpha*scale*sum(xlast[n] for n in dangle)
for n in x:
# this matrix multiply looks odd because it is
# doing a left multiply x^T=xlast^T*W
for nbr in W[n]:
x[nbr]+=alpha*xlast[n]*W[n][nbr][weight]
x[n]+=danglesum+(1.0-alpha)*p[n]
# normalize vector
s=1.0/sum(x.values())
for n in x:
x[n]*=s
# check convergence, l1 norm
err=sum([abs(x[n]-xlast[n]) for n in x])
if err < tol:
break
if i>max_iter:
raise NetworkXError('pagerank: power iteration failed to converge'
'in %d iterations.'%(i+1))
i+=1
return x
def google_matrix(G, alpha=0.85, personalization=None,
nodelist=None, weight='weight'):
"""Return the Google matrix of the graph.
Parameters
-----------
G : graph
A NetworkX graph
alpha : float
The damping factor
personalization: dict, optional
The "personalization vector" consisting of a dictionary with a
key for every graph node and nonzero personalization value for each node.
nodelist : list, optional
The rows and columns are ordered according to the nodes in nodelist.
If nodelist is None, then the ordering is produced by G.nodes().
weight : key, optional
Edge data key to use as weight. If None weights are set to 1.
Returns
-------
A : NumPy matrix
Google matrix of the graph
See Also
--------
pagerank, pagerank_numpy, pagerank_scipy
"""
try:
import numpy as np
except ImportError:
raise ImportError(\
"google_matrix() requires NumPy: http://scipy.org/")
# choose ordering in matrix
if personalization is None: # use G.nodes() ordering
nodelist=G.nodes()
else: # use personalization "vector" ordering
nodelist=personalization.keys()
if set(nodelist)!=set(G):
raise NetworkXError('Personalization vector dictionary'
'must have a value for every node')
M=nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight)
(n,m)=M.shape # should be square
# add constant to dangling nodes' row
dangling=np.where(M.sum(axis=1)==0)
for d in dangling[0]:
M[d]=1.0/n
# normalize
M=M/M.sum(axis=1)
# add "teleportation"/personalization
e=np.ones((n))
if personalization is not None:
v=np.array(personalization.values()).astype(np.float)
else:
v=e
v=v/v.sum()
P=alpha*M+(1-alpha)*np.outer(e,v)
return P
def pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight'):
    """Return the PageRank of the nodes in the graph.

    PageRank computes a ranking of the nodes in the graph G based on
    the structure of the incoming links. It was originally designed as
    an algorithm to rank web pages.

    Parameters
    -----------
    G : graph
      A NetworkX graph

    alpha : float, optional
      Damping parameter for PageRank, default=0.85

    personalization: dict, optional
      The "personalization vector" consisting of a dictionary with a
      key for every graph node and nonzero personalization value for each node.

    weight : key, optional
      Edge data key to use as weight.  If None weights are set to 1.

    Returns
    -------
    pagerank : dictionary
       Dictionary of nodes with PageRank as value

    Examples
    --------
    >>> G=nx.DiGraph(nx.path_graph(4))
    >>> pr=nx.pagerank_numpy(G,alpha=0.9)

    Notes
    -----
    The eigenvector calculation uses NumPy's interface to the LAPACK
    eigenvalue solvers.  This will be the fastest and most accurate
    for small graphs.

    This implementation works with Multi(Di)Graphs.

    See Also
    --------
    pagerank, pagerank_scipy, google_matrix

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
       The PageRank citation ranking: Bringing order to the Web. 1999
       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError("pagerank_numpy() requires NumPy: http://scipy.org/")
    # choose ordering in matrix
    if personalization is None:  # use G.nodes() ordering
        nodelist = G.nodes()
    else:  # use personalization "vector" ordering
        # materialize as a list so it can be iterated more than once
        nodelist = list(personalization.keys())
    M = google_matrix(G, alpha, personalization=personalization,
                      nodelist=nodelist, weight=weight)
    # use numpy LAPACK solver; left eigenvector of M = right eigenvector of M.T
    eigenvalues, eigenvectors = np.linalg.eig(M.T)
    ind = eigenvalues.argsort()
    # eigenvector of largest eigenvalue is at ind[-1]; normalizing by the
    # (signed) sum also fixes an overall sign flip from the solver.
    # NOTE: np.float was removed from NumPy; it was always just builtin float.
    largest = np.array(eigenvectors[:, ind[-1]]).flatten().astype(float)
    norm = largest.sum()
    centrality = dict(zip(nodelist, largest / norm))
    return centrality
def pagerank_scipy(G, alpha=0.85, personalization=None,
                   max_iter=100, tol=1.0e-6, weight='weight'):
    """Return the PageRank of the nodes in the graph.

    PageRank computes a ranking of the nodes in the graph G based on
    the structure of the incoming links. It was originally designed as
    an algorithm to rank web pages.

    Parameters
    -----------
    G : graph
      A NetworkX graph

    alpha : float, optional
      Damping parameter for PageRank, default=0.85

    personalization: dict, optional
      The "personalization vector" consisting of a dictionary with a
      key for every graph node and nonzero personalization value for each node.

    max_iter : integer, optional
      Maximum number of iterations in power method eigenvalue solver.

    tol : float, optional
      Error tolerance used to check convergence in power method solver.

    weight : key, optional
      Edge data key to use as weight.  If None weights are set to 1.

    Returns
    -------
    pagerank : dictionary
       Dictionary of nodes with PageRank as value

    Examples
    --------
    >>> G=nx.DiGraph(nx.path_graph(4))
    >>> pr=nx.pagerank_scipy(G,alpha=0.9)

    Notes
    -----
    The eigenvector calculation uses power iteration with a SciPy
    sparse matrix representation.

    See Also
    --------
    pagerank, pagerank_numpy, google_matrix

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
       The PageRank citation ranking: Bringing order to the Web. 1999
       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
    """
    try:
        # NOTE: array helpers (array/ones/where/dot/absolute) now come from
        # numpy; the scipy.* top-level aliases were deprecated and removed.
        import numpy as np
        import scipy.sparse
    except ImportError:
        raise ImportError("pagerank_scipy() requires SciPy: http://scipy.org/")
    # choose ordering in matrix
    if personalization is None:  # use G.nodes() ordering
        nodelist = G.nodes()
    else:  # use personalization "vector" ordering
        nodelist = list(personalization.keys())
    M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight)
    (n, m) = M.shape  # should be square
    S = np.array(M.sum(axis=1)).flatten()
    # normalize each nonzero row to sum to one; find() skips zero rows,
    # so dangling (all-zero) rows never cause a division by zero here
    for i, j, v in zip(*scipy.sparse.find(M)):
        M[i, j] = v / S[i]
    x = np.ones(n) / n  # initial guess: uniform distribution
    # dangling nodes distribute their probability mass uniformly
    dangle = np.array(np.where(M.sum(axis=1) == 0, 1.0 / n, 0)).flatten()
    # add "teleportation"/personalization
    if personalization is not None:
        v = np.array(list(personalization.values())).astype(float)
        v = v / v.sum()
    else:
        v = x
    i = 0
    while i <= max_iter:
        # power iteration: make up to max_iter iterations
        xlast = x
        x = alpha * (x * M + np.dot(dangle, xlast)) + (1 - alpha) * v
        x = x / x.sum()
        # check convergence, l1 norm
        err = np.absolute(x - xlast).sum()
        if err < n * tol:
            return dict(zip(nodelist, x))
        i += 1
    # i now equals the number of iterations actually performed
    raise NetworkXError('pagerank_scipy: power iteration failed to converge '
                        'in %d iterations.' % i)
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests if NumPy or SciPy is absent."""
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # bare except would also swallow KeyboardInterrupt/SystemExit
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
| bsd-3-clause | 3af7eb800d088d98601ae8f0d1f86e78 | 31.823077 | 90 | 0.624639 | 3.823477 | false | false | false | false |
uwescience/myria-web | appengine/networkx/algorithms/centrality/tests/test_betweenness_centrality.py | 85 | 17053 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
def weighted_G():
    """Build the small weighted graph shared by the weighted test cases."""
    edge_weights = [(0, 1, 3), (0, 2, 2), (0, 3, 6), (0, 4, 4),
                    (1, 3, 5), (1, 5, 5), (2, 4, 1), (3, 4, 2),
                    (3, 5, 1), (4, 5, 4)]
    G = nx.Graph()
    for u, v, w in edge_weights:
        G.add_edge(u, v, weight=w)
    return G
class TestBetweennessCentrality(object):
    """Node betweenness centrality on small graphs with known reference
    values (unweighted shortest paths)."""
    def test_K5(self):
        """Betweenness centrality: K5"""
        G=nx.complete_graph(5)
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False)
        b_answer={0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_K5_endpoints(self):
        """Betweenness centrality: K5 endpoints"""
        G=nx.complete_graph(5)
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False,
                                    endpoints=True)
        b_answer={0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_P3_normalized(self):
        """Betweenness centrality: P3 normalized"""
        G=nx.path_graph(3)
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=True)
        b_answer={0: 0.0, 1: 1.0, 2: 0.0}
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_P3(self):
        """Betweenness centrality: P3"""
        G=nx.path_graph(3)
        b_answer={0: 0.0, 1: 1.0, 2: 0.0}
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_P3_endpoints(self):
        """Betweenness centrality: P3 endpoints"""
        G=nx.path_graph(3)
        b_answer={0: 2.0, 1: 3.0, 2: 2.0}
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False,
                                    endpoints=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_krackhardt_kite_graph(self):
        """Betweenness centrality: Krackhardt kite graph"""
        G=nx.krackhardt_kite_graph()
        b_answer={0: 1.667,1: 1.667,2: 0.000,3: 7.333,4: 0.000,
                  5: 16.667,6: 16.667,7: 28.000,8: 16.000,9: 0.000}
        # published reference values count each undirected path twice
        for b in b_answer:
            b_answer[b]/=2.0
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_krackhardt_kite_graph_normalized(self):
        """Betweenness centrality: Krackhardt kite graph normalized"""
        G=nx.krackhardt_kite_graph()
        b_answer={0:0.023,1:0.023,2:0.000,3:0.102,4:0.000,
                  5:0.231,6:0.231,7:0.389,8:0.222,9:0.000}
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_florentine_families_graph(self):
        """Betweenness centrality: Florentine families graph"""
        G=nx.florentine_families_graph()
        b_answer=\
             {'Acciaiuoli': 0.000,
              'Albizzi': 0.212,
              'Barbadori': 0.093,
              'Bischeri': 0.104,
              'Castellani': 0.055,
              'Ginori': 0.000,
              'Guadagni': 0.255,
              'Lamberteschi': 0.000,
              'Medici': 0.522,
              'Pazzi': 0.000,
              'Peruzzi': 0.022,
              'Ridolfi': 0.114,
              'Salviati': 0.143,
              'Strozzi': 0.103,
              'Tornabuoni': 0.092}
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_ladder_graph(self):
        """Betweenness centrality: Ladder graph"""
        G = nx.Graph() # ladder_graph(3)
        G.add_edges_from([(0,1), (0,2), (1,3), (2,3),
                          (2,4), (4,5), (3,5)])
        b_answer={0:1.667,1: 1.667,2: 6.667,
                  3: 6.667,4: 1.667,5: 1.667}
        # reference values count each undirected path twice
        for b in b_answer:
            b_answer[b]/=2.0
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_disconnected_path(self):
        """Betweenness centrality: disconnected path"""
        G=nx.Graph()
        G.add_path([0,1,2])
        G.add_path([3,4,5,6])
        b_answer={0:0,1:1,2:0,3:0,4:2,5:2,6:0}
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_disconnected_path_endpoints(self):
        """Betweenness centrality: disconnected path endpoints"""
        G=nx.Graph()
        G.add_path([0,1,2])
        G.add_path([3,4,5,6])
        b_answer={0:2,1:3,2:2,3:3,4:5,5:5,6:3}
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False,
                                    endpoints=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_directed_path(self):
        """Betweenness centrality: directed path"""
        G=nx.DiGraph()
        G.add_path([0,1,2])
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=False)
        b_answer={0: 0.0, 1: 1.0, 2: 0.0}
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_directed_path_normalized(self):
        """Betweenness centrality: directed path normalized"""
        G=nx.DiGraph()
        G.add_path([0,1,2])
        b=nx.betweenness_centrality(G,
                                    weight=None,
                                    normalized=True)
        b_answer={0: 0.0, 1: 0.5, 2: 0.0}
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
class TestWeightedBetweennessCentrality(object):
    """Node betweenness centrality computed over weighted shortest paths."""
    def test_K5(self):
        """Weighted betweenness centrality: K5"""
        G=nx.complete_graph(5)
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=False)
        b_answer={0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_P3_normalized(self):
        """Weighted betweenness centrality: P3 normalized"""
        G=nx.path_graph(3)
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=True)
        b_answer={0: 0.0, 1: 1.0, 2: 0.0}
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_P3(self):
        """Weighted betweenness centrality: P3"""
        G=nx.path_graph(3)
        b_answer={0: 0.0, 1: 1.0, 2: 0.0}
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_krackhardt_kite_graph(self):
        """Weighted betweenness centrality: Krackhardt kite graph"""
        G=nx.krackhardt_kite_graph()
        b_answer={0: 1.667,1: 1.667,2: 0.000,3: 7.333,4: 0.000,
                  5: 16.667,6: 16.667,7: 28.000,8: 16.000,9: 0.000}
        # published reference values count each undirected path twice
        for b in b_answer:
            b_answer[b]/=2.0
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_krackhardt_kite_graph_normalized(self):
        """Weighted betweenness centrality:
        Krackhardt kite graph normalized
        """
        G=nx.krackhardt_kite_graph()
        b_answer={0:0.023,1:0.023,2:0.000,3:0.102,4:0.000,
                  5:0.231,6:0.231,7:0.389,8:0.222,9:0.000}
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_florentine_families_graph(self):
        """Weighted betweenness centrality:
        Florentine families graph"""
        G=nx.florentine_families_graph()
        b_answer=\
             {'Acciaiuoli': 0.000,
              'Albizzi': 0.212,
              'Barbadori': 0.093,
              'Bischeri': 0.104,
              'Castellani': 0.055,
              'Ginori': 0.000,
              'Guadagni': 0.255,
              'Lamberteschi': 0.000,
              'Medici': 0.522,
              'Pazzi': 0.000,
              'Peruzzi': 0.022,
              'Ridolfi': 0.114,
              'Salviati': 0.143,
              'Strozzi': 0.103,
              'Tornabuoni': 0.092}
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_ladder_graph(self):
        """Weighted betweenness centrality: Ladder graph"""
        G = nx.Graph() # ladder_graph(3)
        G.add_edges_from([(0,1), (0,2), (1,3), (2,3),
                          (2,4), (4,5), (3,5)])
        b_answer={0:1.667,1: 1.667,2: 6.667,
                  3: 6.667,4: 1.667,5: 1.667}
        # reference values count each undirected path twice
        for b in b_answer:
            b_answer[b]/=2.0
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n],places=3)
    def test_G(self):
        """Weighted betweenness centrality: G"""
        G = weighted_G()
        b_answer={0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0}
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_G2(self):
        """Weighted betweenness centrality: G2"""
        G=nx.DiGraph()
        G.add_weighted_edges_from([('s','u',10) ,('s','x',5) ,
                                   ('u','v',1) ,('u','x',2) ,
                                   ('v','y',1) ,('x','u',3) ,
                                   ('x','v',5) ,('x','y',2) ,
                                   ('y','s',7) ,('y','v',6)])
        b_answer={'y':5.0,'x':5.0,'s':4.0,'u':2.0,'v':2.0}
        b=nx.betweenness_centrality(G,
                                    weight='weight',
                                    normalized=False)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
class TestEdgeBetweennessCentrality(object):
    """Edge betweenness centrality (unweighted) on small graphs."""
    def test_K5(self):
        """Edge betweenness centrality: K5"""
        G=nx.complete_graph(5)
        b=nx.edge_betweenness_centrality(G, weight=None, normalized=False)
        b_answer=dict.fromkeys(G.edges(),1)
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_normalized_K5(self):
        """Edge betweenness centrality: K5"""
        G=nx.complete_graph(5)
        b=nx.edge_betweenness_centrality(G, weight=None, normalized=True)
        b_answer=dict.fromkeys(G.edges(),1/10.0)
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_C4(self):
        """Edge betweenness centrality: C4"""
        G=nx.cycle_graph(4)
        b=nx.edge_betweenness_centrality(G, weight=None, normalized=True)
        b_answer={(0, 1):2,(0, 3):2, (1, 2):2, (2, 3): 2}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n]/6.0)
    def test_P4(self):
        """Edge betweenness centrality: P4"""
        G=nx.path_graph(4)
        b=nx.edge_betweenness_centrality(G, weight=None, normalized=False)
        b_answer={(0, 1):3,(1, 2):4, (2, 3):3}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_normalized_P4(self):
        """Edge betweenness centrality: P4"""
        G=nx.path_graph(4)
        b=nx.edge_betweenness_centrality(G, weight=None, normalized=True)
        b_answer={(0, 1):3,(1, 2):4, (2, 3):3}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n]/6.0)
    def test_balanced_tree(self):
        """Edge betweenness centrality: balanced tree"""
        G=nx.balanced_tree(r=2,h=2)
        b=nx.edge_betweenness_centrality(G, weight=None, normalized=False)
        b_answer={(0, 1):12,(0, 2):12,
                  (1, 3):6,(1, 4):6,(2, 5):6,(2,6):6}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
class TestWeightedEdgeBetweennessCentrality(object):
    """Edge betweenness centrality computed over weighted shortest paths."""
    def test_K5(self):
        """Edge betweenness centrality: K5"""
        G=nx.complete_graph(5)
        b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
        b_answer=dict.fromkeys(G.edges(),1)
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_C4(self):
        """Edge betweenness centrality: C4"""
        G=nx.cycle_graph(4)
        b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
        b_answer={(0, 1):2,(0, 3):2, (1, 2):2, (2, 3): 2}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_P4(self):
        """Edge betweenness centrality: P4"""
        G=nx.path_graph(4)
        b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
        b_answer={(0, 1):3,(1, 2):4, (2, 3):3}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_balanced_tree(self):
        """Edge betweenness centrality: balanced tree"""
        G=nx.balanced_tree(r=2,h=2)
        b=nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
        b_answer={(0, 1):12,(0, 2):12,
                  (1, 3):6,(1, 4):6,(2, 5):6,(2,6):6}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_weighted_graph(self):
        # hand-computed values on a small weighted graph
        eList = [(0, 1, 5), (0, 2, 4), (0, 3, 3),
                 (0, 4, 2), (1, 2, 4), (1, 3, 1),
                 (1, 4, 3), (2, 4, 5), (3, 4, 4)]
        G = nx.Graph()
        G.add_weighted_edges_from(eList)
        b = nx.edge_betweenness_centrality(G, weight='weight', normalized=False)
        b_answer={(0, 1):0.0,
                  (0, 2):1.0,
                  (0, 3):2.0,
                  (0, 4):1.0,
                  (1, 2):2.0,
                  (1, 3):3.5,
                  (1, 4):1.5,
                  (2, 4):1.0,
                  (3, 4):0.5}
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n])
    def test_normalized_weighted_graph(self):
        # same graph as test_weighted_graph, checked with normalization
        eList = [(0, 1, 5), (0, 2, 4), (0, 3, 3),
                 (0, 4, 2), (1, 2, 4), (1, 3, 1),
                 (1, 4, 3), (2, 4, 5), (3, 4, 4)]
        G = nx.Graph()
        G.add_weighted_edges_from(eList)
        b = nx.edge_betweenness_centrality(G, weight='weight', normalized=True)
        b_answer={(0, 1):0.0,
                  (0, 2):1.0,
                  (0, 3):2.0,
                  (0, 4):1.0,
                  (1, 2):2.0,
                  (1, 3):3.5,
                  (1, 4):1.5,
                  (2, 4):1.0,
                  (3, 4):0.5}
        # normalization constant: number of node pairs
        norm = len(G)*(len(G)-1)/2.0
        for n in sorted(G.edges()):
            assert_almost_equal(b[n],b_answer[n]/norm)
| bsd-3-clause | e4c905893137d5969fd7344e7aa22848 | 35.911255 | 80 | 0.455756 | 3.260612 | false | true | false | false |
uwescience/myria-web | appengine/networkx/algorithms/bipartite/spectral.py | 76 | 2538 | # -*- coding: utf-8 -*-
"""
Spectral bipartivity measure.
"""
import networkx as nx
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['spectral_bipartivity']
def spectral_bipartivity(G, nodes=None, weight='weight'):
    """Returns the spectral bipartivity.

    Parameters
    ----------
    G : NetworkX graph

    nodes : list or container  optional(default is all nodes)
      Nodes to return value of spectral bipartivity contribution.

    weight : string or None  optional (default = 'weight')
      Edge data key to use for edge weights. If None, weights set to 1.

    Returns
    -------
    sb : float or dict
       A single number if the keyword nodes is not specified, or
       a dictionary keyed by node with the spectral bipartivity contribution
       of that node as the value.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> bipartite.spectral_bipartivity(G)
    1.0

    Notes
    -----
    This implementation uses Numpy (dense) matrices which are not efficient
    for storing large sparse graphs.

    See Also
    --------
    color

    References
    ----------
    .. [1] E. Estrada and J. A. Rodríguez-Velázquez, "Spectral measures of
       bipartivity in complex networks", PhysRev E 72, 046105 (2005)
    """
    try:
        import scipy.linalg
    except ImportError:
        raise ImportError('spectral_bipartivity() requires SciPy: ',
                          'http://scipy.org/')
    ordering = G.nodes()  # fixes the row/column ordering of the matrix
    A = nx.to_numpy_matrix(G, ordering, weight=weight)
    pos_exp = scipy.linalg.expm(A)
    neg_exp = scipy.linalg.expm(-A)
    even_part = 0.5 * (pos_exp + neg_exp)  # matrix cosh(A)
    if nodes is None:
        # single measure for the whole graph
        return even_part.diagonal().sum() / pos_exp.diagonal().sum()
    # per-node contributions for the requested nodes
    position = {}
    for i, n in enumerate(ordering):
        position[n] = i
    contributions = {}
    for n in nodes:
        i = position[n]
        contributions[n] = even_part[i, i] / pos_exp[i, i]
    return contributions
def setup_module(module):
    """Fixture for nose tests: skip when NumPy or SciPy is missing."""
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # bare except would also swallow KeyboardInterrupt/SystemExit
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
| bsd-3-clause | 7668364107dc9a3a707cc3418a6d9620 | 27.818182 | 76 | 0.606073 | 3.745938 | false | false | false | false |
uwescience/myria-web | appengine/networkx/algorithms/centrality/current_flow_closeness.py | 3 | 4287 | """
Current-flow closeness centrality measures.
"""
# Copyright (C) 2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg <aric.hagberg@gmail.com>"""
__all__ = ['current_flow_closeness_centrality','information_centrality']
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import *
def current_flow_closeness_centrality(G, normalized=True, weight='weight',
                                      dtype=float, solver='lu'):
    """Compute current-flow closeness centrality for nodes.

    A variant of closeness centrality based on effective
    resistance between nodes in a network.  This metric
    is also known as information centrality.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool, optional
      If True the values are normalized by 1/(n-1) where n is the
      number of nodes in G.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with current flow closeness centrality as the value.

    See Also
    --------
    closeness_centrality

    Notes
    -----
    The algorithm is from Brandes [1]_.

    See also [2]_ for the original definition of information centrality.

    References
    ----------
    .. [1] Ulrik Brandes and Daniel Fleischer,
       Centrality Measures Based on Current Flow.
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf

    .. [2] Stephenson, K. and Zelen, M.
       Rethinking centrality: Methods and examples.
       Social Networks. Volume 11, Issue 1, March 1989, pp. 1-37
       http://dx.doi.org/10.1016/0378-8733(89)90016-6
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        # single-string message; passing two args made the message a tuple
        raise ImportError('current_flow_closeness_centrality requires NumPy '
                          'http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_closeness_centrality requires SciPy '
                          'http://scipy.org/')
    if G.is_directed():
        # previously this check was duplicated, once with a tuple message
        raise nx.NetworkXError(
            "current_flow_closeness_centrality() not defined for digraphs.")
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    solvername = {"full": FullInverseLaplacian,
                  "lu": SuperLUInverseLaplacian,
                  "cg": CGInverseLaplacian}
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    betweenness = dict.fromkeys(H, 0.0)  # b[v]=0 for v in H
    L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
                                dtype=dtype, format='csc')
    C2 = solvername[solver](L, width=1, dtype=dtype)  # initialize solver
    for v in H:
        col = C2.get_row(v)
        for w in H:
            betweenness[v] += col[v] - 2 * col[w]
            betweenness[w] += col[v]
    if normalized:
        nb = len(betweenness) - 1.0
    else:
        nb = 1.0
    # closeness = nb / (sum of effective resistances)
    for v in H:
        betweenness[v] = nb / (betweenness[v])
    # map integer labels back to the original node labels
    return dict((ordering[k], v) for k, v in betweenness.items())

information_centrality = current_flow_closeness_centrality
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip tests when NumPy or SciPy is missing.

    This module requires SciPy as well as NumPy, so check both
    (the original fixture only checked NumPy and used a bare except).
    """
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
| bsd-3-clause | e2d6011c0a84b9ce087368d48568863b | 32.492188 | 79 | 0.627245 | 3.705272 | false | false | false | false |
uwescience/myria-web | appengine/networkx/algorithms/assortativity/mixing.py | 93 | 6746 | #-*- coding: utf-8 -*-
"""
Mixing matrices for node attributes and degree.
"""
import networkx as nx
from networkx.utils import dict_to_numpy_array
from networkx.algorithms.assortativity.pairs import node_degree_xy, \
node_attribute_xy
__author__ = ' '.join(['Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['attribute_mixing_matrix',
'attribute_mixing_dict',
'degree_mixing_matrix',
'degree_mixing_dict',
'numeric_mixing_matrix',
'mixing_dict']
def attribute_mixing_dict(G, attribute, nodes=None, normalized=False):
    """Return dictionary representation of mixing matrix for attribute.

    Parameters
    ----------
    G : graph
       NetworkX graph object.

    attribute : string
       Node attribute key.

    nodes: list or iterable (optional)
       Restrict the dictionary to the given nodes. Default is all nodes.

    normalized : bool (default=False)
       Return counts if False or probabilities if True.

    Examples
    --------
    >>> G=nx.Graph()
    >>> G.add_nodes_from([0,1],color='red')
    >>> G.add_nodes_from([2,3],color='blue')
    >>> G.add_edge(1,3)
    >>> d=nx.attribute_mixing_dict(G,'color')
    >>> print(d['red']['blue'])
    1
    >>> print(d['blue']['red']) # d symmetric for undirected graphs
    1

    Returns
    -------
    d : dictionary
       Counts or joint probability of occurrence of attribute pairs.
    """
    pairs = node_attribute_xy(G, attribute, nodes)
    return mixing_dict(pairs, normalized=normalized)
def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None,
                            normalized=True):
    """Return mixing matrix for attribute.

    Parameters
    ----------
    G : graph
       NetworkX graph object.

    attribute : string
       Node attribute key.

    nodes: list or iterable (optional)
       Use only nodes in container to build the matrix. The default is
       all nodes.

    mapping : dictionary, optional
       Mapping from node attribute to integer index in matrix.
       If not specified, an arbitrary ordering will be used.

    normalized : bool (default=True)
       Return counts if False or probabilities if True.

    Returns
    -------
    m: numpy array
       Counts or joint probability of occurrence of attribute pairs.
    """
    counts = attribute_mixing_dict(G, attribute, nodes)
    m = dict_to_numpy_array(counts, mapping=mapping)
    if not normalized:
        return m
    return m / m.sum()
def degree_mixing_dict(G, x='out', y='in', weight=None,
                       nodes=None, normalized=False):
    """Return dictionary representation of mixing matrix for degree.

    Parameters
    ----------
    G : graph
        NetworkX graph object.

    x: string ('in','out')
       The degree type for source node (directed graphs only).

    y: string ('in','out')
       The degree type for target node (directed graphs only).

    weight: string or None, optional (default=None)
       The edge attribute that holds the numerical value used
       as a weight.  If None, then each edge has weight 1.
       The degree is the sum of the edge weights adjacent to the node.

    normalized : bool (default=False)
        Return counts if False or probabilities if True.

    Returns
    -------
    d: dictionary
       Counts or joint probability of occurrence of degree pairs.
    """
    pairs = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
    return mixing_dict(pairs, normalized=normalized)
def degree_mixing_matrix(G, x='out', y='in', weight=None,
                         nodes=None, normalized=True):
    """Return mixing matrix for degree.

    (Docstring previously said "for attribute" — a copy/paste error.)

    Parameters
    ----------
    G : graph
       NetworkX graph object.

    x: string ('in','out')
       The degree type for source node (directed graphs only).

    y: string ('in','out')
       The degree type for target node (directed graphs only).

    nodes: list or iterable (optional)
        Build the matrix using only nodes in container.
        The default is all nodes.

    weight: string or None, optional (default=None)
       The edge attribute that holds the numerical value used
       as a weight.  If None, then each edge has weight 1.
       The degree is the sum of the edge weights adjacent to the node.

    normalized : bool (default=True)
       Return counts if False or probabilities if True.

    Returns
    -------
    m: numpy array
       Counts, or joint probability, of occurrence of node degree.
    """
    d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight)
    # collect every degree value that appears as source or target
    s = set(d.keys())
    for k, v in d.items():
        s.update(v.keys())
    m = max(s)
    # rows/columns are indexed directly by degree value, so use an
    # identity mapping over 0..max_degree
    mapping = dict((i, i) for i in range(m + 1))
    a = dict_to_numpy_array(d, mapping=mapping)
    if normalized:
        a = a / a.sum()
    return a
def numeric_mixing_matrix(G, attribute, nodes=None, normalized=True):
    """Return numeric mixing matrix for attribute.

    Parameters
    ----------
    G : graph
       NetworkX graph object.

    attribute : string
       Node attribute key.  Values are assumed to be small non-negative
       integers that index the matrix directly.

    nodes: list or iterable (optional)
        Build the matrix only with nodes in container. The default is all nodes.

    normalized : bool (default=True)
       Return counts if False or probabilities if True.

    Returns
    -------
    m: numpy array
       Counts, or joint, probability of occurrence of node attribute pairs.
    """
    counts = attribute_mixing_dict(G, attribute, nodes)
    # gather every attribute value seen as source or target
    values = set(counts.keys())
    for row in counts.values():
        values.update(row.keys())
    top = max(values)
    identity = dict((i, i) for i in range(top + 1))
    a = dict_to_numpy_array(counts, mapping=identity)
    if normalized:
        a = a / a.sum()
    return a
def mixing_dict(xy, normalized=False):
    """Return a dictionary representation of mixing matrix.

    Parameters
    ----------
    xy : list or container of two-tuples
       Pairs of (x,y) items.

    normalized : bool (default=False)
       Return counts if False or probabilities if True.

    Returns
    -------
    d: dictionary
       Counts or Joint probability of occurrence of values in xy.
       Every value that appears as a source or a target gets a key,
       so targets that are never sources map to an empty dict.
    """
    counts = {}
    total = 0.0
    for u, v in xy:
        row = counts.setdefault(u, {})
        counts.setdefault(v, {})  # ensure targets also appear as keys
        row[v] = row.get(v, 0) + 1
        total += 1
    if normalized:
        for row in counts.values():
            for key in row:
                row[key] /= total
    return counts
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests if NumPy or SciPy is absent."""
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # bare except would also swallow KeyboardInterrupt/SystemExit
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
uwescience/myria-web | appengine/networkx/linalg/attrmatrix.py | 78 | 15994 | """
Functions for constructing matrix-like objects from graph attributes.
"""
__all__ = ['attr_matrix', 'attr_sparse_matrix']
import networkx as nx
def _node_value(G, node_attr):
"""Returns a function that returns a value from G.node[u].
We return a function expecting a node as its sole argument. Then, in the
simplest scenario, the returned function will return G.node[u][node_attr].
However, we also handle the case when `node_attr` is None or when it is a
function itself.
Parameters
----------
G : graph
A NetworkX graph
node_attr : {None, str, callable}
Specification of how the value of the node attribute should be obtained
from the node attribute dictionary.
Returns
-------
value : function
A function expecting a node as its sole argument. The function will
returns a value from G.node[u] that depends on `edge_attr`.
"""
if node_attr is None:
value = lambda u: u
elif not hasattr(node_attr, '__call__'):
# assume it is a key for the node attribute dictionary
value = lambda u: G.node[u][node_attr]
else:
# Advanced: Allow users to specify something else.
#
# For example,
# node_attr = lambda u: G.node[u].get('size', .5) * 3
#
value = node_attr
return value
def _edge_value(G, edge_attr):
"""Returns a function that returns a value from G[u][v].
Suppose there exists an edge between u and v. Then we return a function
expecting u and v as arguments. For Graph and DiGraph, G[u][v] is
the edge attribute dictionary, and the function (essentially) returns
G[u][v][edge_attr]. However, we also handle cases when `edge_attr` is None
and when it is a function itself. For MultiGraph and MultiDiGraph, G[u][v]
is a dictionary of all edges between u and v. In this case, the returned
function sums the value of `edge_attr` for every edge between u and v.
Parameters
----------
G : graph
A NetworkX graph
edge_attr : {None, str, callable}
Specification of how the value of the edge attribute should be obtained
from the edge attribute dictionary, G[u][v]. For multigraphs, G[u][v]
is a dictionary of all the edges between u and v. This allows for
special treatment of multiedges.
Returns
-------
value : function
A function expecting two nodes as parameters. The nodes should
represent the from- and to- node of an edge. The function will
return a value from G[u][v] that depends on `edge_attr`.
"""
if edge_attr is None:
# topological count of edges
if G.is_multigraph():
value = lambda u,v: len(G[u][v])
else:
value = lambda u,v: 1
elif not hasattr(edge_attr, '__call__'):
# assume it is a key for the edge attribute dictionary
if edge_attr == 'weight':
# provide a default value
if G.is_multigraph():
value = lambda u,v: sum([d.get(edge_attr, 1) for d in G[u][v].values()])
else:
value = lambda u,v: G[u][v].get(edge_attr, 1)
else:
# otherwise, the edge attribute MUST exist for each edge
if G.is_multigraph():
value = lambda u,v: sum([d[edge_attr] for d in G[u][v].values()])
else:
value = lambda u,v: G[u][v][edge_attr]
else:
# Advanced: Allow users to specify something else.
#
# Alternative default value:
# edge_attr = lambda u,v: G[u][v].get('thickness', .5)
#
# Function on an attribute:
# edge_attr = lambda u,v: abs(G[u][v]['weight'])
#
# Handle Multi(Di)Graphs differently:
# edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()])
#
# Ignore multiple edges
# edge_attr = lambda u,v: 1 if len(G[u][v]) else 0
#
value = edge_attr
return value
def attr_matrix(G, edge_attr=None, node_attr=None, normalized=False,
                rc_order=None, dtype=None, order=None):
    """Returns a NumPy matrix using attributes from G.
    If only `G` is passed in, then the adjacency matrix is constructed.
    Let A be a discrete set of values for the node attribute `node_attr`. Then
    the elements of A represent the rows and columns of the constructed matrix.
    Now, iterate through every edge e=(u,v) in `G` and consider the value
    of the edge attribute `edge_attr`. If ua and va are the values of the
    node attribute `node_attr` for u and v, respectively, then the value of
    the edge attribute is added to the matrix element at (ua, va).
    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.
    edge_attr : str, optional
        Each element of the matrix represents a running total of the
        specified edge attribute for edges whose node attributes correspond
        to the rows/cols of the matrix. The attribute must be present for
        all edges in the graph. If no attribute is specified, then we
        just count the number of edges whose node attributes correspond
        to the matrix element.
    node_attr : str, optional
        Each row and column in the matrix represents a particular value
        of the node attribute. The attribute must be present for all nodes
        in the graph. Note, the values of this attribute should be reliably
        hashable. So, float values are not recommended. If no attribute is
        specified, then the rows and columns will be the nodes of the graph.
    normalized : bool, optional
        If True, then each row is normalized by the summation of its values.
    rc_order : list, optional
        A list of the node attribute values. This list specifies the ordering
        of rows and columns of the array. If no ordering is provided, then
        the ordering will be random (and also, a return value).
    Other Parameters
    ----------------
    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. Keep in mind certain
        dtypes can yield unexpected results if the array is to be normalized.
        The parameter is passed to numpy.zeros(). If unspecified, the NumPy
        default is used.
    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. This parameter is passed to
        numpy.zeros(). If unspecified, the NumPy default is used.
    Returns
    -------
    M : NumPy matrix
        The attribute matrix.
    ordering : list
        If `rc_order` was specified, then only the matrix is returned.
        However, if `rc_order` was None, then the ordering used to construct
        the matrix is returned as well.
    Examples
    --------
    Construct an adjacency matrix:
    >>> G = nx.Graph()
    >>> G.add_edge(0,1,thickness=1,weight=3)
    >>> G.add_edge(0,2,thickness=2)
    >>> G.add_edge(1,2,thickness=3)
    >>> nx.attr_matrix(G, rc_order=[0,1,2])
    matrix([[ 0.,  1.,  1.],
            [ 1.,  0.,  1.],
            [ 1.,  1.,  0.]])
    Alternatively, we can obtain the matrix describing edge thickness.
    >>> nx.attr_matrix(G, edge_attr='thickness', rc_order=[0,1,2])
    matrix([[ 0.,  1.,  2.],
            [ 1.,  0.,  3.],
            [ 2.,  3.,  0.]])
    We can also color the nodes and ask for the probability distribution over
    all edges (u,v) describing:
        Pr(v has color Y | u has color X)
    >>> G.node[0]['color'] = 'red'
    >>> G.node[1]['color'] = 'red'
    >>> G.node[2]['color'] = 'blue'
    >>> rc = ['red', 'blue']
    >>> nx.attr_matrix(G, node_attr='color', normalized=True, rc_order=rc)
    matrix([[ 0.33333333,  0.66666667],
            [ 1.        ,  0.        ]])
    For example, the above tells us that for all edges (u,v):
        Pr( v is red  | u is red)  = 1/3
        Pr( v is blue | u is red)  = 2/3
        Pr( v is red  | u is blue) = 1
        Pr( v is blue | u is blue) = 0
    Finally, we can obtain the total weights listed by the node colors.
    >>> nx.attr_matrix(G, edge_attr='weight', node_attr='color', rc_order=rc)
    matrix([[ 3.,  2.],
            [ 2.,  0.]])
    Thus, the total weight over all edges (u,v) with u and v having colors:
        (red, red)   is 3   # the sole contribution is from edge (0,1)
        (red, blue)  is 2   # contributions from edges (0,2) and (1,2)
        (blue, red)  is 2   # same as (red, blue) since graph is undirected
        (blue, blue) is 0   # there are no edges with blue endpoints
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "attr_matrix() requires numpy: http://scipy.org/ ")
    # Accessor functions mapping (u,v) -> edge value and n -> node value.
    # _edge_value and _node_value are helpers defined earlier in this module.
    edge_value = _edge_value(G, edge_attr)
    node_value = _node_value(G, node_attr)
    if rc_order is None:
        # No ordering given: use the distinct node-attribute values in
        # arbitrary (set) order, and return that ordering to the caller.
        ordering = list(set([node_value(n) for n in G]))
    else:
        ordering = rc_order
    N = len(ordering)
    undirected = not G.is_directed()
    # Map each attribute value to its row/column index in M.
    index = dict(zip(ordering, range(N)))
    M = np.zeros((N,N), dtype=dtype, order=order)
    seen = set([])  # nodes whose incident edges are fully counted (undirected case)
    for u,nbrdict in G.adjacency_iter():
        for v in nbrdict:
            # Obtain the node attribute values.
            i, j = index[node_value(u)], index[node_value(v)]
            if v not in seen:
                # For undirected graphs adjacency_iter() reports each edge
                # twice ((u,v) and (v,u)); `seen` ensures a single count,
                # with the symmetric entry mirrored explicitly below.
                M[i,j] += edge_value(u,v)
                if undirected:
                    M[j,i] = M[i,j]
        if undirected:
            seen.add(u)
    if normalized:
        # Row-normalize.  NOTE(review): a row whose sum is zero produces
        # NaN/inf entries here -- confirm callers avoid that case.
        M /= M.sum(axis=1).reshape((N,1))
    M = np.asmatrix(M)
    if rc_order is None:
        return M, ordering
    else:
        return M
def attr_sparse_matrix(G, edge_attr=None, node_attr=None,
                       normalized=False, rc_order=None, dtype=None):
    """Returns a SciPy sparse matrix using attributes from G.
    If only `G` is passed in, then the adjacency matrix is constructed.
    Let A be a discrete set of values for the node attribute `node_attr`. Then
    the elements of A represent the rows and columns of the constructed matrix.
    Now, iterate through every edge e=(u,v) in `G` and consider the value
    of the edge attribute `edge_attr`. If ua and va are the values of the
    node attribute `node_attr` for u and v, respectively, then the value of
    the edge attribute is added to the matrix element at (ua, va).
    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.
    edge_attr : str, optional
        Each element of the matrix represents a running total of the
        specified edge attribute for edges whose node attributes correspond
        to the rows/cols of the matrix. The attribute must be present for
        all edges in the graph. If no attribute is specified, then we
        just count the number of edges whose node attributes correspond
        to the matrix element.
    node_attr : str, optional
        Each row and column in the matrix represents a particular value
        of the node attribute. The attribute must be present for all nodes
        in the graph. Note, the values of this attribute should be reliably
        hashable. So, float values are not recommended. If no attribute is
        specified, then the rows and columns will be the nodes of the graph.
    normalized : bool, optional
        If True, then each row is normalized by the summation of its values.
    rc_order : list, optional
        A list of the node attribute values. This list specifies the ordering
        of rows and columns of the array. If no ordering is provided, then
        the ordering will be random (and also, a return value).
    Other Parameters
    ----------------
    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. Keep in mind certain
        dtypes can yield unexpected results if the array is to be normalized.
        The parameter is passed to numpy.zeros(). If unspecified, the NumPy
        default is used.
    Returns
    -------
    M : SciPy sparse matrix
        The attribute matrix.
    ordering : list
        If `rc_order` was specified, then only the matrix is returned.
        However, if `rc_order` was None, then the ordering used to construct
        the matrix is returned as well.
    Examples
    --------
    Construct an adjacency matrix:
    >>> G = nx.Graph()
    >>> G.add_edge(0,1,thickness=1,weight=3)
    >>> G.add_edge(0,2,thickness=2)
    >>> G.add_edge(1,2,thickness=3)
    >>> M = nx.attr_sparse_matrix(G, rc_order=[0,1,2])
    >>> M.todense()
    matrix([[ 0.,  1.,  1.],
            [ 1.,  0.,  1.],
            [ 1.,  1.,  0.]])
    Alternatively, we can obtain the matrix describing edge thickness.
    >>> M = nx.attr_sparse_matrix(G, edge_attr='thickness', rc_order=[0,1,2])
    >>> M.todense()
    matrix([[ 0.,  1.,  2.],
            [ 1.,  0.,  3.],
            [ 2.,  3.,  0.]])
    We can also color the nodes and ask for the probability distribution over
    all edges (u,v) describing:
        Pr(v has color Y | u has color X)
    >>> G.node[0]['color'] = 'red'
    >>> G.node[1]['color'] = 'red'
    >>> G.node[2]['color'] = 'blue'
    >>> rc = ['red', 'blue']
    >>> M = nx.attr_sparse_matrix(G, node_attr='color', \
                normalized=True, rc_order=rc)
    >>> M.todense()
    matrix([[ 0.33333333,  0.66666667],
            [ 1.        ,  0.        ]])
    For example, the above tells us that for all edges (u,v):
        Pr( v is red  | u is red)  = 1/3
        Pr( v is blue | u is red)  = 2/3
        Pr( v is red  | u is blue) = 1
        Pr( v is blue | u is blue) = 0
    Finally, we can obtain the total weights listed by the node colors.
    >>> M = nx.attr_sparse_matrix(G, edge_attr='weight',\
                node_attr='color', rc_order=rc)
    >>> M.todense()
    matrix([[ 3.,  2.],
            [ 2.,  0.]])
    Thus, the total weight over all edges (u,v) with u and v having colors:
        (red, red)   is 3   # the sole contribution is from edge (0,1)
        (red, blue)  is 2   # contributions from edges (0,2) and (1,2)
        (blue, red)  is 2   # same as (red, blue) since graph is undirected
        (blue, blue) is 0   # there are no edges with blue endpoints
    """
    try:
        import numpy as np
        from scipy import sparse
    except ImportError:
        raise ImportError(
            "attr_sparse_matrix() requires scipy: http://scipy.org/ ")
    # Accessor functions mapping (u,v) -> edge value and n -> node value.
    # _edge_value and _node_value are helpers defined earlier in this module.
    edge_value = _edge_value(G, edge_attr)
    node_value = _node_value(G, node_attr)
    if rc_order is None:
        # No ordering given: use the distinct node-attribute values in
        # arbitrary (set) order, and return that ordering to the caller.
        ordering = list(set([node_value(n) for n in G]))
    else:
        ordering = rc_order
    N = len(ordering)
    undirected = not G.is_directed()
    # Map each attribute value to its row/column index in M.
    index = dict(zip(ordering, range(N)))
    # lil_matrix supports efficient incremental assignment below.
    M = sparse.lil_matrix((N,N), dtype=dtype)
    seen = set([])  # nodes whose incident edges are fully counted (undirected case)
    for u,nbrdict in G.adjacency_iter():
        for v in nbrdict:
            # Obtain the node attribute values.
            i, j = index[node_value(u)], index[node_value(v)]
            if v not in seen:
                # For undirected graphs adjacency_iter() reports each edge
                # twice ((u,v) and (v,u)); `seen` ensures a single count,
                # with the symmetric entry mirrored explicitly below.
                M[i,j] += edge_value(u,v)
                if undirected:
                    M[j,i] = M[i,j]
        if undirected:
            seen.add(u)
    if normalized:
        # Row-normalize in place, one row at a time (lil_matrix has no
        # broadcasting division).  NOTE(review): a zero row sum divides
        # by zero here -- confirm callers avoid that case.
        norms = np.asarray(M.sum(axis=1)).ravel()
        for i,norm in enumerate(norms):
            M[i,:] /= norm
    if rc_order is None:
        return M, ordering
    else:
        return M
# fixture for nose tests
def setup_module(module):
    """Skip this module's tests when NumPy or SciPy is unavailable.

    Nose calls this hook before running the module's tests; raising
    SkipTest marks them skipped instead of failed.
    """
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # Catch only ImportError: a bare ``except:`` would also swallow
        # KeyboardInterrupt/SystemExit and unrelated startup failures.
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
| bsd-3-clause | b7e94d24dc4ff3ca3fea11f28063d70d | 33.921397 | 88 | 0.587533 | 3.883924 | false | false | false | false |
uwescience/myria-web | appengine/networkx/readwrite/json_graph/tests/test_adjacency.py | 14 | 1269 | import json
from nose.tools import assert_equal, assert_raises, assert_not_equal,assert_true
import networkx as nx
from networkx.readwrite.json_graph import *
class TestAdjacency:
    """Round-trip tests for the adjacency JSON graph format."""

    def test_graph(self):
        # A simple path graph must survive an adjacency_data round trip.
        graph = nx.path_graph(4)
        rebuilt = adjacency_graph(adjacency_data(graph))
        nx.is_isomorphic(graph, rebuilt)

    def test_graph_attributes(self):
        # Node, edge, and graph-level attributes must all be preserved.
        graph = nx.path_graph(4)
        graph.add_node(1, color='red')
        graph.add_edge(1, 2, width=7)
        graph.graph['foo'] = 'bar'
        graph.graph[1] = 'one'
        rebuilt = adjacency_graph(adjacency_data(graph))
        assert_equal(rebuilt.graph['foo'], 'bar')
        assert_equal(rebuilt.node[1]['color'], 'red')
        assert_equal(rebuilt[1][2]['width'], 7)
        # The same must hold after a full JSON serialize/parse cycle.
        serialized = json.dumps(adjacency_data(graph))
        rebuilt = adjacency_graph(json.loads(serialized))
        assert_equal(rebuilt.graph['foo'], 'bar')
        assert_equal(rebuilt.graph[1], 'one')
        assert_equal(rebuilt.node[1]['color'], 'red')
        assert_equal(rebuilt[1][2]['width'], 7)

    def test_digraph(self):
        # Directedness is encoded in the data and restored on rebuild.
        rebuilt = adjacency_graph(adjacency_data(nx.DiGraph()))
        assert_true(rebuilt.is_directed())

    def test_multidigraph(self):
        # Both directedness and multigraph-ness must be restored.
        rebuilt = adjacency_graph(adjacency_data(nx.MultiDiGraph()))
        assert_true(rebuilt.is_directed())
        assert_true(rebuilt.is_multigraph())
| bsd-3-clause | ae0c351e0a70f1db4529995752e2678b | 29.95122 | 80 | 0.583136 | 3.148883 | false | true | false | false |
uwescience/myria-web | appengine/networkx/algorithms/boundary.py | 49 | 2604 | """
Routines to find the boundary of a set of nodes.
Edge boundaries are edges that have only one end
in the set of nodes.
Node boundaries are nodes outside the set of nodes
that have an edge to a node in the set.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__=['edge_boundary','node_boundary']
def edge_boundary(G, nbunch1, nbunch2=None):
    """Return the edge boundary.

    The edge boundary consists of the edges with exactly one endpoint
    in the given node set.

    Parameters
    -----------
    G : graph
       A networkx graph
    nbunch1 : list, container
       Interior node set
    nbunch2 : list, container
       Exterior node set.  If None then it is set to all of the
       nodes in G not in nbunch1.

    Returns
    -------
    elist : list
       List of edges

    Notes
    ------
    Nodes in nbunch1 and nbunch2 that are not in G are ignored.
    nbunch1 and nbunch2 are usually meant to be disjoint, but that is
    not required here.
    """
    if nbunch2 is None:
        # Exterior defaults to the complement of nbunch1: keep every
        # edge leaving the interior set.
        interior = set(n for n in nbunch1 if n in G)
        boundary = []
        for u in interior:
            for v in G[u]:
                if v not in interior:
                    boundary.append((u, v))
        return boundary
    exterior = set(nbunch2)
    boundary = []
    for u in nbunch1:
        if u in G:
            for v in G[u]:
                if v in exterior:
                    boundary.append((u, v))
    return boundary
def node_boundary(G, nbunch1, nbunch2=None):
    """Return the node boundary.

    The node boundary consists of the nodes outside the given node set
    that have a neighbor inside it.

    Parameters
    -----------
    G : graph
       A networkx graph
    nbunch1 : list, container
       Interior node set
    nbunch2 : list, container
       Exterior node set.  If None then it is set to all of the
       nodes in G not in nbunch1.

    Returns
    -------
    nlist : list
       List of nodes.

    Notes
    ------
    Nodes in nbunch1 and nbunch2 that are not in G are ignored.
    nbunch1 and nbunch2 are usually meant to be disjoint, but that is
    not required here.
    """
    interior = set(n for n in nbunch1 if n in G)
    # Union of the interior nodes' neighborhoods, minus the interior itself.
    boundary = set(v for u in interior for v in G[u]) - interior
    if nbunch2 is not None:
        # Restrict to the requested exterior set.
        boundary &= set(nbunch2)
    return list(boundary)
| bsd-3-clause | f899d9ac07dc4b112980e86417164d5c | 24.529412 | 115 | 0.623272 | 3.35567 | false | false | false | false |
uwescience/myria-web | appengine/networkx/algorithms/centrality/eigenvector.py | 3 | 5085 | """
Eigenvector centrality.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Sasha Gutfraind (ag362@cornell.edu)'])
__all__ = ['eigenvector_centrality',
'eigenvector_centrality_numpy']
def eigenvector_centrality(G,max_iter=100,tol=1.0e-6,nstart=None):
    """Compute the eigenvector centrality for the graph G.

    Uses the power method to find the eigenvector for the
    largest eigenvalue of the adjacency matrix of G.

    Parameters
    ----------
    G : graph
      A networkx graph
    max_iter : integer, optional
      Maximum number of iterations in power method.
    tol : float, optional
      Error tolerance used to check convergence in power method iteration.
    nstart : dictionary, optional
      Starting value of eigenvector iteration for each node.

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with eigenvector centrality as the value.

    Raises
    ------
    NetworkXError
       If the power iteration fails to converge within max_iter iterations.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> centrality=nx.eigenvector_centrality(G)
    >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
    ['0 0.37', '1 0.60', '2 0.60', '3 0.37']

    Notes
    ------
    The eigenvector calculation is done by the power iteration method
    and has no guarantee of convergence.  The iteration will stop
    after max_iter iterations or an error tolerance of
    number_of_nodes(G)*tol has been reached.

    For directed graphs this is "right" eigenvector centrality. For
    "left" eigenvector centrality, first reverse the graph with
    G.reverse().

    See Also
    --------
    eigenvector_centrality_numpy
    pagerank
    hits
    """
    from math import sqrt
    if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
        raise Exception(\
            "eigenvector_centrality() not defined for multigraphs.")
    if len(G)==0:
        # BUG FIX: the message previously named eigenvector_centrality_numpy().
        raise nx.NetworkXException(\
            "eigenvector_centrality(): empty graph.")
    if nstart is None:
        # choose starting vector with entries of 1/len(G)
        x=dict([(n,1.0/len(G)) for n in G])
    else:
        x=nstart
    # normalize starting vector to sum to 1
    s=1.0/sum(x.values())
    for k in x: x[k]*=s
    nnodes=G.number_of_nodes()
    # Power iteration: repeatedly apply the adjacency matrix, renormalize,
    # and stop once the vector changes by less than nnodes*tol (L1 norm).
    for i in range(max_iter):
        xlast=x
        x=dict.fromkeys(xlast, 0)
        # do the multiplication y=Ax (edge weights default to 1)
        for n in x:
            for nbr in G[n]:
                x[n]+=xlast[nbr]*G[n][nbr].get('weight',1)
        # normalize vector to unit Euclidean length
        try:
            s=1.0/sqrt(sum(v**2 for v in x.values()))
        except ZeroDivisionError:
            # all-zero vector (e.g. no edges): leave it unscaled
            s=1.0
        for n in x: x[n]*=s
        # check convergence
        err=sum([abs(x[n]-xlast[n]) for n in x])
        if err < nnodes*tol:
            return x
    # BUG FIX: the %-formatting was previously inside the string literal,
    # so the iteration count was never substituted into the message.
    raise nx.NetworkXError("eigenvector_centrality(): power iteration "
                           "failed to converge in %d iterations." % (i + 1))
def eigenvector_centrality_numpy(G):
    """Compute the eigenvector centrality for the graph G.

    Parameters
    ----------
    G : graph
      A networkx graph

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with eigenvector centrality as the value.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> centrality=nx.eigenvector_centrality_numpy(G)
    >>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
    ['0 0.37', '1 0.60', '2 0.60', '3 0.37']

    Notes
    ------
    This algorithm uses the NumPy eigenvalue solver.

    For directed graphs this is "right" eigenvector centrality. For
    "left" eigenvector centrality, first reverse the graph with
    G.reverse().

    See Also
    --------
    eigenvector_centrality
    pagerank
    hits
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(\
            "eigenvector_centrality_numpy() requires NumPy: http://scipy.org/")
    if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
        raise Exception(\
            "eigenvector_centrality_numpy() not defined for multigraphs.")
    if len(G)==0:
        raise nx.NetworkXException(\
            "eigenvector_centrality_numpy(): empty graph.")
    # Dense adjacency matrix with rows/columns in G.nodes() order.
    A=nx.adj_matrix(G,nodelist=G.nodes())
    eigenvalues,eigenvectors=np.linalg.eig(A)
    # eigenvalue indices in reverse sorted order
    ind=eigenvalues.argsort()[::-1]
    # eigenvector of largest eigenvalue at ind[0], normalized
    largest=np.array(eigenvectors[:,ind[0]]).flatten()
    # Eigenvectors are only determined up to sign; multiplying the norm by
    # the sign of the entry sum makes the reported centralities non-negative.
    norm=np.sign(largest.sum())*np.linalg.norm(largest)
    # NOTE(review): assumes iterating G yields nodes in the same order as
    # G.nodes() used to build A -- confirm for the graph classes in use.
    centrality=dict(zip(G,largest/norm))
    return centrality
# fixture for nose tests
def setup_module(module):
    """Skip this module's tests when NumPy is unavailable.

    Nose calls this hook before running the module's tests; raising
    SkipTest marks them skipped instead of failed.
    """
    from nose import SkipTest
    try:
        import numpy
        import numpy.linalg
    except ImportError:
        # Catch only ImportError: a bare ``except:`` would also swallow
        # KeyboardInterrupt/SystemExit and unrelated startup failures.
        raise SkipTest("numpy not available")
| bsd-3-clause | f928fcb67943fad279833c8ce6f77139 | 28.393064 | 79 | 0.611996 | 3.719824 | false | false | false | false |
uwescience/myria-web | appengine/networkx/algorithms/clique.py | 3 | 16137 | """
=======
Cliques
=======
Find and manipulate cliques of graphs.
Note that finding the largest clique of a graph has been
shown to be an NP-complete problem; the algorithms here
could take a long time to run.
http://en.wikipedia.org/wiki/Clique_problem
"""
__author__ = """Dan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['find_cliques', 'find_cliques_recursive', 'make_max_clique_graph',
'make_clique_bipartite' ,'graph_clique_number',
'graph_number_of_cliques', 'node_clique_number',
'number_of_cliques', 'cliques_containing_node',
'project_down', 'project_up']
import networkx
def find_cliques(G):
    """
    Search for all maximal cliques in a graph.

    This algorithm searches for maximal cliques in a graph.
    Maximal cliques are the largest complete subgraphs containing
    a given point.  The largest maximal clique is sometimes called
    the maximum clique.

    This implementation is a generator of lists each
    of which contains the members of a maximal clique.
    To obtain a list of cliques, use list(find_cliques(G)).
    The method essentially unrolls the recursion used in
    the references to avoid issues of recursion stack depth.

    See Also
    --------
    find_cliques_recursive :
       A recursive version of the same algorithm

    Notes
    -----
    Based on the algorithm published by Bron & Kerbosch (1973) [1]_
    as adapted by Tomita, Tanaka and Takahashi (2006) [2]_
    and discussed in Cazals and Karande (2008) [3]_.

    This algorithm ignores self-loops and parallel edges as
    clique is not conventionally defined with such edges.

    There are often many cliques in graphs.  This algorithm can
    run out of memory for large graphs.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J. 1973.
       Algorithm 457: finding all cliques of an undirected graph.
       Commun. ACM 16, 9 (Sep. 1973), 575-577.
       http://portal.acm.org/citation.cfm?doid=362342.362367
    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       The worst-case time complexity for generating all maximal
       cliques and computational experiments,
       Theoretical Computer Science, Volume 363, Issue 1,
       Computing and Combinatorics,
       10th Annual International Conference on
       Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28-42
       http://dx.doi.org/10.1016/j.tcs.2006.06.015
    .. [3] F. Cazals, C. Karande,
       A note on the problem of reporting maximal cliques,
       Theoretical Computer Science,
       Volume 407, Issues 1-3, 6 November 2008, Pages 564-568,
       http://dx.doi.org/10.1016/j.tcs.2008.05.010
    """
    # Cache nbrs (self-loops removed) and find first pivot (highest degree)
    maxconn=-1
    nnbrs={}
    pivotnbrs=set() # handle empty graph
    for n,nbrs in G.adjacency_iter():
        nbrs=set(nbrs)
        nbrs.discard(n)
        conn = len(nbrs)
        if conn > maxconn:
            nnbrs[n] = pivotnbrs = nbrs
            maxconn = conn
        else:
            nnbrs[n] = nbrs
    # Initial setup: cand = nodes that can extend the current clique,
    # done = nodes already used as roots; only candidates outside the
    # pivot's neighborhood (smallcand) need to be tried at each level.
    cand=set(nnbrs)
    smallcand = cand - pivotnbrs
    done=set()
    stack=[]
    clique_so_far=[]
    # Start main loop: `stack` holds the (cand, done, smallcand) state of
    # enclosing search levels, replacing the recursion of the references.
    while smallcand or stack:
        try:
            # Any nodes left to check?
            n=smallcand.pop()
        except KeyError:
            # back out clique_so_far (return to the enclosing level)
            cand,done,smallcand = stack.pop()
            clique_so_far.pop()
            continue
        # Add next node to clique
        clique_so_far.append(n)
        cand.remove(n)
        done.add(n)
        nn=nnbrs[n]
        new_cand = cand & nn
        new_done = done & nn
        # check if we have more to search
        if not new_cand:
            if not new_done:
                # Found a clique!  (no extension possible, nothing covers it)
                yield clique_so_far[:]
            clique_so_far.pop()
            continue
        # Shortcut--only one node left!
        if not new_done and len(new_cand)==1:
            yield clique_so_far + list(new_cand)
            clique_so_far.pop()
            continue
        # find pivot node (max connected in cand)
        # look in done nodes first
        numb_cand=len(new_cand)
        maxconndone=-1
        for n in new_done:
            cn = new_cand & nnbrs[n]
            conn=len(cn)
            if conn > maxconndone:
                pivotdonenbrs=cn
                maxconndone=conn
            if maxconndone==numb_cand:
                break
        # Shortcut--this part of tree already searched: every candidate is
        # adjacent to a done node, so all its cliques were reported before.
        if maxconndone == numb_cand:
            clique_so_far.pop()
            continue
        # still finding pivot node
        # look in cand nodes second
        maxconn=-1
        for n in new_cand:
            cn = new_cand & nnbrs[n]
            conn=len(cn)
            if conn > maxconn:
                pivotnbrs=cn
                maxconn=conn
            if maxconn == numb_cand-1:
                break
        # pivot node is max connected in cand from done or cand
        if maxconndone > maxconn:
            pivotnbrs = pivotdonenbrs
        # save search status for later backout, then descend one level
        stack.append( (cand, done, smallcand) )
        cand=new_cand
        done=new_done
        smallcand = cand - pivotnbrs
def find_cliques_recursive(G):
    """
    Recursive search for all maximal cliques in a graph.

    Maximal cliques are the largest complete subgraphs containing
    a given point.  This implementation returns a list of lists, each
    of which contains the members of a maximal clique.

    See Also
    --------
    find_cliques : A nonrecursive version of the same algorithm

    Notes
    -----
    Based on the algorithm published by Bron & Kerbosch (1973) [1]_
    as adapted by Tomita, Tanaka and Takahashi (2006) [2]_
    and discussed in Cazals and Karande (2008) [3]_.

    This algorithm ignores self-loops and parallel edges as
    clique is not conventionally defined with such edges.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J. 1973.
       Algorithm 457: finding all cliques of an undirected graph.
       Commun. ACM 16, 9 (Sep. 1973), 575-577.
       http://portal.acm.org/citation.cfm?doid=362342.362367
    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       The worst-case time complexity for generating all maximal
       cliques and computational experiments,
       Theoretical Computer Science, Volume 363, Issue 1,
       Computing and Combinatorics,
       10th Annual International Conference on
       Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28-42
       http://dx.doi.org/10.1016/j.tcs.2006.06.015
    .. [3] F. Cazals, C. Karande,
       A note on the problem of reporting maximal cliques,
       Theoretical Computer Science,
       Volume 407, Issues 1-3, 6 November 2008, Pages 564-568,
       http://dx.doi.org/10.1016/j.tcs.2008.05.010
    """
    # Build a self-loop-free neighbor lookup: node -> set of neighbors.
    nnbrs={}
    for n,nbrs in G.adjacency_iter():
        nbrs=set(nbrs)
        nbrs.discard(n)
        nnbrs[n]=nbrs
    if not nnbrs: return [] # empty graph has no cliques
    cand=set(nnbrs)
    done=set()
    clique_so_far=[]
    cliques=[]
    _extend(nnbrs,cand,done,clique_so_far,cliques)
    return cliques

def _extend(nnbrs,cand,done,so_far,cliques):
    """Recursive worker for find_cliques_recursive.

    Extends the partial clique ``so_far`` with nodes from ``cand``,
    appending every completed maximal clique (as a list) to ``cliques``.
    ``nnbrs`` maps node -> neighbor set; ``done`` holds nodes already
    used as roots on this branch of the search.
    """
    # find pivot node (max connections in cand); look in done nodes first
    # so the search can exit early when every candidate is covered.
    maxconn=-1
    numb_cand=len(cand)
    for n in done:
        cn = cand & nnbrs[n]
        conn=len(cn)
        if conn > maxconn:
            pivotnbrs=cn
            maxconn=conn
            if conn==numb_cand:
                # All possible cliques already found on an earlier branch
                return
    for n in cand:
        cn = cand & nnbrs[n]
        conn=len(cn)
        if conn > maxconn:
            pivotnbrs=cn
            maxconn=conn
    # Use pivot to reduce number of nodes to examine
    smallercand = cand - pivotnbrs
    for n in smallercand:
        cand.remove(n)
        so_far.append(n)
        nn=nnbrs[n]
        new_cand=cand & nn
        new_done=done & nn
        if not new_cand and not new_done:
            # Found the clique: no extension possible, nothing covers it
            cliques.append(so_far[:])
        elif not new_done and len(new_cand) == 1:
            # shortcut if only one node left
            # BUG FIX: was ``len(new_cand) is 1`` -- identity comparison
            # with an int literal, which only works due to CPython's
            # small-int caching and warns on modern Python.
            cliques.append(so_far+list(new_cand))
        else:
            _extend(nnbrs, new_cand, new_done, so_far, cliques)
        done.add(so_far.pop())
def make_max_clique_graph(G,create_using=None,name=None):
    """Create the maximal clique graph of a graph.

    Each maximal clique of ``G`` becomes a node (numbered from 1); two
    clique-nodes are joined whenever their cliques share a member of ``G``.

    Notes
    -----
    This should be the same as make_clique_bipartite followed
    by project_up, but it saves all the intermediate steps.
    """
    cliques = [set(clique) for clique in find_cliques(G)]
    if create_using:
        clique_graph = create_using
        clique_graph.clear()
    else:
        clique_graph = networkx.Graph()
    if name is not None:
        clique_graph.name = name
    for i, this_clique in enumerate(cliques):
        clique_graph.add_node(i + 1)
        for j in range(i):
            # Connect clique i+1 to every earlier clique sharing a node.
            if this_clique & cliques[j]:
                clique_graph.add_edge(i + 1, j + 1)
    return clique_graph
def make_clique_bipartite(G,fpos=None,create_using=None,name=None):
    """Create a bipartite clique graph from a graph G.

    Nodes of G are retained as the "bottom nodes" of B and maximal
    cliques of G become "top nodes" of B (named -1, -2, ...).  An edge
    joins a bottom node to each top node (clique) containing it.

    Returns a Graph with an additional attribute dict ``B.node_type``
    keyed by node to "Bottom" or "Top" appropriately.

    If ``fpos`` is truthy, a second additional attribute dict ``B.pos``
    is created to hold the position tuple of each node for viewing
    the bipartite graph.
    """
    cliq=list(find_cliques(G))
    if create_using:
        B=create_using
        B.clear()
    else:
        B=networkx.Graph()
    if name is not None:
        B.name=name
    B.add_nodes_from(G)
    B.node_type={}   # New attribute for B: node -> "Bottom" / "Top"
    for n in B:
        B.node_type[n]="Bottom"
    if fpos:
        # Lay clique nodes on x=0.2 and original nodes on x=0.8,
        # spread evenly in y.
        B.pos={}     # New attribute for B
        delta_cpos=1./len(cliq)
        delta_ppos=1./G.order()
        cpos=0.
        ppos=0.
    for i,cl in enumerate(cliq):
        name= -i-1   # Top nodes get negative names
        B.add_node(name)
        B.node_type[name]="Top"
        if fpos:
            if name not in B.pos:
                B.pos[name]=(0.2,cpos)
                cpos +=delta_cpos
        for v in cl:
            B.add_edge(name,v)
            # BUG FIX: this guard previously tested ``fpos is not None``,
            # which crashed with AttributeError for falsy non-None fpos
            # (e.g. fpos=False) because B.pos is only created when fpos
            # is truthy (see above).
            if fpos:
                if v not in B.pos:
                    B.pos[v]=(0.8,ppos)
                    ppos +=delta_ppos
    return B
def project_down(B,create_using=None,name=None):
    """Project a bipartite clique graph B onto its "bottom nodes".

    Bottom nodes keep their names; two bottom nodes are connected when
    they share at least one top (clique) node in B.

    Returns a Graph.
    """
    if create_using:
        projected = create_using
        projected.clear()
    else:
        projected = networkx.Graph()
    if name is not None:
        projected.name = name
    for node, nbrs in B.adjacency_iter():
        if B.node_type[node] != "Bottom":
            continue
        projected.add_node(node)
        for clique_node in nbrs:
            # Link this bottom node to every other member of the clique.
            edges = [(node, other) for other in B[clique_node] if other != node]
            projected.add_edges_from(edges)
    return projected
def project_up(B,create_using=None,name=None):
    """Project a bipartite clique graph B onto its "top nodes".

    Top (clique) nodes carry negative names in B; the projection flips
    the sign back and connects two cliques whenever they share a bottom
    node in B.

    Returns a Graph.
    """
    if create_using:
        projected = create_using
        projected.clear()
    else:
        projected = networkx.Graph()
    if name is not None:
        projected.name = name
    for node, nbrs in B.adjacency_iter():
        if B.node_type[node] != "Top":
            continue
        flipped = -node   # restore the original (positive) clique name
        projected.add_node(flipped)
        for bottom in nbrs:
            # -other flips each neighboring top node back as well.
            edges = [(flipped, -other) for other in B[bottom] if other != node]
            projected.add_edges_from(edges)
    return projected
def graph_clique_number(G,cliques=None):
    """Return the clique number (size of a largest clique) of G.

    An already-computed iterable of cliques may be passed in to avoid
    recomputing them.
    """
    if cliques is None:
        cliques = find_cliques(G)
    return max(len(clique) for clique in cliques)
def graph_number_of_cliques(G,cliques=None):
    """Return the number of maximal cliques in G.

    An already-computed list of cliques may be passed in to avoid
    recomputing them.
    """
    if cliques is not None:
        return len(cliques)
    # Count without materializing the full clique list.
    return sum(1 for _ in find_cliques(G))
def node_clique_number(G,nodes=None,cliques=None):
    """Return the size of the largest maximal clique containing each node.

    Parameters
    ----------
    G : graph
       A networkx graph.
    nodes : node or list of nodes, optional
       A single node yields a single int; a list yields a dict keyed by
       node.  Defaults to all nodes of G.
    cliques : list of cliques, optional
       Precomputed maximal cliques (as from find_cliques) to avoid
       recomputing them.

    Returns
    -------
    int or dict
       Largest maximal clique size per requested node.
    """
    if cliques is None:
        if nodes is not None:
            # Restrict the search to each node's ego graph; much cheaper
            # than enumerating all cliques of G.
            if isinstance(nodes, list):
                d = {}
                for n in nodes:
                    H = networkx.ego_graph(G, n)
                    d[n] = max(len(c) for c in find_cliques(H))
                return d
            H = networkx.ego_graph(G, nodes)
            return max(len(c) for c in find_cliques(H))
        # nodes is None -- need all cliques of the whole graph
        cliques = list(find_cliques(G))
    if nodes is None:
        nodes = G.nodes()  # default to every node in the graph
    if not isinstance(nodes, list):
        # Single node: return a single int.
        v = nodes
        return max(len(c) for c in cliques if v in c)
    return dict((v, max(len(c) for c in cliques if v in c)) for v in nodes)
def number_of_cliques(G,nodes=None,cliques=None):
    """Return the number of maximal cliques containing each given node.

    Returns a single int for a single node, or a dict keyed by node for
    a list of nodes.  A precomputed list of cliques may be supplied.
    """
    if cliques is None:
        cliques = list(find_cliques(G))
    if nodes is None:
        nodes = G.nodes()  # default to every node in the graph
    if isinstance(nodes, list):
        return dict((v, sum(1 for c in cliques if v in c)) for v in nodes)
    # Single node: return a single count.
    return sum(1 for c in cliques if nodes in c)
def cliques_containing_node(G,nodes=None,cliques=None):
    """Return the cliques containing each given node.

    Returns a single list of cliques for one node, or a dict of lists
    keyed by node for a list of nodes.  A precomputed list of cliques
    may be supplied.
    """
    if cliques is None:
        cliques = list(find_cliques(G))
    if nodes is None:
        nodes = G.nodes()  # default to every node in the graph
    if isinstance(nodes, list):
        return dict((v, [c for c in cliques if v in c]) for v in nodes)
    # Single node: return a single list of cliques.
    return [c for c in cliques if nodes in c]
| bsd-3-clause | ba26ce1c3120822ad3115279867fb0c0 | 29.913793 | 78 | 0.591436 | 3.533392 | false | false | false | false |
uwescience/myria-web | appengine/networkx/readwrite/graphml.py | 3 | 19224 | """
*******
GraphML
*******
Read and write graphs in GraphML format.
This implementation does not support mixed graphs (directed and unidirected
edges together), hyperedges, nested graphs, or ports.
"GraphML is a comprehensive and easy-to-use file format for graphs. It
consists of a language core to describe the structural properties of a
graph and a flexible extension mechanism to add application-specific
data. Its main features include support of
* directed, undirected, and mixed graphs,
* hypergraphs,
* hierarchical graphs,
* graphical representations,
* references to external data,
* application-specific attribute data, and
* light-weight parsers.
Unlike many other file formats for graphs, GraphML does not use a
custom syntax. Instead, it is based on XML and hence ideally suited as
a common denominator for all kinds of services generating, archiving,
or processing graphs."
http://graphml.graphdrawing.org/
Format
------
GraphML is an XML format. See
http://graphml.graphdrawing.org/specification.html for the specification and
http://graphml.graphdrawing.org/primer/graphml-primer.html
for examples.
"""
__author__ = """\n""".join(['Salim Fadhley',
'Aric Hagberg (hagberg@lanl.gov)'
])
__all__ = ['write_graphml', 'read_graphml', 'generate_graphml',
'GraphMLWriter', 'GraphMLReader']
import networkx as nx
from networkx.utils import open_file, make_str
import warnings
try:
from xml.etree.cElementTree import Element, ElementTree, tostring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, tostring
except ImportError:
pass
@open_file(1,mode='wb')
def write_graphml(G, path, encoding='utf-8',prettyprint=True):
    """Write G in GraphML XML format to path

    Parameters
    ----------
    G : graph
       A networkx graph
    path : file or string
       File or filename to write.
       Filenames ending in .gz or .bz2 will be compressed.
    encoding : string (optional)
       Encoding for text data.
    prettyprint : bool (optional)
       If True use line breaks and indenting in output XML.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> nx.write_graphml(G, "test.graphml")

    Notes
    -----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
    """
    # Build the whole document in memory, then write it to the (possibly
    # compressed) binary file handle opened by the @open_file decorator.
    writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
    writer.add_graph_element(G)
    writer.dump(path)
def generate_graphml(G, encoding='utf-8',prettyprint=True):
    """Generate GraphML lines for G

    Parameters
    ----------
    G : graph
       A networkx graph
    encoding : string (optional)
       Encoding for text data.
    prettyprint : bool (optional)
       If True use line breaks and indenting in output XML.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> linefeed=chr(10) # linefeed=\n
    >>> s=linefeed.join(nx.generate_graphml(G))  # doctest: +SKIP
    >>> for line in nx.generate_graphml(G):  # doctest: +SKIP
    ...    print(line)

    Notes
    -----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
    """
    # Serialize the whole graph first, then yield the XML line by line.
    writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
    writer.add_graph_element(G)
    for line in str(writer).splitlines():
        yield line
@open_file(0,mode='rb')
def read_graphml(path,node_type=str):
    """Read graph in GraphML format from path.

    Parameters
    ----------
    path : file or string
       File or filename to read.
       Filenames ending in .gz or .bz2 will be decompressed.
    node_type: Python type (default: str)
       Convert node ids to this type

    Returns
    -------
    graph: NetworkX graph
        If no parallel edges are found a Graph or DiGraph is returned.
        Otherwise a MultiGraph or MultiDiGraph is returned.

    Notes
    -----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hypergraphs, nested graphs, or ports.

    For multigraphs the GraphML edge "id" will be used as the edge
    key. If not specified then the "key" attribute will be used. If
    there is no "key" attribute a default NetworkX multigraph edge key
    will be provided.

    Files with the yEd "yfiles" extension can be read but the graphics
    information is discarded.

    yEd compressed files ("file.graphmlz" extension) can be read by renaming
    the file to "file.graphml.gz".
    """
    reader = GraphMLReader(node_type=node_type)
    # need to check for multiple graphs
    glist=list(reader(path))
    # Only the first <graph> element in the document is returned.
    return glist[0]
class GraphML(object):
    """Shared namespace constants and type-conversion tables for the
    GraphML reader and writer classes."""
    # XML namespaces used in GraphML documents.
    NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
    NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
    #xmlns:y="http://www.yworks.com/xml/graphml"
    NS_Y = "http://www.yworks.com/xml/graphml"
    SCHEMALOCATION = \
        ' '.join(['http://graphml.graphdrawing.org/xmlns',
                  'http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'])
    # Python 2/3 compatibility shim: define `unicode` and `long` on Py3.
    try:
        chr(12345)     # Fails on Py!=3.
        unicode = str  # Py3k's str is our unicode type
        long = int     # Py3K's int is our long type
    except ValueError:
        # Python 2.x
        pass
    # (python type, GraphML attr.type name) pairs; used in both directions.
    types=[(int,"integer"), # for Gephi GraphML bug
           (str,"yfiles"),(str,"string"), (unicode,"string"),
           (int,"int"), (long,"long"),
           (float,"float"), (float,"double"),
           (bool, "boolean")]
    xml_type = dict(types)  # python type -> GraphML attr.type name
    python_type = dict(reversed(a) for a in types)  # attr.type name -> python type
    convert_bool={'true':True,'false':False,
                  'True': True, 'False': False}
class GraphMLWriter(GraphML):
    """Serialize one or more NetworkX graphs into a GraphML XML document."""
    def __init__(self, graph=None, encoding="utf-8",prettyprint=True):
        """Create an empty <graphml> document; optionally add ``graph``."""
        try:
            import xml.etree.ElementTree
        except ImportError:
            raise ImportError('GraphML writer requires '
                              'xml.elementtree.ElementTree')
        self.prettyprint=prettyprint
        self.encoding = encoding
        self.xml = Element("graphml",
                           {'xmlns':self.NS_GRAPHML,
                            'xmlns:xsi':self.NS_XSI,
                            'xsi:schemaLocation':self.SCHEMALOCATION}
                           )
        # maps (name, attr_type, scope) -> GraphML key id ("d0", "d1", ...)
        self.keys={}
        if graph is not None:
            self.add_graph_element(graph)
    def __str__(self):
        """Return the document as a decoded XML string."""
        if self.prettyprint:
            self.indent(self.xml)
        s=tostring(self.xml).decode(self.encoding)
        return s
    def get_key(self, name, attr_type, scope, default):
        """Return the key id for (name, attr_type, scope), creating a
        <key> element (with optional <default>) on first use."""
        keys_key = (name, attr_type, scope)
        try:
            return self.keys[keys_key]
        except KeyError:
            new_id = "d%i" % len(list(self.keys))
            self.keys[keys_key] = new_id
            key_kwargs = {"id":new_id,
                          "for":scope,
                          "attr.name":name,
                          "attr.type":attr_type}
            key_element=Element("key",**key_kwargs)
            # add subelement for data default value if present
            if default is not None:
                default_element=Element("default")
                default_element.text=make_str(default)
                key_element.append(default_element)
            # <key> declarations must precede the <graph> elements.
            self.xml.insert(0,key_element)
        return new_id
    def add_data(self, name, element_type, value,
                 scope="all",
                 default=None):
        """
        Make a data element for an edge or a node. Keep a log of the
        type in the keys table.
        """
        if element_type not in self.xml_type:
            raise nx.NetworkXError('GraphML writer does not support '
                                   'dict types as data values.')
        key_id = self.get_key(name, self.xml_type[element_type], scope, default)
        data_element = Element("data", key=key_id)
        data_element.text = make_str(value)
        return data_element
    def add_attributes(self, scope, xml_obj, data, default):
        """Appends attributes to edges or nodes.
        """
        for k,v in data.items():
            default_value=default.get(k)
            obj=self.add_data(make_str(k), type(v), make_str(v),
                              scope=scope, default=default_value)
            xml_obj.append(obj)
    def add_nodes(self, G, graph_element):
        """Append a <node> element (with data) for every node of G."""
        for node,data in G.nodes_iter(data=True):
            node_element = Element("node", id = make_str(node))
            default=G.graph.get('node_default',{})
            self.add_attributes("node", node_element, data, default)
            graph_element.append(node_element)
    def add_edges(self, G, graph_element):
        """Append an <edge> element (with data) for every edge of G.
        For multigraphs the edge key is stored as a 'key' data element."""
        if G.is_multigraph():
            for u,v,key,data in G.edges_iter(data=True,keys=True):
                edge_element = Element("edge",source=make_str(u),
                                       target=make_str(v))
                default=G.graph.get('edge_default',{})
                self.add_attributes("edge", edge_element, data, default)
                self.add_attributes("edge", edge_element,
                                    {'key':key}, default)
                graph_element.append(edge_element)
        else:
            for u,v,data in G.edges_iter(data=True):
                edge_element = Element("edge",source=make_str(u),
                                       target=make_str(v))
                default=G.graph.get('edge_default',{})
                self.add_attributes("edge", edge_element, data, default)
                graph_element.append(edge_element)
    def add_graph_element(self, G):
        """
        Serialize graph G in GraphML to the stream.
        """
        if G.is_directed():
            default_edge_type='directed'
        else:
            default_edge_type='undirected'
        graphid=G.graph.pop('id',None)
        if graphid is None:
            graph_element = Element("graph",
                                    edgedefault = default_edge_type)
        else:
            graph_element = Element("graph",
                                    edgedefault = default_edge_type,
                                    id=graphid)
        default={}
        # node_default/edge_default are handled per-element, not as graph data.
        data=dict((k,v) for (k,v) in G.graph.items()
                  if k not in ['node_default','edge_default'])
        self.add_attributes("graph", graph_element, data, default)
        self.add_nodes(G,graph_element)
        self.add_edges(G,graph_element)
        self.xml.append(graph_element)
    def add_graphs(self, graph_list):
        """
        Add many graphs to this GraphML document.
        """
        for G in graph_list:
            self.add_graph_element(G)
    def dump(self, stream):
        """Write the XML declaration and the document to a binary stream."""
        if self.prettyprint:
            self.indent(self.xml)
        document = ElementTree(self.xml)
        header='<?xml version="1.0" encoding="%s"?>'%self.encoding
        stream.write(header.encode(self.encoding))
        document.write(stream, encoding=self.encoding)
    def indent(self, elem, level=0):
        # in-place prettyprint formatter
        i = "\n" + level*"  "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                self.indent(elem, level+1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
class GraphMLReader(GraphML):
    """Read a GraphML document.  Produces NetworkX graph objects.
    """
    def __init__(self, node_type=str):
        """``node_type`` is the callable used to convert node id strings."""
        try:
            import xml.etree.ElementTree
        except ImportError:
            raise ImportError('GraphML reader requires '
                              'xml.elementtree.ElementTree')
        self.node_type=node_type
        self.multigraph=False # assume multigraph and test for parallel edges
    def __call__(self, stream):
        """Parse ``stream`` and yield one NetworkX graph per <graph> element."""
        self.xml = ElementTree(file=stream)
        (keys,defaults) = self.find_graphml_keys(self.xml)
        for g in self.xml.findall("{%s}graph" % self.NS_GRAPHML):
            yield self.make_graph(g, keys, defaults)
    def make_graph(self, graph_xml, graphml_keys, defaults):
        """Build one NetworkX graph from a <graph> XML element."""
        # set default graph type
        edgedefault = graph_xml.get("edgedefault", None)
        if edgedefault=='directed':
            G=nx.MultiDiGraph()
        else:
            G=nx.MultiGraph()
        # set defaults for graph attributes
        for key_id,value in defaults.items():
            key_for=graphml_keys[key_id]['for']
            name=graphml_keys[key_id]['name']
            python_type=graphml_keys[key_id]['type']
            if key_for=='node':
                G.graph['node_default']={name:python_type(value)}
            if key_for=='edge':
                G.graph['edge_default']={name:python_type(value)}
        # hyperedges are not supported
        hyperedge=graph_xml.find("{%s}hyperedge" % self.NS_GRAPHML)
        if hyperedge is not None:
            raise nx.NetworkXError("GraphML reader does not support hyperedges")
        # add nodes
        for node_xml in graph_xml.findall("{%s}node" % self.NS_GRAPHML):
            self.add_node(G, node_xml, graphml_keys)
        # add edges
        for edge_xml in graph_xml.findall("{%s}edge" % self.NS_GRAPHML):
            self.add_edge(G, edge_xml, graphml_keys)
        # add graph data
        data = self.decode_data_elements(graphml_keys, graph_xml)
        G.graph.update(data)
        # switch to Graph or DiGraph if no parallel edges were found.
        if not self.multigraph:
            if G.is_directed():
                return nx.DiGraph(G)
            else:
                return nx.Graph(G)
        else:
            return G
    def add_node(self, G, node_xml, graphml_keys):
        """Add a node to the graph.
        """
        # warn on finding unsupported ports tag
        ports=node_xml.find("{%s}port" % self.NS_GRAPHML)
        if ports is not None:
            warnings.warn("GraphML port tag not supported.")
        # find the node by id and cast it to the appropriate type
        node_id = self.node_type(node_xml.get("id"))
        # get data/attributes for node
        data = self.decode_data_elements(graphml_keys, node_xml)
        G.add_node(node_id, data)
    def add_edge(self, G, edge_element, graphml_keys):
        """Add an edge to the graph.
        """
        # warn on finding unsupported ports tag
        ports=edge_element.find("{%s}port" % self.NS_GRAPHML)
        if ports is not None:
            warnings.warn("GraphML port tag not supported.")
        # raise error if we find mixed directed and undirected edges
        directed = edge_element.get("directed")
        if G.is_directed() and directed=='false':
            raise nx.NetworkXError(\
                "directed=false edge found in directed graph.")
        if (not G.is_directed()) and directed=='true':
            raise nx.NetworkXError(\
                "directed=true edge found in undirected graph.")
        source = self.node_type(edge_element.get("source"))
        target = self.node_type(edge_element.get("target"))
        data = self.decode_data_elements(graphml_keys, edge_element)
        # GraphML stores edge ids as an attribute
        # NetworkX uses them as keys in multigraphs too if no key
        # attribute is specified
        edge_id = edge_element.get("id")
        if edge_id:
            data["id"] = edge_id
        if G.has_edge(source,target):
            # mark this as a multigraph
            self.multigraph=True
        if edge_id is None:
            # no id specified, try using 'key' attribute as id
            edge_id=data.pop('key',None)
        G.add_edge(source, target, key=edge_id, **data)
    def decode_data_elements(self, graphml_keys, obj_xml):
        """Use the key information to decode the data XML if present."""
        data = {}
        for data_element in obj_xml.findall("{%s}data" % self.NS_GRAPHML):
            key = data_element.get("key")
            try:
                data_name=graphml_keys[key]['name']
                data_type=graphml_keys[key]['type']
            except KeyError:
                raise nx.NetworkXError("Bad GraphML data: no key %s"%key)
            text=data_element.text
            # assume anything with subelements is a yfiles extension
            if text is not None and len(list(data_element))==0:
                if data_type==bool:
                    # booleans need the lookup table; bool("false") is True
                    data[data_name] = self.convert_bool[text]
                else:
                    data[data_name] = data_type(text)
            elif len(list(data_element)) > 0:
                # Assume yfiles as subelements, try to extract node_label
                node_label = None
                for node_type in ['ShapeNode', 'SVGNode', 'ImageNode']:
                    if node_label is None:
                        node_label = data_element.find("{%s}%s/{%s}NodeLabel" %
                                                       (self.NS_Y, node_type, self.NS_Y))
                if node_label is not None:
                    data['label'] = node_label.text
                edge_label = data_element.find("{%s}PolyLineEdge/{%s}EdgeLabel"%
                                               (self.NS_Y, (self.NS_Y)))
                if edge_label is not None:
                    data['label'] = edge_label.text
        return data
    def find_graphml_keys(self, graph_element):
        """Extracts all the keys and key defaults from the xml.
        """
        graphml_keys = {}
        graphml_key_defaults = {}
        for k in graph_element.findall("{%s}key" % self.NS_GRAPHML):
            attr_id = k.get("id")
            attr_type=k.get('attr.type')
            attr_name=k.get("attr.name")
            if attr_type is None:
                # yfiles keys carry 'yfiles.type' instead of 'attr.type'
                attr_name=k.get('yfiles.type')
                attr_type='yfiles'
            if attr_name is None:
                raise nx.NetworkXError("Unknown key type in file.")
            graphml_keys[attr_id] = {
                "name":attr_name,
                "type":self.python_type[attr_type],
                "for":k.get("for")}
            # check for "default" subelement of key element
            default=k.find("{%s}default" % self.NS_GRAPHML)
            if default is not None:
                graphml_key_defaults[attr_id]=default.text
        return graphml_keys,graphml_key_defaults
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when the stdlib
    xml.etree.ElementTree package is not importable."""
    from nose import SkipTest
    try:
        import xml.etree.ElementTree
    except ImportError:
        # Only a missing module should trigger a skip; a bare `except:` here
        # would also swallow SystemExit/KeyboardInterrupt.
        raise SkipTest("xml.etree.ElementTree not available")
# fixture for nose tests
def teardown_module(module):
    """Nose fixture: remove the file created by the write_graphml doctest."""
    import os
    try:
        os.unlink('test.graphml')
    except OSError:
        # File was never created or is already gone; nothing to clean up.
        # Narrowed from a bare `except:` so real errors are not hidden.
        pass
| bsd-3-clause | 5ec33b44c904184f197dbc1ea04916e9 | 36.328155 | 80 | 0.567 | 3.984249 | false | false | false | false |
uwescience/myria-web | appengine/networkx/linalg/spectrum.py | 35 | 2271 | """
Eigenvalue spectrum of graphs.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['laplacian_spectrum', 'adjacency_spectrum']
def laplacian_spectrum(G, weight='weight'):
    """Compute the eigenvalues of the Laplacian matrix of ``G``.

    Parameters
    ----------
    G : graph
       A NetworkX graph
    weight : string or None, optional (default='weight')
       Edge data key used for matrix entries; every edge counts as
       weight 1 when ``None``.

    Returns
    -------
    evals : NumPy array
       The Laplacian eigenvalues.

    Notes
    -----
    Edge weights are summed for MultiGraph/MultiDiGraph inputs; see
    to_numpy_matrix for further conversion options.

    See Also
    --------
    laplacian_matrix
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "laplacian_spectrum() requires NumPy: http://scipy.org/ ")
    laplacian = nx.laplacian_matrix(G, weight=weight)
    return np.linalg.eigvals(laplacian)
def adjacency_spectrum(G, weight='weight'):
    """Compute the eigenvalues of the adjacency matrix of ``G``.

    Parameters
    ----------
    G : graph
       A NetworkX graph
    weight : string or None, optional (default='weight')
       Edge data key used for matrix entries; every edge counts as
       weight 1 when ``None``.

    Returns
    -------
    evals : NumPy array
       The adjacency-matrix eigenvalues.

    Notes
    -----
    Edge weights are summed for MultiGraph/MultiDiGraph inputs; see
    to_numpy_matrix for further conversion options.

    See Also
    --------
    adjacency_matrix
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "adjacency_spectrum() requires NumPy: http://scipy.org/ ")
    adjacency = nx.adjacency_matrix(G, weight=weight)
    return np.linalg.eigvals(adjacency)
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when NumPy is not importable."""
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # Only a missing module should trigger a skip; a bare `except:` here
        # would also swallow SystemExit/KeyboardInterrupt.
        raise SkipTest("NumPy not available")
| bsd-3-clause | cb045accd16b646882f0975bf5af718f | 24.233333 | 68 | 0.613827 | 3.929066 | false | false | false | false |
xia2/xia2 | src/xia2/Modules/SSX/data_integration_standard.py | 1 | 29598 | from __future__ import annotations
import functools
import json
import logging
import math
import os
import pathlib
import subprocess
from dataclasses import asdict, dataclass, field
from typing import List, Optional, Tuple
import numpy as np
import libtbx.easy_mp
from dials.algorithms.clustering.unit_cell import Cluster
from dials.algorithms.indexing.ssx.analysis import generate_html_report
from dials.array_family import flex
from dxtbx.model import ExperimentList
from dxtbx.serialize import load
from xia2.Driver.timing import record_step
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Streams import banner
from xia2.Modules.SSX.data_integration_programs import (
IndexingParams,
IntegrationParams,
RefinementParams,
SpotfindingParams,
best_cell_from_cluster,
clusters_from_experiments,
combine_with_reference,
run_refinement,
ssx_find_spots,
ssx_index,
ssx_integrate,
)
from xia2.Modules.SSX.reporting import condensed_unit_cell_info
from xia2.Modules.SSX.util import redirect_xia2_logger
xia2_logger = logging.getLogger(__name__)
@dataclass
class FileInput:
    """Data-file locations and options passed through to dials.import."""
    # Only the first non-empty of images/templates/directories is used
    # by run_import.
    images: List[str] = field(default_factory=list)
    templates: List[str] = field(default_factory=list)
    directories: List[str] = field(default_factory=list)
    mask: Optional[pathlib.Path] = None
    reference_geometry: Optional[pathlib.Path] = None
    import_phil: Optional[pathlib.Path] = None
@dataclass
class AlgorithmParams:
    """Workflow control options for batched SSX data integration."""
    # Explicit (start, end) image slices; when None, images are accumulated
    # batch by batch until the target crystal count is reached.
    assess_images_to_use: Optional[Tuple[int, int]] = None
    refinement_images_to_use: Optional[Tuple[int, int]] = None
    assess_crystals_n_crystals: int = 250
    geometry_refinement_n_crystals: int = 250
    # Number of images per batch_* processing directory.
    batch_size: int = 1000
    # Subset of {"find_spots", "index", "integrate"} to run per batch.
    steps: List[str] = field(default_factory=list)
    nproc: int = 1
    njobs: int = 1
    multiprocessing_method: str = "multiprocessing"
    # When True, programs write per-image "nugget" files for live monitoring.
    enable_live_reporting: bool = False
def process_batch(
    working_directory: pathlib.Path,
    spotfinding_params: SpotfindingParams,
    indexing_params: IndexingParams,
    integration_params: IntegrationParams,
    options: AlgorithmParams,
) -> dict:
    """Run find_spots, index and integrate in the working directory.

    Only the steps named in ``options.steps`` are executed.  Returns a
    summary dict always containing "directory" plus "n_images_indexed",
    "n_cryst_integrated" and (after integration) "DataFiles".
    """
    number = working_directory.name.split("_")[-1]
    xia2_logger.notice(banner(f"Processing batch {number}"))  # type: ignore
    data = {
        "n_images_indexed": None,
        "n_cryst_integrated": None,
        "directory": str(working_directory),
    }
    if options.enable_live_reporting:
        # Indexing/integration write per-image progress files here.
        nuggets_dir = working_directory / "nuggets"
        if not nuggets_dir.is_dir():
            pathlib.Path.mkdir(nuggets_dir)
        indexing_params.output_nuggets_dir = nuggets_dir
        integration_params.output_nuggets_dir = nuggets_dir
    if "find_spots" in options.steps:
        strong = ssx_find_spots(working_directory, spotfinding_params)
        strong.as_file(working_directory / "strong.refl")
    summary: dict = {}
    integration_summary: dict = {}
    if "index" in options.steps:
        expt, refl, summary = ssx_index(working_directory, indexing_params)
        large_clusters = summary["large_clusters"]
        data["n_images_indexed"] = summary["n_images_indexed"]
        expt.as_file(working_directory / "indexed.expt")
        refl.as_file(working_directory / "indexed.refl")
        if large_clusters:
            xia2_logger.info(f"{condensed_unit_cell_info(large_clusters)}")
        if not (expt and refl):
            # Nothing indexed, so integration would have no input; stop here.
            xia2_logger.warning(
                f"No images successfully indexed in {str(working_directory)}"
            )
            return data
    if "integrate" in options.steps:
        integration_summary = ssx_integrate(working_directory, integration_params)
        large_clusters = integration_summary["large_clusters"]
        if large_clusters:
            xia2_logger.info(f"{condensed_unit_cell_info(large_clusters)}")
        data["n_cryst_integrated"] = integration_summary["n_cryst_integrated"]
        data["DataFiles"] = integration_summary["DataFiles"]
    return data
def setup_main_process(
    main_directory: pathlib.Path,
    imported_expts: pathlib.Path,
    batch_size: int,
) -> Tuple[List[pathlib.Path], dict]:
    """
    Slice data from the imported data according to the batch size,
    saving each into its own subdirectory for batch processing.

    Returns the ordered list of batch_* directories and a setup dict
    mapping each directory (as str) to its image count.
    """
    expts = load.experiment_list(imported_expts, check_format=True)
    n_batches = math.floor(len(expts) / batch_size)
    splits = [i * batch_size for i in range(max(1, n_batches))] + [len(expts)]
    # make sure last batch has at least the batch size
    # Zero-pad batch numbers so directory names sort lexicographically.
    template = functools.partial(
        "batch_{index:0{fmt:d}d}".format, fmt=len(str(n_batches))
    )
    batch_directories: List[pathlib.Path] = []
    setup_data: dict = {"images_per_batch": {}}
    for i in range(len(splits) - 1):
        subdir = main_directory / template(index=i + 1)
        if not subdir.is_dir():
            pathlib.Path.mkdir(subdir)
            # now copy file and run
        sub_expt = expts[splits[i] : splits[i + 1]]
        sub_expt.as_file(subdir / "imported.expt")
        batch_directories.append(subdir)
        setup_data["images_per_batch"][str(subdir)] = splits[i + 1] - splits[i]
    return batch_directories, setup_data
def inspect_existing_batch_directories(
    main_directory: pathlib.Path,
) -> Tuple[List[pathlib.Path], dict]:
    """Collect previously-created batch_* directories in numeric order.

    Returns the ordered directories and a setup dict mapping each directory
    (as str) to the number of experiments in its imported.expt.  Raises
    ValueError if no batch_* directories exist or one lacks imported.expt.
    """
    found_dirs = []
    batch_numbers = []
    image_counts = []
    for batch_dir in list(main_directory.glob("batch_*")):
        found_dirs.append(batch_dir)
        batch_numbers.append(int(batch_dir.name.split("_")[-1]))
        imported = batch_dir / "imported.expt"
        if not imported.is_file():
            raise ValueError("Unable to find imported.expt in existing batch directory")
        image_counts.append(len(load.experiment_list(imported, check_format=False)))
    if not found_dirs:
        raise ValueError("Unable to find any batch_* directories")
    batch_directories: List[pathlib.Path] = []
    setup_data: dict = {"images_per_batch": {}}
    # Order by the numeric suffix rather than glob (filesystem) order.
    for idx in np.argsort(np.array(batch_numbers)):
        batch_directories.append(found_dirs[idx])
        setup_data["images_per_batch"][str(found_dirs[idx])] = image_counts[idx]
    return batch_directories, setup_data
class NoMoreImages(Exception):
    """Raised by slice_images_from_experiments when the requested slice
    starts beyond the last imported image."""
    pass
def slice_images_from_experiments(
    imported_expts: pathlib.Path,
    destination_directory: pathlib.Path,
    images: Tuple[int, int],
) -> None:
    """Saves a slice of the experiment list into the destination directory.

    ``images`` is a (start, end) half-open slice of image indices; the end
    is clamped to the number of experiments.  Raises NoMoreImages when the
    start index is past the last experiment.  The source file and slice are
    recorded in destination_directory/file_input.json.
    """
    if not destination_directory.is_dir():  # This is the first attempt
        pathlib.Path.mkdir(destination_directory)
    expts = load.experiment_list(imported_expts, check_format=False)
    assert len(images) == 2  # Input is a tuple representing a slice
    start, end = images[0], images[1]
    if start >= len(expts):
        raise NoMoreImages
    if end > len(expts):
        end = len(expts)
    new_expts = expts[start:end]
    new_expts.as_file(destination_directory / "imported.expt")
    xia2_logger.info(
        f"Saved images {start+1} to {end} into {destination_directory / 'imported.expt'}"
    )
    # Record provenance of this slice for later inspection.
    output = {"input": os.fspath(imported_expts), "slice": images}
    outfile = destination_directory / "file_input.json"
    with (outfile).open(mode="w") as f:
        json.dump(output, f, indent=2)
def _matches_previous_path(
    current: Optional[pathlib.Path], previous_value: Optional[str]
) -> bool:
    """Return True if an optional path option matches its previously
    recorded value (stored in file_input.json as a string, or None)."""
    if not current:
        return previous_value is None
    return str(current) == previous_value


def check_previous_import(
    working_directory: pathlib.Path, file_input: FileInput
) -> Tuple[bool, dict]:
    """Compare the requested import options against a previous run.

    Reads working_directory/file_input.json (written by run_import).
    Returns (True, previous_options) when the reference geometry, mask and
    import phil all match and at least one non-empty image source
    (images/templates/directories) is identical, meaning dials.import can
    be skipped.  Otherwise returns (False, previous_options), where
    previous_options is {} if no previous run was recorded.
    """
    previous_file = working_directory / "file_input.json"
    if not previous_file.is_file():
        return (False, {})
    with previous_file.open(mode="r") as f:
        previous = json.load(f)
    # All three auxiliary options must be unchanged before the image
    # sources are even considered.
    if not (
        _matches_previous_path(file_input.reference_geometry, previous["reference_geometry"])
        and _matches_previous_path(file_input.mask, previous["mask"])
        and _matches_previous_path(file_input.import_phil, previous["import_phil"])
    ):
        return (False, previous)
    inputs = [
        file_input.images,
        file_input.templates,
        file_input.directories,
    ]
    previous_inputs = [
        previous["images"],
        previous["templates"],
        previous["directories"],
    ]
    for this, other in zip(inputs, previous_inputs):
        # Only a non-empty, identical source counts as "same data".
        if this and (this == other):
            return (True, previous)
    return (False, previous)
def run_import(working_directory: pathlib.Path, file_input: FileInput) -> None:
    """
    Run dials.import with either images, templates or directories.

    Applies the mask and reference geometry when given, then records the
    options used in working_directory/file_input.json so that a later run
    can detect whether the import needs repeating (see
    check_previous_import).

    Raises ValueError if dials.import exits with an error.
    """
    if not working_directory.is_dir():
        pathlib.Path.mkdir(working_directory)
    xia2_logger.info("New images or geometry detected, running import")
    import_command = ["dials.import", "output.experiments=imported.expt"]
    if file_input.import_phil:
        # The phil file must come before the other options on the command line.
        import_command.insert(1, os.fspath(file_input.import_phil))
    # Only the first non-empty image source is used.
    if file_input.images:
        import_command += file_input.images
    elif file_input.templates:
        for t in file_input.templates:
            import_command.append(f"template={t}")
    elif file_input.directories:
        for d in file_input.directories:
            import_command.append(f"directory={d}")
    if file_input.mask:
        import_command.append(f"mask={os.fspath(file_input.mask)}")
    if file_input.reference_geometry:
        import_command += [
            f"reference_geometry={os.fspath(file_input.reference_geometry)}",
            "use_gonio_reference=False",
        ]
        xia2_logger.notice(banner("Importing with reference geometry"))  # type: ignore
    else:
        xia2_logger.notice(banner("Importing"))  # type: ignore
    with record_step("dials.import"):
        result = subprocess.run(
            import_command, cwd=working_directory, capture_output=True, encoding="utf-8"
        )
    if result.returncode or result.stderr:
        raise ValueError(
            "dials.import returned error status:\n"
            + result.stderr
            + "\nHint: To import data from a .h5 file use e.g. image=/path/to/data/data_master.h5"
            + "\n      To import data from cbf files, use e.g. template=/path/to/data/name_#####.cbf"
            + "\n      The option directory=/path/to/data/ can also be used."
            + "\nPlease recheck the input path/file names for your data files."
        )
    # Record the options used so identical re-runs can be skipped.
    outfile = working_directory / "file_input.json"
    outfile.touch()
    file_input_dict = asdict(file_input)
    # pathlib.Path values are not JSON serialisable; store them as strings.
    if file_input.reference_geometry:
        file_input_dict["reference_geometry"] = str(file_input.reference_geometry)
    if file_input.mask:
        file_input_dict["mask"] = str(file_input.mask)
    if file_input.import_phil:
        file_input_dict["import_phil"] = str(file_input.import_phil)
    with (outfile).open(mode="w") as f:
        json.dump(file_input_dict, f, indent=2)
def assess_crystal_parameters_from_images(
    working_directory: pathlib.Path,
    imported_expts: pathlib.Path,
    images_to_use: Tuple[int, int],
    spotfinding_params: SpotfindingParams,
    indexing_params: IndexingParams,
) -> None:
    """
    Run spotfinding and indexing and report on the properties of
    the largest cluster.

    Generates a unit cell clustering html report if any clusters are found.
    Always outputs an assess_crystals.json containing at least the
    success_per_image.
    """
    large_clusters: List[Cluster] = []
    cluster_plots: dict = {}
    success_per_image: List[bool] = []
    slice_images_from_experiments(imported_expts, working_directory, images_to_use)
    # now run find spots and index
    strong = ssx_find_spots(working_directory, spotfinding_params)
    strong.as_file(working_directory / "strong.refl")
    expts, __, summary = ssx_index(working_directory, indexing_params)
    success_per_image = summary["success_per_image"]
    if expts:
        cluster_plots, large_clusters = clusters_from_experiments(expts)
        if large_clusters:
            xia2_logger.info(f"{condensed_unit_cell_info(large_clusters)}")
    if cluster_plots:
        generate_html_report(
            cluster_plots, working_directory / "dials.cell_clusters.html"
        )
    # The json is written even when indexing found nothing.
    cluster_plots["success_per_image"] = success_per_image
    with open(working_directory / "assess_crystals.json", "w") as outfile:
        json.dump(cluster_plots, outfile, indent=2)
    _report_on_assess_crystals(expts, large_clusters)
def cumulative_assess_crystal_parameters(
    working_directory: pathlib.Path,
    imported_expts: pathlib.Path,
    options: AlgorithmParams,
    spotfinding_params: SpotfindingParams,
    indexing_params: IndexingParams,
) -> None:
    """Index batches of images until options.assess_crystals_n_crystals
    crystals are found (or the data is exhausted), then report on the
    unit cell clusters of all indexed crystals.

    Writes assess_crystals.json and, when clusters exist, a clustering
    html report into the working directory.
    """
    large_clusters: List[Cluster] = []
    cluster_plots: dict = {}
    success_per_image: List[bool] = []
    n_xtal = 0
    first_image = 0
    all_expts = ExperimentList()
    while n_xtal < options.assess_crystals_n_crystals:
        try:
            slice_images_from_experiments(
                imported_expts,
                working_directory,
                (first_image, first_image + options.batch_size),
            )
        except NoMoreImages:
            # Ran out of data before reaching the requested crystal count.
            break
        strong = ssx_find_spots(working_directory, spotfinding_params)
        strong.as_file(working_directory / "strong.refl")
        expts, _, summary_this = ssx_index(working_directory, indexing_params)
        n_xtal += len(expts)
        xia2_logger.info(f"Indexed {n_xtal} crystals in total")
        all_expts.extend(expts)
        first_image += options.batch_size
        success_per_image.extend(summary_this["success_per_image"])
    if all_expts:
        # generate up-to-date cluster plots and lists
        cluster_plots, large_clusters = clusters_from_experiments(all_expts)
        if large_clusters:
            xia2_logger.info(f"{condensed_unit_cell_info(large_clusters)}")
    if cluster_plots:
        generate_html_report(
            cluster_plots, working_directory / "dials.cell_clusters.html"
        )
    # The json is written even when indexing found nothing.
    cluster_plots["success_per_image"] = success_per_image
    with open(working_directory / "assess_crystals.json", "w") as outfile:
        json.dump(cluster_plots, outfile, indent=2)
    _report_on_assess_crystals(all_expts, large_clusters)
def _report_on_assess_crystals(
    experiments: ExperimentList, large_clusters: List[Cluster]
) -> None:
    """Log the outcome of crystal assessment.

    Reports the unit cell and symmetry of the largest cluster when one was
    found; otherwise logs guidance on adjusting parameters or image choice.
    """
    if experiments:
        if large_clusters:
            sg, uc = best_cell_from_cluster(large_clusters[0])
            xia2_logger.info(
                "Properties of largest cluster:\n"
                "Highest possible metric unit cell: "
                + ", ".join(f"{i:.3f}" for i in uc)
                + f"\nHighest possible metric symmetry: {sg}"
            )
        else:
            # Typo fix: "Some imaged indexed" -> "Some images indexed".
            xia2_logger.info(
                "Some images indexed, but no significant unit cell clusters found.\n"
                + "Please try adjusting indexing parameters or try crystal assessment on different images"
            )
    else:
        xia2_logger.warning(
            "No successfully indexed images.\n"
            + "Please try adjusting indexing parameters or try crystal assessment on different images"
        )
def determine_reference_geometry_from_images(
    working_directory: pathlib.Path,
    imported_expts: pathlib.Path,
    images_to_use: Tuple[int, int],
    spotfinding_params: SpotfindingParams,
    indexing_params: IndexingParams,
    refinement_params: RefinementParams,
) -> None:
    """Run find spots, indexing and joint refinement in the working directory.

    Uses only the (start, end) slice ``images_to_use`` of the imported data.
    Writes geometry_refinement.json and the refined geometry (refined.expt).
    Raises ValueError when no images index successfully.
    """
    slice_images_from_experiments(imported_expts, working_directory, images_to_use)
    xia2_logger.notice(banner("Joint-refinement of experimental geometry"))  # type: ignore
    cluster_plots: dict = {}
    success_per_image: List[bool] = []
    strong = ssx_find_spots(working_directory, spotfinding_params)
    strong.as_file(working_directory / "strong.refl")
    expts, refl, summary = ssx_index(working_directory, indexing_params)
    success_per_image = summary["success_per_image"]
    if expts:
        cluster_plots, large_clusters = clusters_from_experiments(expts)
        if large_clusters:
            xia2_logger.info(f"{condensed_unit_cell_info(large_clusters)}")
    if cluster_plots:
        generate_html_report(
            cluster_plots, working_directory / "dials.cell_clusters.html"
        )
    # Record clustering results even if indexing (partially) failed.
    cluster_plots["success_per_image"] = success_per_image
    with open(working_directory / "geometry_refinement.json", "w") as outfile:
        json.dump(cluster_plots, outfile, indent=2)
    if not (expts and refl):
        raise ValueError(
            "No images successfully indexed, unable to run geometry refinement"
        )
    # now do geom refinement.
    expts.as_file(working_directory / "indexed.expt")
    refl.as_file(working_directory / "indexed.refl")
    run_refinement(working_directory, refinement_params)
    xia2_logger.info(
        f"Refined reference geometry saved to {working_directory}/refined.expt"
    )
def cumulative_determine_reference_geometry(
    working_directory: pathlib.Path,
    imported_expts: pathlib.Path,
    options: AlgorithmParams,
    spotfinding_params: SpotfindingParams,
    indexing_params: IndexingParams,
    refinement_params: RefinementParams,
) -> None:
    """Accumulate indexed crystals batch-by-batch, then jointly refine geometry.

    Unlike determine_reference_geometry_from_images, this processes successive
    batches of ``options.batch_size`` images until at least
    ``options.geometry_refinement_n_crystals`` crystals have been indexed (or
    the images run out), before performing a single joint refinement.

    Raises:
        ValueError: if no images were successfully indexed.
    """
    xia2_logger.notice(banner("Joint-refinement of experimental geometry"))  # type: ignore
    cluster_plots: dict = {}
    success_per_image: List[bool] = []
    n_xtal = 0
    first_image = 0
    all_expts = ExperimentList()
    all_tables = []
    # Keep consuming batches until we have enough crystals for refinement.
    while n_xtal < options.geometry_refinement_n_crystals:
        try:
            slice_images_from_experiments(
                imported_expts,
                working_directory,
                (first_image, first_image + options.batch_size),
            )
        except NoMoreImages:
            # Ran out of data before reaching the crystal-count target.
            break
        strong = ssx_find_spots(working_directory, spotfinding_params)
        strong.as_file(working_directory / "strong.refl")
        expts, refl, summary_this = ssx_index(working_directory, indexing_params)
        n_xtal += len(expts)
        xia2_logger.info(f"Indexed {n_xtal} crystals in total")
        if refl.size():
            # Only keep batches that produced indexed reflections.
            all_expts.extend(expts)
            all_tables.append(refl)
        first_image += options.batch_size
        success_per_image.extend(summary_this["success_per_image"])
    if all_expts:
        # Summarise unit-cell clusters across everything indexed so far.
        cluster_plots, large_clusters = clusters_from_experiments(all_expts)
        if large_clusters:
            xia2_logger.info(f"{condensed_unit_cell_info(large_clusters)}")
    if cluster_plots:
        generate_html_report(
            cluster_plots, working_directory / "dials.cell_clusters.html"
        )
        cluster_plots["success_per_image"] = success_per_image
        with open(working_directory / "geometry_refinement.json", "w") as outfile:
            json.dump(cluster_plots, outfile, indent=2)
    if not all_expts:
        raise ValueError(
            "No images successfully indexed, unable to run geometry refinement"
        )
    # now do geom refinement.
    joint_table = flex.reflection_table.concat(all_tables)
    # Share identical models between experiments before writing out.
    all_expts = combine_with_reference(all_expts)
    all_expts.as_file(working_directory / "indexed.expt")
    joint_table.as_file(working_directory / "indexed.refl")
    run_refinement(working_directory, refinement_params)
    xia2_logger.info(
        f"Refined reference geometry saved to {working_directory}/refined.expt"
    )
class ProcessBatch(object):
    """A processing class as required for multi_node_parallel_map.

    Instances are picklable callables: constructed once with the processing
    parameters, then invoked per batch directory by the parallel map.
    """

    def __init__(
        self,
        spotfinding_params: SpotfindingParams,
        indexing_params: IndexingParams,
        integration_params: IntegrationParams,
        options: AlgorithmParams,
    ):
        self.spotfinding_params = spotfinding_params
        self.indexing_params = indexing_params
        self.integration_params = integration_params
        self.options = options
        # The actual per-batch worker function; held as an attribute so a
        # subclass (or test) could substitute a different implementation.
        self.function = process_batch

    def __call__(self, directory: pathlib.Path) -> dict:
        """Process one batch directory, returning its summary data dict."""
        # Capture the worker's log output so it can be re-emitted intact from
        # the parent process rather than interleaved from multiple workers.
        with redirect_xia2_logger() as iostream:
            summary_data = self.function(
                directory,
                self.spotfinding_params,
                self.indexing_params,
                self.integration_params,
                self.options,
            )
            s = iostream.getvalue()
        xia2_logger.info(s)
        return summary_data
def process_batches(
    batch_directories: List[pathlib.Path],
    spotfinding_params: SpotfindingParams,
    indexing_params: IndexingParams,
    integration_params: IntegrationParams,
    setup_data: dict,
    options: AlgorithmParams,
):
    """Process all batch directories, in parallel if options.njobs > 1.

    Cumulative progress (images processed/indexed, crystals integrated) is
    logged as each batch completes, and any output data files reported by a
    batch are registered with the FileHandler.
    """

    class ProgressReport(object):
        # Accumulates running totals across batch results; batches may
        # complete out of order (preserve_order=False below), so only
        # cumulative figures are meaningful.
        def __init__(self):
            self.cumulative_images: int = 0
            self.cumulative_images_indexed: int = 0
            self.cumulative_crystals_integrated: int = 0

        def add(self, summary_data: dict) -> None:
            # Image count per directory comes from setup_data, not the batch
            # result, so it is correct even for partially failed batches.
            self.cumulative_images += setup_data["images_per_batch"][
                summary_data["directory"]
            ]
            xia2_logger.info(
                f"Cumulative number of images processed: {self.cumulative_images}"
            )
            if summary_data["n_images_indexed"] is not None:
                self.cumulative_images_indexed += summary_data["n_images_indexed"]
                pc_indexed = (
                    self.cumulative_images_indexed * 100 / self.cumulative_images
                )
                xia2_logger.info(f"Cumulative % of images indexed: {pc_indexed:.2f}%")
            if summary_data["n_cryst_integrated"] is not None:
                self.cumulative_crystals_integrated += summary_data[
                    "n_cryst_integrated"
                ]
                xia2_logger.info(
                    f"Total number of integrated crystals: {self.cumulative_crystals_integrated}"
                )

    progress = ProgressReport()

    def process_output(summary_data):
        # Callback invoked (in the parent process) for each finished batch.
        progress.add(summary_data)
        if "DataFiles" in summary_data:
            for tag, file in zip(
                summary_data["DataFiles"]["tags"],
                summary_data["DataFiles"]["filenames"],
            ):
                FileHandler.record_more_data_file(tag, file)

    if options.njobs > 1:
        # Never spawn more jobs than there are batches to process.
        njobs = min(options.njobs, len(batch_directories))
        xia2_logger.info(
            f"Submitting processing in {len(batch_directories)} batches across {njobs} cores, each with nproc={options.nproc}."
        )
        libtbx.easy_mp.parallel_map(
            func=ProcessBatch(
                spotfinding_params, indexing_params, integration_params, options
            ),
            iterable=batch_directories,
            qsub_command=f"qsub -pe smp {options.nproc}",
            processes=njobs,
            method=options.multiprocessing_method,
            callback=process_output,
            preserve_order=False,
        )
    else:
        # Serial fallback: run each batch in-process, in order.
        for batch_dir in batch_directories:
            summary_data = process_batch(
                batch_dir,
                spotfinding_params,
                indexing_params,
                integration_params,
                options,
            )
            process_output(summary_data)
def check_for_gaps_in_steps(steps: List[str]) -> bool:
    """Return True if a later processing step is requested without its
    prerequisite (the required order is find_spots -> index -> integrate)."""
    have_spots = "find_spots" in steps
    have_index = "index" in steps
    have_integrate = "integrate" in steps
    # Anything downstream of spotfinding requires spotfinding to be present.
    if (have_index or have_integrate) and not have_spots:
        return True
    # Integration without indexing is also a gap.
    return have_spots and have_integrate and not have_index
def run_data_integration(
    root_working_directory: pathlib.Path,
    file_input: FileInput,
    options: AlgorithmParams,
    spotfinding_params: SpotfindingParams,
    indexing_params: IndexingParams,
    refinement_params: RefinementParams,
    integration_params: IntegrationParams,
) -> List[pathlib.Path]:
    """
    The main data integration processing function.

    Import the data, followed by option crystal assessment (if the unit cell and
    space group were not given) and geometry refinement (if a reference geometry
    was not given). Then prepare and run data integration in batches with the
    given/determined reference geometry.

    Returns the list of batch directories that were processed (empty if
    processing stopped early, e.g. after crystal assessment only).

    Raises:
        ValueError: on inconsistent step selections, failed import, or if no
            batch directories could be determined.
    """
    # First do a bit of input validation
    has_gaps = check_for_gaps_in_steps(options.steps)
    if not file_input.reference_geometry and has_gaps:
        raise ValueError(
            "Some processing steps are missing, and no reference geometry specified. Please adjust input."
        )
    # Note, it is allowed in general to not have to have index or find_spots, as
    # one may be rerunning in a stepwise manner.

    # Start by importing the data
    import_wd = root_working_directory / "import"
    same_as_previous, previous = check_previous_import(import_wd, file_input)
    if previous and not same_as_previous:
        xia2_logger.info(
            "Previous import options:\n"
            + "\n".join(f"  {k} : {v}" for k, v in previous.items())
        )
    if same_as_previous:
        xia2_logger.info("Import options identical to previous run")
    if not same_as_previous and has_gaps:
        # A fresh import invalidates earlier intermediate results, so gaps in
        # the requested steps cannot be filled from a previous run.
        raise ValueError(
            "Some processing steps, specified by workflow.steps, are missing and a new import was required "
            "due to first run or rerun with different options. Please adjust input."
        )
    import_was_run = False
    if not same_as_previous:
        # Run the first import, or reimport if options different
        run_import(import_wd, file_input)
        import_was_run = True
    imported_expts = import_wd / "imported.expt"
    if not imported_expts.is_file():
        raise ValueError(
            "Unable to successfully import images, please check input filepaths"
        )

    # If space group and unit cell not both given, then assess the crystals
    if not (indexing_params.space_group and indexing_params.unit_cell):
        assess_wd = root_working_directory / "assess_crystals"
        if options.assess_images_to_use:
            assess_crystal_parameters_from_images(
                assess_wd,
                imported_expts,
                options.assess_images_to_use,
                spotfinding_params,
                indexing_params,
            )
        else:
            cumulative_assess_crystal_parameters(
                assess_wd, imported_expts, options, spotfinding_params, indexing_params
            )
        # Assessment is a dead end: the user must supply cell/space group and
        # rerun before integration can proceed.
        xia2_logger.info(
            "Rerun with a space group and unit cell to continue processing"
        )
        return []

    # Do joint geometry refinement if a reference geometry was not specified.
    if not file_input.reference_geometry:
        geom_ref_wd = root_working_directory / "geometry_refinement"
        if options.refinement_images_to_use:
            determine_reference_geometry_from_images(
                geom_ref_wd,
                imported_expts,
                options.refinement_images_to_use,
                spotfinding_params,
                indexing_params,
                refinement_params,
            )
        else:
            cumulative_determine_reference_geometry(
                geom_ref_wd,
                imported_expts,
                options,
                spotfinding_params,
                indexing_params,
                refinement_params,
            )
        # Reimport with this reference geometry to prepare for the main processing
        file_input.reference_geometry = geom_ref_wd / "refined.expt"
        run_import(import_wd, file_input)
        import_was_run = True

    if not options.steps:
        # Nothing further requested beyond assessment/refinement.
        return []

    # Now do the main processing using reference geometry
    if import_was_run and has_gaps:
        raise ValueError(
            "New data was imported, but there are gaps in the processing steps. Please adjust input."
        )
    try:
        batch_directories, setup_data = inspect_existing_batch_directories(
            root_working_directory
        )
    except ValueError:  # if existing batches weren't found
        batch_directories, setup_data = setup_main_process(
            root_working_directory,
            imported_expts,
            options.batch_size,
        )
    if not batch_directories:
        raise ValueError("Unable to determine directories for processing.")
    process_batches(
        batch_directories,
        spotfinding_params,
        indexing_params,
        integration_params,
        setup_data,
        options,
    )
    return batch_directories
| bsd-3-clause | 7f970b39e6d251c41fdb28e753ad9651 | 35.9975 | 127 | 0.631799 | 3.816142 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/Dials/Reindex.py | 1 | 3848 | from __future__ import annotations
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
logger = logging.getLogger("xia2.Wrappers.Dials.Reindex")
def Reindex(DriverType=None):
    """A factory for ReindexWrapper classes.

    Returns a wrapper around the ``dials.reindex`` command line program; the
    concrete base class is chosen by the DriverFactory (e.g. simple/cluster).
    """
    DriverInstance = DriverFactory.Driver(DriverType)

    class ReindexWrapper(DriverInstance.__class__):
        def __init__(self):
            DriverInstance.__class__.__init__(self)
            self.set_executable("dials.reindex")

            # Input files / parameters for dials.reindex; all optional, set
            # via the setters below before calling run().
            self._experiments_filename = None
            self._indexed_filename = None
            self._reference_filename = None
            self._reference_reflections = None
            self._space_group = None
            self._cb_op = None
            self._hkl_offset = None
            # Output paths; generated from the xpid at run() time if not set.
            self._reindexed_experiments_filename = None
            self._reindexed_reflections_filename = None

        def set_experiments_filename(self, experiments_filename):
            self._experiments_filename = experiments_filename

        def set_indexed_filename(self, indexed_filename):
            self._indexed_filename = indexed_filename

        def set_reference_filename(self, reference_filename):
            self._reference_filename = reference_filename

        def set_reference_reflections(self, reference_reflections):
            self._reference_reflections = reference_reflections

        def set_space_group(self, space_group):
            self._space_group = space_group

        def set_cb_op(self, cb_op):
            # Change-of-basis operator, e.g. "a,b,c" or "-b,a,c".
            self._cb_op = cb_op

        def set_hkl_offset(self, hkl_offset):
            assert len(hkl_offset) == 3
            self._hkl_offset = hkl_offset

        def get_reindexed_experiments_filename(self):
            return self._reindexed_experiments_filename

        def get_reindexed_reflections_filename(self):
            return self._reindexed_reflections_filename

        def run(self):
            """Assemble the dials.reindex command line and execute it."""
            logger.debug("Running dials.reindex")

            wd = self.get_working_directory()

            self.clear_command_line()

            if self._experiments_filename is not None:
                self.add_command_line(self._experiments_filename)
                if not self._reindexed_experiments_filename:
                    # Unique output name per wrapper invocation (xpid).
                    self._reindexed_experiments_filename = os.path.join(
                        wd, "%d_reindexed.expt" % self.get_xpid()
                    )
                self.add_command_line(
                    "output.experiments=%s" % self._reindexed_experiments_filename
                )

            if self._indexed_filename is not None:
                self.add_command_line(self._indexed_filename)
                if not self._reindexed_reflections_filename:
                    self._reindexed_reflections_filename = os.path.join(
                        wd, "%d_reindexed.refl" % self.get_xpid()
                    )
                self.add_command_line(
                    "output.reflections=%s" % self._reindexed_reflections_filename
                )

            if self._reference_filename is not None:
                self.add_command_line(
                    "reference.experiments=%s" % self._reference_filename
                )

            if self._reference_reflections is not None:
                self.add_command_line(
                    "reference.reflections=%s" % self._reference_reflections
                )

            if self._cb_op:
                self.add_command_line("change_of_basis_op=%s" % self._cb_op)

            if self._space_group:
                self.add_command_line("space_group=%s" % self._space_group)

            if self._hkl_offset is not None:
                self.add_command_line("hkl_offset=%i,%i,%i" % self._hkl_offset)

            self.start()
            self.close_wait()
            self.check_for_errors()

    return ReindexWrapper()
| bsd-3-clause | bbc1ef330cb9abbc3b673cb9d7292832 | 36.72549 | 82 | 0.575364 | 4.237885 | false | false | false | false |
xia2/xia2 | src/xia2/lib/SymmetryLib.py | 1 | 4740 | # A library of things to help with simple symmetry operation stuff.
#
# FIXED 17/NOV/06 add a method in here to give a list of likely, and then
# less likely, spacegroups based on an input spacegroup.
# For instance, if the input spacegroup is P 41 21 2 then
# another likely spacegroup is P 43 21 2 and less likely
# spacegroups are all those in the same pointgroup with
# different screw axes - e.g. P 41 2 2 (thinking of an Ed
# Mitchell example.) This should also allow in the likely
# case for body centred spacegroups where the screw axes
# are hidden, for example I 2 2 2/I 21 21 21 and I 2 3/I 21 3.
# This is now handled by Pointless in the "likely spacegroups"
# section.
#
# FIXME 06/DEC/06 need a mapping table from "old" spacegroup names to e.g. xHM
# for use with phenix.hyss.
from __future__ import annotations
import os
def lattice_to_spacegroup(lattice):
    """Convert a lattice e.g. tP into the minimal spacegroup number
    to represent this."""
    # Lowest-symmetry spacegroup number for each Bravais lattice symbol;
    # mC and mI both map to C2 (number 5).
    spacegroup_numbers = {
        "aP": 1,
        "mP": 3,
        "mC": 5,
        "mI": 5,
        "oP": 16,
        "oC": 20,
        "oF": 22,
        "oI": 23,
        "tP": 75,
        "tI": 79,
        "hP": 143,
        "hR": 146,
        "cP": 195,
        "cF": 196,
        "cI": 197,
    }
    if lattice in spacegroup_numbers:
        return spacegroup_numbers[lattice]
    raise RuntimeError('lattice "%s" unknown' % lattice)
def spacegroup_name_xHM_to_old(xHM):
    """Convert an extended Hermann-Mauguin spacegroup symbol to the
    corresponding "old" symbol, via the CCP4 syminfo.lib lookup table.

    Requires the CCP4 environment variable to be set.  If the input is
    already an old-style name it is returned unchanged; otherwise a
    RuntimeError is raised for unknown symbols.
    """
    # generate mapping table
    mapping = {}
    current_old = ""
    current_xHM = ""
    old_names = set()
    syminfo = os.path.join(os.environ["CCP4"], "lib", "data", "syminfo.lib")
    with open(syminfo) as fh:
        # Stateful parse: remember the most recent old/xHM symbols and commit
        # the pair when the enclosing spacegroup record ends.
        for line in fh.readlines():
            if line[0] == "#":
                continue
            if "symbol old" in line:
                current_old = line.split("'")[1]
            if "symbol xHM" in line:
                current_xHM = line.split("'")[1]
            if "end_spacegroup" in line:
                mapping[current_xHM] = current_old
                old_names.add(current_old)

    xHM = xHM.upper()
    if xHM not in mapping:
        # Tolerate input that is already an old-style name.
        if xHM in old_names:
            return xHM
        raise RuntimeError("spacegroup %s unknown" % xHM)

    return mapping[xHM]
def clean_reindex_operator(symop):
    """Return the reindex operator as a string with square brackets removed."""
    return str(symop).translate(str.maketrans("", "", "[]"))
def lattices_in_order():
    """Return a list of possible crystal lattices (e.g. tP) in order of
    increasing symmetry..."""
    # eliminated this entry ... 'oA': 38,
    candidate_lattices = [
        "aP",
        "mP",
        "mC",
        "oP",
        "oC",
        "oF",
        "oI",
        "tP",
        "tI",
        "hP",
        "hR",
        "cP",
        "cF",
        "cI",
    ]
    # Order by the minimal spacegroup number implied by each lattice; the
    # mapping is injective over this list, so the sort is unambiguous.
    return sorted(candidate_lattices, key=lattice_to_spacegroup)
def sort_lattices(lattices):
    """Return the given lattices arranged in order of increasing symmetry."""
    wanted = set(lattices)
    return [lattice for lattice in lattices_in_order() if lattice in wanted]
def lauegroup_to_lattice(lauegroup):
    """Convert a Laue group representation (from pointless, e.g. I m m m)
    to something useful, like the implied crystal lattice (in this
    case, oI.)"""
    # this has been calculated from the results of Ralf GK's sginfo and a
    # little fiddling...
    #
    # 19/feb/08 added mI record as pointless has started producing this -
    # why??? this is not a "real" spacegroup... may be able to switch this
    # off...
    # 'I2/m': 'mI',
    lattice_by_lauegroup = {
        "Ammm": "oA",
        "C2/m": "mC",
        "Cmmm": "oC",
        "Fm-3": "cF",
        "Fm-3m": "cF",
        "Fmmm": "oF",
        "H-3": "hR",
        "H-3m": "hR",
        "R-3:H": "hR",
        "R-3m:H": "hR",
        "I4/m": "tI",
        "I4/mmm": "tI",
        "Im-3": "cI",
        "Im-3m": "cI",
        "Immm": "oI",
        "P-1": "aP",
        "P-3": "hP",
        "P-3m": "hP",
        "P2/m": "mP",
        "P4/m": "tP",
        "P4/mmm": "tP",
        "P6/m": "hP",
        "P6/mmm": "hP",
        "Pm-3": "cP",
        "Pm-3m": "cP",
        "Pmmm": "oP",
    }
    # Drop "1" placeholder symbols and concatenate the rest, so e.g.
    # "C 1 2/m 1" becomes the lookup key "C2/m".
    condensed = "".join(symbol for symbol in lauegroup.split() if symbol != "1")
    return lattice_by_lauegroup[condensed]
| bsd-3-clause | 8af08357cadd451c00b547d3ef8ffe80 | 24.621622 | 78 | 0.512447 | 3.246575 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/Dials/Merge.py | 1 | 3558 | from __future__ import annotations
import logging
from xia2.Driver.DriverFactory import DriverFactory
logger = logging.getLogger("xia2.Wrappers.Dials.Merge")
def DialsMerge(DriverType=None):
    """A factory for DialsMergeWrapper classes.

    Returns a wrapper around the ``dials.merge`` command line program; the
    concrete base class is chosen by the DriverFactory.
    """
    DriverInstance = DriverFactory.Driver(DriverType)

    class DialsMergeWrapper(DriverInstance.__class__):
        """A wrapper for dials.merge"""

        def __init__(self):
            # generic things
            super().__init__()
            self.set_executable("dials.merge")

            # clear all the header junk
            self.reset()

            # Required inputs (asserted in run()).
            self._experiments_filename = None
            self._reflections_filename = None
            # Optional outputs / parameters; only passed on if set.
            self._mtz_filename = None
            self._truncate = False
            self._html_report = None
            self._project_name = None
            self._crystal_names = None
            self._dataset_names = None
            self._partiality_threshold = None

        def set_partiality_threshold(self, v):
            self._partiality_threshold = v

        def set_project_name(self, name):
            self._project_name = name

        def set_crystal_names(self, names):
            self._crystal_names = names

        def set_dataset_names(self, names):
            self._dataset_names = names

        def set_experiments_filename(self, experiments_filename):
            self._experiments_filename = experiments_filename

        def get_experiments_filename(self):
            return self._experiments_filename

        def set_reflections_filename(self, reflections_filename):
            self._reflections_filename = reflections_filename

        def get_reflections_filename(self):
            return self._reflections_filename

        def set_mtz_filename(self, filename):
            self._mtz_filename = filename

        def get_mtz_filename(self):
            return self._mtz_filename

        def set_html_report(self, filename):
            self._html_report = filename

        def run(self):
            """Run dials.merge"""
            self.clear_command_line()

            assert self._experiments_filename
            assert self._reflections_filename
            self.add_command_line(self._reflections_filename)
            self.add_command_line(self._experiments_filename)
            self.add_command_line("truncate=%s" % self._truncate)
            if self._mtz_filename:
                self.add_command_line("output.mtz=%s" % self._mtz_filename)
            if self._project_name:
                self.add_command_line("output.project_name=%s" % self._project_name)
            if self._crystal_names:
                self.add_command_line("output.crystal_names=%s" % self._crystal_names)
            if self._dataset_names:
                self.add_command_line("output.dataset_names=%s" % self._dataset_names)
            if self._partiality_threshold:
                self.add_command_line(
                    "partiality_threshold=%s" % self._partiality_threshold
                )
            # Bug fix: previously output.html was added unconditionally, so if
            # no report was requested dials.merge received "output.html=None"
            # and wrote a file literally named "None". Guard it like the other
            # optional outputs.
            if self._html_report:
                self.add_command_line("output.html=%s" % self._html_report)

            self.start()
            self.close_wait()

            # check for errors
            try:
                self.check_for_errors()
            except Exception:
                logger.warning(
                    "dials.merge failed, see log file for more details:\n  %s",
                    self.get_log_file(),
                )
                raise

            logger.debug("dials.merge status: OK")

    return DialsMergeWrapper()
| bsd-3-clause | 342dd69ea78e0238056ef20a7bd0bbdb | 30.767857 | 86 | 0.571669 | 4.286747 | false | false | false | false |
xia2/xia2 | src/xia2/Modules/Indexer/IndexerSelectImages.py | 1 | 2000 | # Code for the selection of images for autoindexing - selecting lone images
# from a list or wedges from a list, for XDS.
from __future__ import annotations
import logging
logger = logging.getLogger("xia2.Modules.Indexer.IndexerSelectImages")
def index_select_images_lone(phi_width, images):
    """Select images close to 0, 45 and 90 degrees from the list of available
    frames. N.B. we assume all frames have the same oscillation width."""
    first = images[0]
    offset = first - 1
    # Frame numbers corresponding to roughly 45 and 90 degrees of rotation.
    at_ninety = offset + int(90.0 / phi_width)
    if at_ninety in images:
        return [first, offset + int(45.0 / phi_width), at_ninety]
    # Less than 90 degrees of data: fall back to first, middle (if at least
    # three images are available) and last.
    chosen = [first]
    if len(images) >= 3:
        chosen.append(images[len(images) // 2 - 1])
    chosen.append(images[-1])
    return chosen
def index_select_image_wedges_user(sweep_id, phi_width, images):
    """Interactively ask the user for image wedges to use for indexing.

    The default (returned on empty input) is a single wedge covering the full
    image range; otherwise input of the form "1-10, 45-55" is parsed, with the
    prompt repeated until valid input is received.
    """
    wedges = [(min(images), max(images))]
    logger.info(
        "Existing images for indexing %s: %s",
        sweep_id,
        ", ".join("%d-%d" % wedge for wedge in wedges),
    )

    while True:
        reply = input(">")
        if not reply.strip():
            # Accept the default full-range wedge.
            return wedges
        try:
            wedges = [
                tuple(int(value.strip()) for value in chunk.split("-"))
                for chunk in reply.split(",")
            ]
        except ValueError:
            # Unparseable input - prompt again.
            continue
        logger.info(
            "New images for indexing: %s",
            ", ".join("%d-%d" % wedge for wedge in wedges),
        )
        return wedges
if __name__ == "__main__":
    # Minimal self-test: check image selection for 45/90 degree wedges over
    # 90- and 360-image sweeps at various oscillation widths.
    images = list(range(1, 91))
    assert index_select_images_lone(0.5, images) == [1, 45, 90]
    assert index_select_images_lone(1.0, images) == [1, 45, 90]
    assert index_select_images_lone(2.0, images) == [1, 22, 45]

    images = list(range(1, 361))
    assert index_select_images_lone(0.5, images) == [1, 90, 180]
    assert index_select_images_lone(1.0, images) == [1, 45, 90]
| bsd-3-clause | fd859ce6fd5b0d297bfd96f4255af83d | 27.571429 | 86 | 0.601 | 3.454231 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/Dials/ExportXDSASCII.py | 1 | 1741 | from __future__ import annotations
import logging
from xia2.Driver.DriverFactory import DriverFactory
logger = logging.getLogger("xia2.Wrappers.Dials.ExportXDSASCII")
def ExportXDSASCII(DriverType=None):
    """A factory for ExportXDSASCIISWrapper classes.

    Returns a wrapper around ``dials.export format=xds_ascii``, which writes
    DIALS-processed data in XDS_ASCII (HKL) format.
    """
    DriverInstance = DriverFactory.Driver(DriverType)

    class ExportXDSASCIISWrapper(DriverInstance.__class__):
        def __init__(self):
            DriverInstance.__class__.__init__(self)
            self.set_executable("dials.export")

            # Required inputs (asserted in run()).
            self._experiments_filename = None
            self._reflections_filename = None
            # Output HKL file name; defaults to the dials.export default.
            self._hkl_filename = "DIALS.HKL"

        def set_experiments_filename(self, experiments_filename):
            self._experiments_filename = experiments_filename

        def set_reflections_filename(self, reflections_filename):
            self._reflections_filename = reflections_filename

        def set_hkl_filename(self, hkl_filename):
            self._hkl_filename = hkl_filename

        def get_hkl_filename(self):
            return self._hkl_filename

        def run(self):
            """Assemble the dials.export command line and execute it."""
            logger.debug("Running dials.export")

            assert self._experiments_filename is not None
            assert self._reflections_filename is not None

            self.clear_command_line()
            self.add_command_line(self._experiments_filename)
            self.add_command_line(self._reflections_filename)
            if self._hkl_filename is not None:
                self.add_command_line("xds_ascii.hklout=%s" % self._hkl_filename)
            self.add_command_line("format=xds_ascii")
            self.start()
            self.close_wait()
            self.check_for_errors()

    return ExportXDSASCIISWrapper()
| bsd-3-clause | 55c60d204252191c7f12c56f4c58aa35 | 32.480769 | 81 | 0.639288 | 4.011521 | false | false | false | false |
xia2/xia2 | src/xia2/cli/plot_multiplicity.py | 1 | 12688 | from __future__ import annotations
import json
import sys
import iotbx.phil
from cctbx.miller.display import render_2d, scene
from dials.util import Sorry
from iotbx.gui_tools.reflections import get_array_description
from iotbx.reflection_file_reader import any_reflection_file
from scitbx.array_family import flex
class MultiplicityViewPng(render_2d):
    """Render a 2d multiplicity plot of a reciprocal-space slice to a PNG file.

    The base render_2d drives the drawing via the draw_* callbacks below;
    points are buffered and plotted in bulk with matplotlib scatter calls.
    The figure is written to settings.plot.filename during construction.
    """

    def __init__(self, scene, settings=None):
        import matplotlib

        # Non-interactive backend: we only write a file, never show a window.
        matplotlib.use("Agg")
        from matplotlib import pyplot

        render_2d.__init__(self, scene, settings)

        # Buffers filled by the draw_*_circle callbacks during render().
        self._open_circle_points = flex.vec2_double()
        self._open_circle_radii = []
        self._open_circle_colors = []
        self._filled_circle_points = flex.vec2_double()
        self._filled_circle_radii = []
        self._filled_circle_colors = []

        self.fig, self.ax = pyplot.subplots(figsize=self.settings.size_inches)
        self.render(self.ax)
        pyplot.close()

    def GetSize(self):
        return self.fig.get_size_inches() * self.fig.dpi  # size in pixels

    def draw_line(self, ax, x1, y1, x2, y2):
        ax.plot([x1, x2], [y1, y2], c=self._foreground)

    def draw_text(self, ax, text, x, y):
        ax.text(x, y, text, color=self._foreground, size=self.settings.font_size)

    def draw_open_circle(self, ax, x, y, radius, color=None):
        # Buffer only - actual plotting happens in bulk in render().
        self._open_circle_points.append((x, y))
        self._open_circle_radii.append(2 * radius)
        if color is None:
            color = self._foreground
        self._open_circle_colors.append(color)

    def draw_filled_circle(self, ax, x, y, radius, color):
        # Buffer only - actual plotting happens in bulk in render().
        self._filled_circle_points.append((x, y))
        self._filled_circle_radii.append(2 * radius)
        self._filled_circle_colors.append(color)

    def render(self, ax):
        from matplotlib import colors, pyplot

        render_2d.render(self, ax)
        if self._open_circle_points.size():
            # Open circles mark missing reflections.
            x, y = self._open_circle_points.parts()
            ax.scatter(
                x.as_numpy_array(),
                y.as_numpy_array(),
                s=self._open_circle_radii,
                marker="o",
                edgecolors=self._open_circle_colors,
                facecolors=None,
            )
        if self._filled_circle_points.size():
            # Filled circles are measured reflections, coloured by multiplicity.
            x, y = self._filled_circle_points.parts()

            # use pyplot colormaps then we can more easily get a colorbar
            data = self.scene.multiplicities.data()
            cmap_d = {
                "heatmap": "hot",
                "redblue": colors.LinearSegmentedColormap.from_list(
                    "RedBlue", ["b", "r"]
                ),
                "grayscale": "Greys_r" if self.settings.black_background else "Greys",
                "mono": (
                    colors.LinearSegmentedColormap.from_list("mono", ["w", "w"])
                    if self.settings.black_background
                    else colors.LinearSegmentedColormap.from_list(
                        "mono", ["black", "black"]
                    )
                ),
            }

            # Fall back to interpreting the setting as a matplotlib cmap name.
            cm = cmap_d.get(self.settings.color_scheme, self.settings.color_scheme)
            if isinstance(cm, str):
                cm = pyplot.cm.get_cmap(cm)

            im = ax.scatter(
                x.as_numpy_array(),
                y.as_numpy_array(),
                s=self._filled_circle_radii,
                marker="o",
                c=data.select(self.scene.slice_selection).as_numpy_array(),
                edgecolors="none",
                vmin=0,
                vmax=flex.max(data),
                cmap=cm,
            )
            # colorbar
            cb = self.fig.colorbar(im, ax=ax)
            [t.set_color(self._foreground) for t in cb.ax.get_yticklabels()]
            [t.set_fontsize(self.settings.font_size) for t in cb.ax.get_yticklabels()]

        self.ax.set_aspect("equal")
        self.ax.set_facecolor(self._background)

        xmax, ymax = self.GetSize()
        ax.set_xlim(0, xmax)
        ax.set_ylim(0, ymax)
        # Image-style coordinates: origin at top left.
        ax.invert_yaxis()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        self.fig.tight_layout()
        self.fig.savefig(
            self.settings.plot.filename, bbox_inches="tight", facecolor=self._background
        )
class MultiplicityViewJson(render_2d):
    """Render a 2d multiplicity plot of a reciprocal-space slice as
    plotly-style JSON.

    The base render_2d drives the drawing via the draw_* callbacks below;
    geometry is buffered and assembled into a {"data": ..., "layout": ...}
    dict which is written to settings.json.filename during construction.
    """

    def __init__(self, scene, settings=None):
        render_2d.__init__(self, scene, settings)

        # Buffers filled by the draw_* callbacks during render().
        self._open_circle_points = flex.vec2_double()
        self._open_circle_radii = []
        self._open_circle_colors = []
        self._filled_circle_points = flex.vec2_double()
        self._filled_circle_radii = []
        self._filled_circle_colors = []
        self._text = {"x": [], "y": [], "text": []}
        self._lines = []

        json_d = self.render(None)
        if self.settings.json.compact:
            indent = None
        else:
            indent = 2
        with open(self.settings.json.filename, "w") as fh:
            json.dump(json_d, fh, indent=indent)

    def GetSize(self):
        return 1600, 1600  # size in pixels

    def draw_line(self, ax, x1, y1, x2, y2):
        self._lines.append((x1, y1, x2, y2))

    def draw_text(self, ax, text, x, y):
        self._text["x"].append(x)
        self._text["y"].append(y)
        self._text["text"].append(text)

    def draw_open_circle(self, ax, x, y, radius, color=None):
        self._open_circle_points.append((x, y))
        self._open_circle_radii.append(2 * radius)
        if color is None:
            color = self._foreground
        self._open_circle_colors.append(color)

    def draw_filled_circle(self, ax, x, y, radius, color):
        self._filled_circle_points.append((x, y))
        self._filled_circle_radii.append(2 * radius)
        self._filled_circle_colors.append(color)

    def render(self, ax):
        """Assemble and return the plotly-style dict from the buffered data."""
        render_2d.render(self, ax)

        data = []
        if self._open_circle_points.size():
            # Open circles mark missing reflections.
            x, y = self._open_circle_points.parts()
            z = self._open_circle_colors
            data.append(
                {
                    "x": list(x.round(1)),
                    "y": list(y.round(1)),
                    #'z': list(z),
                    "type": "scatter",
                    "mode": "markers",
                    "name": "missing reflections",
                    "showlegend": False,
                    "marker": {
                        #'color': list(z),
                        "color": (
                            "white" if self.settings.black_background else "black"
                        ),
                        "line": {
                            #'color': 'black',
                            "width": 0
                        },
                        "symbol": "circle",
                        "size": 5,
                    },
                }
            )
        if self._filled_circle_points.size():
            # Filled circles are measured reflections, coloured by multiplicity.
            x, y = self._filled_circle_points.parts()
            z = self.scene.multiplicities.data().select(self.scene.slice_selection)

            # why doesn't this work?
            # colorscale = []
            # assert len(z) == len(self._filled_circle_colors)
            # for zi in range(flex.max(z)+1):
            # i = flex.first_index(z, zi)
            # if i is None: continue
            # print i, self._filled_circle_colors[i], 'rgb(%i,%i,%i)' %tuple(rgb * 264 for rgb in self._filled_circle_colors[i])
            # colorscale.append([zi, 'rgb(%i,%i,%i)' %self._filled_circle_colors[i]])

            # Map our colour-scheme names to plotly colorscale names.
            cmap_d = {
                "rainbow": "Jet",
                "heatmap": "Hot",
                "redblue": "RdbU",
                "grayscale": "Greys",
                "mono": None,
            }

            color = list(z)
            colorscale = cmap_d.get(
                self.settings.color_scheme, self.settings.color_scheme
            )
            if self.settings.color_scheme == "mono":
                color = "black"
                colorscale = None

            data.append(
                {
                    "x": list(x.round(1)),
                    "y": list(y.round(1)),
                    #'z': list(z),
                    "type": "scatter",
                    "mode": "markers",
                    "name": "multiplicity",
                    "showlegend": False,
                    "marker": {
                        "color": color,
                        "colorscale": colorscale,
                        "cmin": 0,
                        "cmax": flex.max(self.scene.multiplicities.data()),
                        "showscale": True,
                        "colorbar": {"title": "Multiplicity", "titleside": "right"},
                        "line": {
                            #'color': 'white',
                            "width": 0
                        },
                        "symbol": "circle",
                        "size": 5,
                    },
                }
            )

        # Axis labels collected via draw_text.
        text = {"mode": "text", "showlegend": False, "textposition": "top right"}
        text.update(self._text)
        data.append(text)

        # Axis lines collected via draw_line, rendered as layout shapes.
        shapes = []
        for x0, y0, x1, y1 in self._lines:
            # color = 'rgb(%i,%i,%i)' %tuple(rgb * 264 for rgb in self._foreground)
            color = "black"
            shapes.append(
                {
                    "type": "line",
                    "x0": x0,
                    "y0": y0,
                    "x1": x1,
                    "y1": y1,
                    "layer": "below",
                    "line": {"color": color, "width": 2},
                }
            )

        d = {
            "data": data,
            "layout": {
                "plot_bgcolor": "rgb(%i,%i,%i)"
                % tuple(rgb * 264 for rgb in self._background),
                "title": "Multiplicity plot (%s=%s)"
                % (self.settings.slice_axis, self.settings.slice_index),
                "shapes": shapes,
                "hovermode": False,
                "xaxis": {
                    "showgrid": False,
                    "zeroline": False,
                    "showline": False,
                    "ticks": "",
                    "showticklabels": False,
                },
                "yaxis": {
                    # Image-style coordinates: origin at top left.
                    "autorange": "reversed",
                    "showgrid": False,
                    "zeroline": False,
                    "showline": False,
                    "ticks": "",
                    "showticklabels": False,
                },
            },
        }
        return d
master_phil = iotbx.phil.parse(
"""
include scope cctbx.miller.display.master_phil
unit_cell = None
.type = unit_cell
space_group = None
.type = space_group
plot {
filename = multiplicities.png
.type = path
}
json {
filename = None
.type = path
compact = True
.type = bool
}
size_inches = 20,20
.type = floats(size=2, value_min=0)
font_size = 20
.type = int(value_min=1)
""",
process_includes=True,
)
def run(args=sys.argv[1:]):
    """Command-line entry point: parse arguments, load the first suitable
    miller array from the given reflection file and plot its multiplicity."""
    pcl = iotbx.phil.process_command_line_with_files(
        args=args,
        master_phil=master_phil,
        reflection_file_def="data",
        pdb_file_def="symmetry_file",
        usage_string="xia2.plot_multiplicity scaled_unmerged.mtz [options]",
    )
    settings = pcl.work.extract()
    file_name = settings.data

    try:
        hkl_file = any_reflection_file(file_name)
    except Exception as e:
        raise Sorry(str(e))
    # Keep data unmerged so true multiplicities are available.
    arrays = hkl_file.as_miller_arrays(merge_equivalents=False)

    valid_arrays = []
    array_info = []
    for array in arrays:
        # Skip array types that cannot be plotted: phase-probability
        # (Hendrickson-Lattman) arrays and anything not real/complex valued.
        if array.is_hendrickson_lattman_array():
            continue
        if (not array.is_real_array()) and (not array.is_complex_array()):
            continue
        labels = array.info().label_string()
        desc = get_array_description(array)
        array_info.append(f"{labels} ({desc})")
        valid_arrays.append(array)
    if len(valid_arrays) == 0:
        msg = "No arrays of the supported types in this file."
        raise Sorry(msg)
    # Plot the first valid array found in the file.
    miller_array = valid_arrays[0]
    plot_multiplicity(miller_array, settings)
def plot_multiplicity(miller_array, settings):
    """Write multiplicity plots for miller_array as PNG and/or JSON, as
    requested by settings.plot.filename / settings.json.filename."""
    # Force the display options required for a multiplicity slice plot.
    for option in (
        "scale_colors_multiplicity",
        "scale_radii_multiplicity",
        "expand_to_p1",
        "expand_anomalous",
        "slice_mode",
    ):
        setattr(settings, option, True)

    if settings.plot.filename is not None:
        MultiplicityViewPng(
            scene(miller_array, settings, merge=True), settings=settings
        )
    if settings.json.filename is not None:
        MultiplicityViewJson(
            scene(miller_array, settings, merge=True), settings=settings
        )
| bsd-3-clause | 2f686234311a281abbbbe4e44e696db5 | 33.016086 | 129 | 0.493064 | 3.844848 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/Dials/ImportXDS.py | 1 | 3161 | from __future__ import annotations
import os
from xia2.Driver.DriverFactory import DriverFactory
def ImportXDS(DriverType=None):
    """A factory for ImportXDSWrapper classes.

    Returns a wrapper around ``dials.import_xds`` which can convert XDS
    SPOT.XDS, INTEGRATE.HKL or XPARM.XDS files into DIALS reflection /
    experiment files.  Exactly one of the three inputs should be set before
    calling run().
    """
    DriverInstance = DriverFactory.Driver(DriverType)

    class ImportXDSWrapper(DriverInstance.__class__):
        def __init__(self):
            super().__init__()
            self.set_executable("dials.import_xds")

            # Possible XDS-format inputs; which one is set selects the
            # conversion mode in run().
            self._spot_xds = None
            self._integrate_hkl = None
            self._xparm_xds = None
            # Outputs (also an input when converting INTEGRATE.HKL).
            self._experiments_json = None
            self._reflection_filename = None

        def set_spot_xds(self, spot_xds):
            self._spot_xds = spot_xds

        def set_integrate_hkl(self, integrate_hkl):
            self._integrate_hkl = integrate_hkl

        def set_xparm_xds(self, xparm_xds):
            self._xparm_xds = xparm_xds

        def set_experiments_json(self, experiments_json):
            self._experiments_json = experiments_json

        def get_reflection_filename(self):
            return self._reflection_filename

        def get_experiments_json(self):
            return self._experiments_json

        def run(self):
            """Assemble the dials.import_xds command line and execute it."""
            self.clear_command_line()

            if self._spot_xds is not None:
                # SPOT.XDS -> reflection table.
                self._reflection_filename = os.path.join(
                    self.get_working_directory(), "%s_spot_xds.refl" % self.get_xpid()
                )
                self.add_command_line(self._spot_xds)
                self.add_command_line("output.filename=%s" % self._reflection_filename)
                self.add_command_line("method=reflections")

            elif self._integrate_hkl is not None:
                # INTEGRATE.HKL -> reflection table; requires an existing
                # experiments json describing the models.
                self._reflection_filename = os.path.join(
                    self.get_working_directory(),
                    "%s_integrate_hkl.refl" % self.get_xpid(),
                )
                assert self._experiments_json is not None
                self.add_command_line(self._integrate_hkl)
                self.add_command_line(self._experiments_json)
                self.add_command_line("output.filename=%s" % self._reflection_filename)
                self.add_command_line("method=reflections")

            elif self._xparm_xds is not None:
                # XPARM.XDS -> experiments json.
                if self._experiments_json is None:
                    self._experiments_json = os.path.join(
                        self.get_working_directory(),
                        "%s_xparm_xds.expt" % self.get_xpid(),
                    )
                directory, xparm = os.path.split(self._xparm_xds)
                self.add_command_line(directory)
                self.add_command_line("xds_file=%s" % xparm)
                self.add_command_line("output.filename=%s" % self._experiments_json)

            self.start()
            self.close_wait()
            self.check_for_errors()

            # Sanity-check that the expected output was actually produced.
            if self._reflection_filename is not None:
                assert os.path.exists(
                    self._reflection_filename
                ), self._reflection_filename
            else:
                assert os.path.exists(self._experiments_json), self._experiments_json

    return ImportXDSWrapper()
| bsd-3-clause | 2116e86030ddb4d610ac86cf31ae4530 | 34.920455 | 87 | 0.554571 | 4.047375 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/Phenix/LatticeSymmetry.py | 1 | 6087 | from __future__ import annotations
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Syminfo import spacegroup_number_to_name
from xia2.lib.SymmetryLib import lauegroup_to_lattice
def LatticeSymmetry(DriverType=None):
    """A factory for the LatticeSymmetry wrappers."""

    DriverInstance = DriverFactory.Driver("simple")

    class LatticeSymmetryWrapper(DriverInstance.__class__):
        """A wrapper class for iotbx.lattice_symmetry."""

        def __init__(self):
            DriverInstance.__class__.__init__(self)

            self.set_executable("iotbx.lattice_symmetry")

            if "phaser-1.3" in self.get_executable():
                raise RuntimeError("unsupported version of lattice_symmetry")

            self._cell = None
            self._spacegroup = None

            # following on from the othercell wrapper...
            # Per-lattice results, populated by generate(): for each Bravais
            # lattice we keep the best (lowest-distortion) solution found.
            self._lattices = []
            self._distortions = {}
            self._cells = {}
            self._reindex_ops = {}
            self._reindex_ops_basis = {}

        def set_cell(self, cell):
            self._cell = cell

        def set_spacegroup(self, spacegroup):
            self._spacegroup = spacegroup

        def set_lattice(self, lattice):
            # Map a Bravais lattice symbol to the lowest-symmetry space group
            # number of that lattice type, then to its space group name.
            lattice_to_spacegroup = {
                "aP": 1,
                "mP": 3,
                "mC": 5,
                "oP": 16,
                "oC": 20,
                "oF": 22,
                "oI": 23,
                "tP": 75,
                "tI": 79,
                "hP": 143,
                "hR": 146,
                "cP": 195,
                "cF": 196,
                "cI": 197,
            }

            self._spacegroup = spacegroup_number_to_name(lattice_to_spacegroup[lattice])

            # bug 22/JUL/08 latest lattice symmetry no longer recognises
            # the spacegroup H3...
            if self._spacegroup == "H3":
                self._spacegroup = "R3:H"

        def generate(self):
            """Run iotbx.lattice_symmetry and digest its output into the
            per-lattice result dictionaries. Requires set_cell() and either
            set_spacegroup() or set_lattice() to have been called first."""
            if not self._cell:
                raise RuntimeError("no unit cell specified")
            if not self._spacegroup:
                raise RuntimeError("no spacegroup specified")

            self.add_command_line("--unit_cell=%f,%f,%f,%f,%f,%f" % tuple(self._cell))
            self.add_command_line("--space_group=%s" % self._spacegroup)

            self.start()
            self.close_wait()

            # now wade through all of the options and see which comes
            # out best for each lattice class... - as defined by the
            # minimum value of Maximal angular difference
            #
            # The output is a sequence of "key: value" records; accumulate
            # them into `state` until the "Maximal angular difference" line,
            # which terminates one candidate solution.
            state = {}

            for o in self.get_all_output():
                if ":" in o:
                    count = o.find(":")
                    left = o[:count]
                    right = o[count + 1 :]
                    state[left.strip()] = right.strip()

                if "Maximal angular difference" in o:
                    # transform & digest results
                    distortion = float(state["Maximal angular difference"].split()[0])

                    # this appears to be getting the wrong cell - I want the
                    # one which corresponds to the correct lattice, yes?!
                    # cell = map(float, state[
                    # 'Symmetry-adapted cell'].replace(
                    # '(', ' ').replace(')', ' ').replace(',', ' ').split())

                    cell = list(
                        map(
                            float,
                            state["Unit cell"]
                            .replace("(", " ")
                            .replace(")", " ")
                            .replace(",", " ")
                            .split(),
                        )
                    )

                    lauegroup = ""

                    # FIXME for more recent versions of cctbx the conventional
                    # setting I 1 2/m 1 has appeared -> look at the
                    # 'Symmetry in minimum-lengths cell' instead (equivalent
                    # to changing lkey here to 'Conventional setting'
                    #
                    # No, can't do this because this now reports the Hall
                    # symmetry not the Laue group. Will have to cope with
                    # the I setting instead :o(

                    lkey = "Symmetry in minimum-lengths cell"

                    for token in state[lkey].split("(")[0].split():
                        if token == "1":
                            continue
                        lauegroup += token

                    # FIXME bug 3157 - there appears to be a bug in
                    # recent versions of cctbx (cf. above) which means
                    # a lauegroup of 'R-3m:R' is given -> correct this
                    # in the string. Also :h as well :o(

                    lauegroup = lauegroup.replace(":R", ":H")
                    lauegroup = lauegroup.replace(":h", ":H")

                    lattice = lauegroup_to_lattice(lauegroup)

                    reindex_basis = state["Change of basis"]
                    reindex = state["Inverse"]

                    # Keep only the lowest-distortion solution per lattice.
                    if lattice not in self._lattices:
                        self._lattices.append(lattice)
                        self._distortions[lattice] = distortion
                        self._cells[lattice] = cell
                        self._reindex_ops[lattice] = reindex
                        self._reindex_ops_basis[lattice] = reindex_basis
                    elif distortion < self._distortions[lattice]:
                        self._distortions[lattice] = distortion
                        self._cells[lattice] = cell
                        self._reindex_ops[lattice] = reindex
                        self._reindex_ops_basis[lattice] = reindex_basis

                    # Reset for the next candidate solution in the output.
                    state = {}

        def get_lattices(self):
            return self._lattices

        def get_cell(self, lattice):
            return self._cells[lattice]

        def get_reindex_op(self, lattice):
            return self._reindex_ops[lattice]

    return LatticeSymmetryWrapper()
| bsd-3-clause | dc610e3bc221202e1b6dd1f9101d86cc | 35.232143 | 88 | 0.469361 | 4.696759 | false | false | false | false |
xia2/xia2 | src/xia2/Modules/SSX/xia2_ssx_reduce.py | 1 | 6275 | from __future__ import annotations
import logging
from pathlib import Path
import iotbx.phil
from libtbx import Auto
from libtbx.introspection import number_of_processors
from xia2.Modules.SSX.data_reduction_definitions import ReductionParams
from xia2.Modules.SSX.data_reduction_interface import get_reducer
from xia2.Modules.SSX.util import report_timing
phil_str = """
input {
directory = None
.type = str
.multiple = True
.help = "Path to directory containing integrated_*.{refl,expt} files"
.expert_level = 1
reflections = None
.type = str
.multiple = True
.help = "Path to an integrated reflections file"
.expert_level = 1
experiments = None
.type = str
.multiple = True
.help = "Path to an integrated experiments file"
.expert_level = 1
processed_directory = None
.type = str
.multiple = True
.help = "Path to previously reduced data"
.expert_level = 1
}
multiprocessing.nproc = Auto
.type = int
.expert_level = 1
batch_size = None
.type = int
.help = "An alias for reduction_batch_size"
.expert_level = 1
d_min = None
.type = float
.expert_level = 1
"""
data_reduction_phil_str = """
reduction_batch_size=1000
.type = int
.help = "The minimum batch size for consistent reindexing of data with cosym."
.expert_level=2
reference = None
.type = path
.help = "A reference to use for scaling + indexing ambiguity resolution."
"Can be a model pdb/cif file or a cif/mtz data file contaning"
"intensity information."
.expert_level = 1
clustering {
threshold=None
.type = float(value_min=0, allow_none=True)
.help = "If no data has previously been reduced, then unit cell clustering"
"is performed. This threshold is the value at which the dendrogram"
"will be split in dials.cluster_unit_cell (the default value there"
"is 5000). A higher threshold value means that unit cells with greater"
"differences will be retained."
"Only the largest cluster obtained from cutting at this threshold is"
"used for data reduction. Setting the threshold to None/0 will"
"skip this unit cell clustering and proceed to filtering based on"
"the absolute angle/length tolerances."
.expert_level = 2
absolute_angle_tolerance = 1.0
.type = float(value_min=0, allow_none=True)
.help = "Filter the integrated data based on the median unit cell angles"
"and this tolerance. If set to None/0, filtering will be skipped."
.expert_level = 2
absolute_length_tolerance = 1.0
.type = float(value_min=0, allow_none=True)
.help = "Filters the integrated data based on the median unit cell lengths"
"and this tolerance. If set to None/0, filtering will be skipped."
.expert_level = 2
central_unit_cell = None
.type = unit_cell
.help = "Filter the integrated data based on the tolerances about these cell"
"parameters, rather than the median cell."
.expert_level = 2
}
symmetry {
space_group = None
.type = space_group
.expert_level = 2
lattice_symmetry_max_delta = 2
.type = float
.help = "Tolerance for lattice symmetry analysis, used for example when"
"determining possible symmetries for ambiguity resolution."
.expert_level=3
phil = None
.type = path
.help = "Phil options file to use for symmetry analysis with dials.cosym. "
"Parameters defined in the xia2.ssx phil scope will take precedent"
"over identical options defined in the phil file."
.expert_level = 3
}
scaling {
anomalous = False
.type = bool
.help = "If True, keep anomalous pairs separate during scaling."
.expert_level = 1
model = None
.type = path
.help = "An alias for the reference= option"
.expert_level = 2
phil = None
.type = path
.help = "Phil options file to use for dials.scale. "
"Parameters defined in the xia2.ssx phil scope will take precedent"
"over identical options defined in the phil file."
.expert_level = 3
}
"""
full_phil_str = phil_str + data_reduction_phil_str
xia2_logger = logging.getLogger(__name__)
@report_timing
def run_xia2_ssx_reduce(
    root_working_directory: Path, params: iotbx.phil.scope_extract
) -> None:
    """Run SSX data reduction, writing output under *root_working_directory*.

    Input may be given as directories of integrated files, as explicit
    reflection+experiment file pairs, or as previously processed directories;
    raises ValueError when the combination of inputs is invalid.
    """
    if params.multiprocessing.nproc is Auto:
        params.multiprocessing.nproc = number_of_processors(return_value_if_unknown=1)
    # batch_size is a user-facing alias for reduction_batch_size.
    if params.batch_size:
        params.reduction_batch_size = params.batch_size

    reduction_params = ReductionParams.from_phil(params)
    reducer_class = get_reducer(reduction_params)

    previously_processed = [
        Path(d).resolve() for d in (params.input.processed_directory or [])
    ]

    if params.input.directory:
        if params.input.reflections or params.input.experiments:
            xia2_logger.warning(
                "Only a directory or reflections+experiments can be given\n"
                "as input. Proceeding using only directories"
            )
        input_dirs = [Path(i).resolve() for i in params.input.directory]
        reducer = reducer_class.from_directories(
            root_working_directory,
            input_dirs,
            previously_processed,
            reduction_params,
        )
    elif params.input.reflections or params.input.experiments:
        if not (params.input.reflections and params.input.experiments):
            raise ValueError("Reflections and experiments files must both be specified")
        refl_files = [Path(i).resolve() for i in params.input.reflections]
        expt_files = [Path(i).resolve() for i in params.input.experiments]
        reducer = reducer_class.from_files(
            root_working_directory,
            refl_files,
            expt_files,
            previously_processed,
            reduction_params,
        )
    elif previously_processed:
        reducer = reducer_class.from_processed_only(
            root_working_directory, previously_processed, reduction_params
        )
    else:
        raise ValueError(reducer_class._no_input_error_msg)

    reducer.run()
| bsd-3-clause | 647a40ae205b84cdf909306da9409bd7 | 34.252809 | 88 | 0.66502 | 3.78012 | false | false | false | false |
xia2/xia2 | src/xia2/Modules/SSX/data_integration_programs.py | 1 | 24606 | from __future__ import annotations
import copy
import json
import logging
import os
from dataclasses import dataclass
from functools import reduce
from pathlib import Path
from typing import List, Optional, Tuple
import iotbx.phil
from cctbx import crystal, sgtbx, uctbx
from dials.algorithms.clustering.unit_cell import Cluster
from dials.algorithms.indexing.ssx.analysis import (
generate_html_report,
generate_plots,
make_summary_table,
report_on_crystal_clusters,
)
from dials.algorithms.integration.ssx.ssx_integrate import (
generate_html_report as generate_integration_html_report,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalUnitCellParameterisation,
)
from dials.algorithms.shoebox import MaskCode
from dials.array_family import flex
from dials.command_line.combine_experiments import CombineWithReference
from dials.command_line.find_spots import working_phil as find_spots_phil
from dials.command_line.refine import run_dials_refine
from dials.command_line.refine import working_phil as refine_phil
from dials.command_line.ssx_index import index
from dials.command_line.ssx_index import phil_scope as indexing_phil
from dials.command_line.ssx_integrate import run_integration
from dials.command_line.ssx_integrate import working_phil as integration_phil
from dials.util.ascii_art import spot_counts_per_image_plot
from dxtbx.model import ExperimentList
from dxtbx.serialize import load
from xia2.Driver.timing import record_step
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Streams import banner
from xia2.Modules.SSX.reporting import (
generate_refinement_step_table,
indexing_summary_output,
)
from xia2.Modules.SSX.util import log_to_file, run_in_directory
xia2_logger = logging.getLogger(__name__)
@dataclass
class SpotfindingParams:
    """User-configurable options for spotfinding in the SSX pipeline."""

    min_spot_size: int = 2
    max_spot_size: int = 10
    d_min: Optional[float] = None
    nproc: int = 1
    phil: Optional[Path] = None

    @classmethod
    def from_phil(cls, params):
        """Build a SpotfindingParams from the xia2.ssx phil extract.

        Raises FileNotFoundError if a user phil file is specified but absent.
        """
        if params.spotfinding.phil:
            phil_path = Path(params.spotfinding.phil).resolve()
            if not phil_path.is_file():
                raise FileNotFoundError(os.fspath(phil_path))
        else:
            phil_path = None
        return cls(
            params.spotfinding.min_spot_size,
            params.spotfinding.max_spot_size,
            params.d_min,
            params.multiprocessing.nproc,
            phil_path,
        )
@dataclass
class IndexingParams:
    """User-configurable options for indexing in the SSX pipeline."""

    space_group: Optional[sgtbx.space_group] = None
    unit_cell: Optional[uctbx.unit_cell] = None
    max_lattices: int = 1
    nproc: int = 1
    phil: Optional[Path] = None
    output_nuggets_dir: Optional[Path] = None

    @classmethod
    def from_phil(cls, params):
        """Build an IndexingParams from the xia2.ssx phil extract.

        Raises FileNotFoundError for a missing user phil file, and ValueError
        if the given unit cell is incompatible with the given space group.
        """
        if params.indexing.phil:
            phil_path = Path(params.indexing.phil).resolve()
            if not phil_path.is_file():
                raise FileNotFoundError(os.fspath(phil_path))
        else:
            phil_path = None
        if params.indexing.unit_cell and params.space_group:
            # Fail early if the cell and space group are inconsistent.
            try:
                _ = crystal.symmetry(
                    unit_cell=params.indexing.unit_cell,
                    space_group_info=params.space_group,
                    assert_is_compatible_unit_cell=True,
                )
            except AssertionError as e:
                raise ValueError(e)
        return cls(
            params.space_group,
            params.indexing.unit_cell,
            params.indexing.max_lattices,
            params.multiprocessing.nproc,
            phil_path,
        )
@dataclass
class RefinementParams:
    """User-configurable options for joint geometry refinement."""

    phil: Optional[Path] = None

    @classmethod
    def from_phil(cls, params):
        """Build a RefinementParams from the xia2.ssx phil extract.

        Raises FileNotFoundError if a user phil file is specified but absent.
        """
        if not params.geometry_refinement.phil:
            return cls(None)
        phil_path = Path(params.geometry_refinement.phil).resolve()
        if not phil_path.is_file():
            raise FileNotFoundError(os.fspath(phil_path))
        return cls(phil_path)
@dataclass
class IntegrationParams:
    """User-configurable options for integration in the SSX pipeline."""

    algorithm: str = "ellipsoid"
    rlp_mosaicity: str = "angular4"
    d_min: Optional[float] = None
    nproc: int = 1
    phil: Optional[Path] = None
    output_nuggets_dir: Optional[Path] = None

    @classmethod
    def from_phil(cls, params):
        """Build an IntegrationParams from the xia2.ssx phil extract.

        Raises FileNotFoundError if a user phil file is specified but absent.
        """
        if params.integration.phil:
            phil_path = Path(params.integration.phil).resolve()
            if not phil_path.is_file():
                raise FileNotFoundError(os.fspath(phil_path))
        else:
            phil_path = None
        return cls(
            params.integration.algorithm,
            params.integration.ellipsoid.rlp_mosaicity,
            params.d_min,
            params.multiprocessing.nproc,
            phil_path,
        )
def ssx_find_spots(
    working_directory: Path,
    spotfinding_params: SpotfindingParams,
) -> flex.reflection_table:
    """Run dials.find_spots on imported.expt in working_directory.

    Merges xia2-level options with any user phil file (xia2 options win),
    logs to dials.find_spots.log, prints a per-image spot-count histogram,
    and returns the strong-spot reflection table.
    """
    if not (working_directory / "imported.expt").is_file():
        raise ValueError(f"Data has not yet been imported into {working_directory}")
    xia2_logger.notice(banner("Spotfinding"))  # type: ignore
    logfile = "dials.find_spots.log"
    with run_in_directory(working_directory), log_to_file(
        logfile
    ) as dials_logger, record_step("dials.find_spots"):
        # Set up the input
        imported_expts = load.experiment_list("imported.expt", check_format=True)
        xia2_phil = f"""
            input.experiments = imported.expt
            spotfinder.mp.nproc = {spotfinding_params.nproc}
            spotfinder.filter.max_spot_size = {spotfinding_params.max_spot_size}
            spotfinder.filter.min_spot_size = {spotfinding_params.min_spot_size}
        """
        if spotfinding_params.d_min:
            xia2_phil += f"\nspotfinder.filter.d_min = {spotfinding_params.d_min}"
        if spotfinding_params.phil:
            itpr = find_spots_phil.command_line_argument_interpreter()
            try:
                user_phil = itpr.process(args=[os.fspath(spotfinding_params.phil)])[0]
                # Source order makes xia2_phil take precedence over user phil.
                working_phil = find_spots_phil.fetch(
                    sources=[user_phil, iotbx.phil.parse(xia2_phil)]
                )
            except Exception as e:
                # Best-effort: a bad user phil is reported, not fatal.
                xia2_logger.warning(
                    f"Unable to interpret {spotfinding_params.phil} as a spotfinding phil file. Error:\n{e}"
                )
                working_phil = find_spots_phil.fetch(
                    sources=[iotbx.phil.parse(xia2_phil)]
                )
        else:
            working_phil = find_spots_phil.fetch(sources=[iotbx.phil.parse(xia2_phil)])
        diff_phil = find_spots_phil.fetch_diff(source=working_phil)
        params = working_phil.extract()
        dials_logger.info(
            "The following parameters have been modified:\n"
            + "input.experiments = imported.expt\n"
            + f"{diff_phil.as_str()}"
        )
        # Do spot-finding
        reflections = flex.reflection_table.from_observations(imported_expts, params)
        # Count foreground pixels per shoebox as the "n_signal" column.
        good = MaskCode.Foreground | MaskCode.Valid
        reflections["n_signal"] = reflections["shoebox"].count_mask_values(good)
        isets = imported_expts.imagesets()
        if len(isets) > 1:
            # Report a separate spot-count histogram per imageset.
            for i, imageset in enumerate(isets):
                selected = flex.bool(reflections.nrows(), False)
                for j, experiment in enumerate(imported_expts):
                    if experiment.imageset is not imageset:
                        continue
                    selected.set_selected(reflections["id"] == j, True)
                plot = spot_counts_per_image_plot(reflections.select(selected))
                out_ = f"Histogram of per-image spot count for imageset {i}:\n" + plot
                dials_logger.info(out_)
                xia2_logger.info(out_)
        else:
            plot = spot_counts_per_image_plot(reflections)
            dials_logger.info(plot)
            xia2_logger.info(plot)
    return reflections
def clusters_from_experiments(
    experiments: ExperimentList,
) -> Tuple[dict, List[Cluster]]:
    """Run unit-cell cluster analysis on the experiments' crystal models.

    Returns the (cluster plots, large clusters) pair produced by
    report_on_crystal_clusters.
    """
    symmetries = []
    for expt in experiments:
        symmetries.append(
            crystal.symmetry(
                unit_cell=expt.crystal.get_unit_cell(),
                space_group=expt.crystal.get_space_group(),
            )
        )
    return report_on_crystal_clusters(symmetries, True)
def ssx_index(
    working_directory: Path,
    indexing_params: IndexingParams,
) -> Tuple[ExperimentList, flex.reflection_table, dict]:
    """Run dials.ssx_index on imported.expt + strong.refl in working_directory.

    Merges xia2-level options with any user phil file (xia2 options win),
    reports cluster analysis, writes dials.ssx_index.{log,html,json}, and
    returns (indexed experiments, indexed reflections, summary dict with
    keys n_images_indexed / large_clusters / success_per_image).
    """
    if not (working_directory / "imported.expt").is_file():
        raise ValueError(f"Data has not yet been imported into {working_directory}")
    if not (working_directory / "strong.refl").is_file():
        raise ValueError(f"Unable to find spotfinding results in {working_directory}")
    xia2_logger.notice(banner("Indexing"))  # type: ignore
    with run_in_directory(working_directory):
        logfile = "dials.ssx_index.log"
        with log_to_file(logfile) as dials_logger, record_step("dials.ssx_index"):
            # Set up the input and log it to the dials log file
            strong_refl = flex.reflection_table.from_file("strong.refl")
            imported_expts = load.experiment_list("imported.expt", check_format=False)
            xia2_phil = f"""
                input.experiments = imported.expt
                input.reflections = strong.refl
                indexing.nproc={indexing_params.nproc}
            """
            if indexing_params.unit_cell:
                uc = ",".join(str(i) for i in indexing_params.unit_cell.parameters())
                xia2_phil += f"\nindexing.known_symmetry.unit_cell={uc}"
            if indexing_params.space_group:
                xia2_phil += f"\nindexing.known_symmetry.space_group={str(indexing_params.space_group)}"
            if indexing_params.max_lattices > 1:
                xia2_phil += f"\nindexing.multiple_lattice_search.max_lattices={indexing_params.max_lattices}"
            if indexing_params.output_nuggets_dir:
                xia2_phil += f"\noutput.nuggets={os.fspath(indexing_params.output_nuggets_dir)}"
            if indexing_params.phil:
                itpr = indexing_phil.command_line_argument_interpreter()
                try:
                    user_phil = itpr.process(args=[os.fspath(indexing_params.phil)])[0]
                    working_phil = indexing_phil.fetch(
                        sources=[user_phil, iotbx.phil.parse(xia2_phil)]
                    )
                    # Note, the order above makes the xia2_phil take precedent
                    # over the user phil
                except Exception as e:
                    # Best-effort: a bad user phil is reported, not fatal.
                    xia2_logger.warning(
                        f"Unable to interpret {indexing_params.phil} as an indexing phil file. Error:\n{e}"
                    )
                    working_phil = indexing_phil.fetch(
                        sources=[iotbx.phil.parse(xia2_phil)]
                    )
            else:
                working_phil = indexing_phil.fetch(
                    sources=[iotbx.phil.parse(xia2_phil)]
                )
            diff_phil = indexing_phil.fetch_diff(source=working_phil)
            params = working_phil.extract()
            dials_logger.info(
                "The following parameters have been modified:\n"
                + "input.experiments = imported.expt\n"
                + "input.reflections = strong.refl\n"
                + f"{diff_phil.as_str()}"
            )
            # Do the indexing
            indexed_experiments, indexed_reflections, summary_data = index(
                imported_expts, strong_refl, params
            )
            # Count images with at least one indexed lattice.
            n_images = reduce(
                lambda a, v: a + (v[0]["n_indexed"] > 0), summary_data.values(), 0
            )
            indexing_success_per_image = [
                bool(v[0]["n_indexed"]) for v in summary_data.values()
            ]
            report = (
                "Summary of images sucessfully indexed\n"
                + make_summary_table(summary_data)
                + f"\n{indexed_reflections.size()} spots indexed on {n_images} images"
            )
            dials_logger.info(report)

        # Report on clustering, and generate html report and json output
        if indexed_experiments:
            cluster_plots, large_clusters = clusters_from_experiments(
                indexed_experiments
            )
        else:
            cluster_plots, large_clusters = ({}, [])
        summary_plots = {}
        if indexed_experiments:
            summary_plots = generate_plots(summary_data)
            output_ = (
                f"{indexed_reflections.size()} spots indexed on {n_images} images\n"
                + f"{indexing_summary_output(summary_data, summary_plots)}"
            )
            xia2_logger.info(output_)
        summary_plots.update(cluster_plots)
        if summary_plots:
            generate_html_report(summary_plots, "dials.ssx_index.html")
            with open("dials.ssx_index.json", "w") as outfile:
                json.dump(summary_plots, outfile, indent=2)
    summary_for_xia2 = {
        "n_images_indexed": n_images,
        "large_clusters": large_clusters,
        "success_per_image": indexing_success_per_image,
    }
    return indexed_experiments, indexed_reflections, summary_for_xia2
def combine_with_reference(experiments: ExperimentList) -> ExperimentList:
    """Return a new ExperimentList in which every experiment shares the
    detector and beam models of the first experiment."""
    combiner = CombineWithReference(
        detector=experiments[0].detector, beam=experiments[0].beam
    )
    combined = ExperimentList()
    for experiment in experiments:
        combined.append(combiner(experiment))
    return combined
def run_refinement(
    working_directory: Path,
    refinement_params: RefinementParams,
) -> None:
    """Run dials.refine jointly on indexed.expt + indexed.refl.

    Applies xia2's extra default refinement settings (which a user phil file
    may override), writes refined.expt / refined.refl, records them with the
    FileHandler, and logs a summary step table.
    """
    xia2_logger.notice(banner("Joint refinement"))  # type: ignore
    logfile = "dials.refine.log"
    with run_in_directory(working_directory), log_to_file(
        logfile
    ) as dials_logger, record_step("dials.refine"):
        indexed_refl = flex.reflection_table.from_file("indexed.refl")
        indexed_expts = load.experiment_list("indexed.expt", check_format=False)
        extra_defaults = """
            refinement.parameterisation.beam.fix="all"
            refinement.parameterisation.auto_reduction.action="fix"
            refinement.parameterisation.detector.fix_list="Tau1"
            refinement.refinery.engine=SparseLevMar
            refinement.reflections.outlier.algorithm=sauter_poon
        """
        if refinement_params.phil:
            itpr = refine_phil.command_line_argument_interpreter()
            try:
                user_phil = itpr.process(args=[os.fspath(refinement_params.phil)])[0]
                working_phil = refine_phil.fetch(
                    sources=[iotbx.phil.parse(extra_defaults), user_phil]
                )
                # Note, the order above makes the user phil take precedent over the extra defaults
            except Exception as e:
                # Best-effort: a bad user phil is reported, not fatal.
                xia2_logger.warning(
                    f"Unable to interpret {refinement_params.phil} as a refinement phil file. Error:\n{e}"
                )
                working_phil = refine_phil.fetch(
                    sources=[iotbx.phil.parse(extra_defaults)]
                )
        else:
            working_phil = refine_phil.fetch(sources=[iotbx.phil.parse(extra_defaults)])
        diff_phil = refine_phil.fetch_diff(source=working_phil)
        params = working_phil.extract()
        dials_logger.info(
            "The following parameters have been modified:\n"
            + "input.experiments = indexed.expt\n"
            + "input.reflections = indexed.refl\n"
            + f"{diff_phil.as_str()}"
        )
        expts, refls, refiner, _ = run_dials_refine(indexed_expts, indexed_refl, params)
        dials_logger.info("Saving refined experiments to refined.expt")
        expts.as_file("refined.expt")
        dials_logger.info("Saving reflections with updated predictions to refined.refl")
        refls.as_file("refined.refl")
    FileHandler.record_data_file(working_directory / "refined.expt")
    FileHandler.record_log_file(
        "dials.refine", working_directory / "dials.refine.log"
    )
    step_table = generate_refinement_step_table(refiner)
    xia2_logger.info("Summary of joint refinement steps:\n" + step_table)
def ssx_integrate(
    working_directory: Path, integration_params: IntegrationParams
) -> dict:
    """Run dials.ssx_integrate on indexed.expt + indexed.refl.

    Merges xia2-level options, extra defaults and any user phil file
    (precedence: xia2 options > user phil > extra defaults), writes batched
    integrated_N.{refl,expt} files plus html/json reports, and returns a
    summary dict (DataFiles, n_cryst_integrated, large_clusters).
    """
    if not (
        (working_directory / "indexed.expt").is_file()
        and (working_directory / "indexed.refl").is_file()
    ):
        raise ValueError(f"Unable to find indexing results in {working_directory}")
    xia2_logger.notice(banner("Integrating"))  # type: ignore
    with run_in_directory(working_directory):
        logfile = "dials.ssx_integrate.log"
        with log_to_file(logfile) as dials_logger, record_step("dials.ssx_integrate"):
            # Set up the input and log it to the dials log file
            indexed_refl = flex.reflection_table.from_file(
                "indexed.refl"
            ).split_by_experiment_id()
            indexed_expts = load.experiment_list("indexed.expt", check_format=True)
            xia2_phil = f"""
                nproc={integration_params.nproc}
                algorithm={integration_params.algorithm}
            """
            if integration_params.algorithm == "ellipsoid":
                model = integration_params.rlp_mosaicity
                xia2_phil += f"\nprofile.ellipsoid.rlp_mosaicity.model={model}"
            d_min = integration_params.d_min
            if d_min:
                xia2_phil += f"\nprediction.d_min={d_min}"
                if integration_params.algorithm == "ellipsoid":
                    xia2_phil += f"\nprofile.ellipsoid.prediction.d_min={d_min}"
            if integration_params.output_nuggets_dir:
                xia2_phil += f"\noutput.nuggets={os.fspath(integration_params.output_nuggets_dir)}"
            extra_defaults = """
                output.batch_size=1000
            """
            if integration_params.algorithm == "ellipsoid":
                # Number of free unit-cell parameters for the first crystal.
                n_uc_params = CrystalUnitCellParameterisation(
                    indexed_expts[0].crystal
                ).num_free()
                n_orientation_params = 3
                n_mosaicity_params = {
                    "angular4": 4,
                    "angular2": 2,
                    "simple1": 1,
                    "simple6": 6,
                }[integration_params.rlp_mosaicity]
                min_n_reflections = max(
                    n_uc_params + n_orientation_params + 1, n_mosaicity_params + 1
                )
                # ellipsoid does refinement of uc+orientation, then mosaicity,
                # so ensure min_n_reflections >= n_params + 1.
                # for angular 4, min_n_reflections will range from 5 (cubic SG) to 10 (P1)
                extra_defaults += f"""
                    profile.ellipsoid.refinement.min_n_reflections={min_n_reflections}
                """
            if integration_params.phil:
                itpr = integration_phil.command_line_argument_interpreter()
                try:
                    user_phil = itpr.process(args=[os.fspath(integration_params.phil)])[
                        0
                    ]
                    working_phil = integration_phil.fetch(
                        sources=[
                            iotbx.phil.parse(extra_defaults),
                            user_phil,
                            iotbx.phil.parse(xia2_phil),
                        ]
                    )
                    # Note, the order above makes the xia2_phil take precedent
                    # over the user phil, which takes precedent over the extra defaults
                except Exception as e:
                    # Best-effort: a bad user phil is reported, not fatal.
                    xia2_logger.warning(
                        f"Unable to interpret {integration_params.phil} as an integration phil file. Error:\n{e}"
                    )
                    working_phil = integration_phil.fetch(
                        sources=[
                            iotbx.phil.parse(extra_defaults),
                            iotbx.phil.parse(xia2_phil),
                        ]
                    )
            else:
                working_phil = integration_phil.fetch(
                    sources=[
                        iotbx.phil.parse(extra_defaults),
                        iotbx.phil.parse(xia2_phil),
                    ]
                )
            diff_phil = integration_phil.fetch_diff(source=working_phil)
            params = working_phil.extract()
            dials_logger.info(
                "The following parameters have been modified:\n"
                + "input.experiments = indexed.expt\n"
                + "input.reflections = indexed.refl\n"
                + f"{diff_phil.as_str()}"
            )

            # Run the integration
            # Record the datafiles so that the information can be passed
            # out in the case of processing on multiple nodes, as adding to
            # the FileHandler won't work here.
            summary_for_xia2: dict = {"DataFiles": {"tags": [], "filenames": []}}
            integrated_crystal_symmetries = []
            n_refl, n_cryst = (0, 0)
            # run_integration yields one (experiments, reflections, aggregator)
            # batch at a time; each batch is written to numbered output files.
            for i, (int_expt, int_refl, aggregator) in enumerate(
                run_integration(indexed_refl, indexed_expts, params)
            ):
                reflections_filename = f"integrated_{i+1}.refl"
                experiments_filename = f"integrated_{i+1}.expt"
                n_refl += int_refl.size()
                dials_logger.info(
                    f"Saving {int_refl.size()} reflections to {reflections_filename}"
                )
                int_refl.as_file(reflections_filename)
                n_cryst += len(int_expt)
                dials_logger.info(f"Saving the experiments to {experiments_filename}")
                int_expt.as_file(experiments_filename)
                summary_for_xia2["DataFiles"]["tags"].append(
                    f"integrated_{i+1} {working_directory.name}"
                )
                summary_for_xia2["DataFiles"]["filenames"].append(
                    working_directory / f"integrated_{i+1}.refl"
                )
                summary_for_xia2["DataFiles"]["tags"].append(
                    f"integrated_{i+1} {working_directory.name}"
                )
                summary_for_xia2["DataFiles"]["filenames"].append(
                    working_directory / f"integrated_{i+1}.expt"
                )
                integrated_crystal_symmetries.extend(
                    [
                        crystal.symmetry(
                            unit_cell=copy.deepcopy(cryst.get_unit_cell()),
                            space_group=copy.deepcopy(cryst.get_space_group()),
                        )
                        for cryst in int_expt.crystals()
                    ]
                )
            xia2_logger.info(f"{n_refl} reflections integrated from {n_cryst} crystals")

        # Report on clustering, and generate html report and json output
        plots = {}
        if integrated_crystal_symmetries:
            cluster_plots, large_clusters = report_on_crystal_clusters(
                integrated_crystal_symmetries,
                make_plots=True,
            )
        else:
            cluster_plots, large_clusters = ({}, {})
        if integrated_crystal_symmetries:
            plots = aggregator.make_plots()
            plots.update(cluster_plots)
            generate_integration_html_report(plots, "dials.ssx_integrate.html")
            with open("dials.ssx_integrate.json", "w") as outfile:
                json.dump(plots, outfile, indent=2)
    summary_for_xia2["n_cryst_integrated"] = n_cryst
    summary_for_xia2["large_clusters"] = large_clusters
    return summary_for_xia2
def best_cell_from_cluster(cluster: Cluster) -> Tuple:
    """Return (space group symbol+number, unit-cell parameters) for the
    highest-symmetry metric subgroup consistent with the cluster's median
    cell (within a 3 degree tolerance)."""
    p1_symmetry = crystal.symmetry(
        unit_cell=uctbx.unit_cell(cluster.median_cell[0:6]), space_group_symbol="P 1"
    )
    subgroups = sgtbx.lattice_symmetry.metric_subgroups(p1_symmetry, 3.00)
    best_subsym = subgroups.result_groups[0]["best_subsym"]
    return (
        best_subsym.space_group_info().symbol_and_number(),
        best_subsym.unit_cell().parameters(),
    )
| bsd-3-clause | 5cba784f769aab954d77fe96ea48854a | 41.133562 | 113 | 0.578558 | 3.804855 | false | false | false | false |
xia2/xia2 | src/xia2/cli/overload.py | 1 | 4525 | from __future__ import annotations
import json
import sys
import timeit
from collections import Counter
import iotbx.phil
from dials.util.options import ArgumentParser, flatten_experiments
from libtbx import easy_mp
from scitbx.array_family import flex
help_message = """
Examples::
xia2.overload (data_master.h5|integrated.expt) [nproc=8]
"""
phil_scope = iotbx.phil.parse(
"""
nproc = 1
.type = int(value_min=1)
.help = "The number of processes to use."
output {
filename = overload.json
.type = path
.help = "Histogram output file name"
}
"""
)
def run(args=None):
    """Entry point for xia2.overload.

    Parses the command line (expecting exactly one experiment, given either
    as an image master file or an integrated experiment list) and builds the
    pixel-overload histogram via build_hist. Exits with a usage message if
    the input does not yield exactly one experiment.
    """
    usage = "xia2.overload (data_master.h5|integrated.expt) [nproc=8]"

    parser = ArgumentParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        read_experiments_from_images=True,
        epilog=help_message,
    )
    params, _ = parser.parse_args(args=args, show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)

    if len(experiments) != 1:
        parser.print_help()
        # sys.exit raises SystemExit, so the trailing bare "return" that
        # previously followed it was unreachable dead code and is removed.
        sys.exit("Please pass an experiment list\n")

    build_hist(experiments, params)
def build_hist(experiment_list, params):
    """Iterate through the images in experiment_list and generate a pixel
    histogram, which is written to params.output.filename.

    Pixel values are binned from 0 up to 5x the detector trusted range
    (i.e. up to 500% of the overload limit). Images are distributed across
    params.nproc processes; each worker histograms every nproc'th image and
    the partial histograms are merged afterwards.
    """
    nproc = params.nproc

    for experiment in experiment_list:
        imageset = experiment.imageset
        limit = experiment.detector[0].get_trusted_range()[1]
        n0, n1 = experiment.scan.get_image_range()
        image_count = n1 - n0 + 1

        binfactor = 5  # register up to 500% counts
        histmax = (limit * binfactor) + 0.0
        histbins = int(limit * binfactor) + 1
        # A dense flex.histogram with this many slots becomes prohibitively
        # large, so fall back to a sparse python Counter in that case.
        use_python_counter = histbins > 90000000  # empirically determined

        print(
            "Processing %d images in %d processes using %s\n"
            % (
                image_count,
                nproc,
                "python Counter" if use_python_counter else "flex arrays",
            )
        )

        def process_image(process):
            """Histogram every nproc'th image, starting at index `process`."""
            last_update = start = timeit.default_timer()

            i = process
            if use_python_counter:
                local_hist = Counter()
            else:
                local_hist = flex.histogram(
                    flex.double(), data_min=0.0, data_max=histmax, n_slots=histbins
                )

            while i < image_count:
                data = imageset.get_raw_data(i)[0]
                if not use_python_counter:
                    data = flex.histogram(
                        data.as_double().as_1d(),
                        data_min=0.0,
                        data_max=histmax,
                        n_slots=histbins,
                    )
                local_hist.update(data)
                i = i + nproc
                # Only the first worker reports progress, at most every 3 s.
                if process == 0:
                    if timeit.default_timer() > (last_update + 3):
                        last_update = timeit.default_timer()
                        if sys.stdout.isatty():
                            sys.stdout.write("\033[A")
                        print(
                            "Processed %d%% (%d seconds remain) "
                            % (
                                100 * i // image_count,
                                round(
                                    (image_count - i) * (last_update - start) / (i + 1)
                                ),
                            )
                        )
            return local_hist

        results = easy_mp.parallel_map(
            func=process_image,
            iterable=range(nproc),
            processes=nproc,
            preserve_exception_message=True,
        )

        print("Merging results")
        result_hist = None
        for hist in results:
            if result_hist is None:
                result_hist = hist
            else:
                result_hist.update(hist)

        if not use_python_counter:
            # reformat the dense flex histogram into a sparse dictionary
            result = list(result_hist.slots())
            result_hist = {b: count for b, count in enumerate(result) if count > 0}

        results = {
            "scale_factor": 1 / limit,
            "overload_limit": limit,
            "counts": result_hist,
        }

        # Fix: honour the output.filename phil parameter (as documented above
        # and declared in the phil scope) instead of a hard-coded
        # "overload.json".
        output_filename = params.output.filename
        print(f"Writing results to {output_filename}")
        with open(output_filename, "w") as fh:
            json.dump(results, fh, indent=1, sort_keys=True)
# Allow running this module directly as well as via the xia2.overload entry point.
if __name__ == "__main__":
    run()
| bsd-3-clause | fe9dad784a5e4a1f15356c56acc0ac63 | 28.193548 | 87 | 0.525083 | 4.022222 | false | false | false | false |
xia2/xia2 | src/xia2/Modules/Indexer/XDSIndexerII.py | 1 | 13662 | # An reimplementation of the XDS indexer to work for harder cases, for example
# cases where the whole sweep needs to be read into memory in IDXREF to get
# a decent indexing solution (these do happen) and also cases where the
# crystal is highly mosaic. Perhaps. This will now be directly inherited from
# the original XDSIndexer and only the necessary method overloaded (as I
# should have done this in the first place.)
from __future__ import annotations
import logging
import math
import os
import dxtbx
from dials.array_family import flex
from dials.util.ascii_art import spot_counts_per_image_plot
from dxtbx.model import Experiment, ExperimentList
from dxtbx.serialize.xds import to_crystal, to_xds
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import banner
from xia2.lib.bits import auto_logfiler
from xia2.Modules.Indexer.XDSIndexer import XDSIndexer
from xia2.Wrappers.Dials.ImportXDS import ImportXDS
from xia2.Wrappers.XDS.XDS import XDSException
logger = logging.getLogger("xia2.Modules.Indexer.XDSIndexerII")
class XDSIndexerII(XDSIndexer):
    """An extension of XDSIndexer using all available images."""

    def __init__(self):
        super().__init__()
        # "ii" selects the all-images strategy implemented by this subclass
        self._index_select_images = "ii"
        # lazily decided in _index(): "i" (wedge-based) or "ii" (all images)
        self._i_or_ii = None

    # helper functions

    def _index_select_images_ii(self):
        """Select correct images based on image headers."""
        phi_width = self.get_phi_width()

        if phi_width == 0.0:
            raise RuntimeError("cannot use still images")

        # use five degrees for the background calculation

        five_deg = int(round(5.0 / phi_width)) - 1
        turn = int(round(360.0 / phi_width)) - 1

        if five_deg < 5:
            five_deg = 5

        images = self.get_matching_images()

        # characterise the images - are there just two (e.g. dna-style
        # reference images) or is there a full block? if it is the
        # former then we have a problem, as we want *all* the images in the
        # sweep...

        wedges = []

        min_images = PhilIndex.params.xia2.settings.input.min_images

        if len(images) < 3 and len(images) < min_images:
            raise RuntimeError(
                "This INDEXER cannot be used for only %d images" % len(images)
            )

        # including > 360 degrees in indexing does not add fresh information
        start = min(images)
        end = max(images)
        if (end - start) > turn:
            end = start + turn
        logger.debug("Adding images for indexer: %d -> %d", start, end)

        wedges.append((start, end))

        # FIXME this should have a wrapper function!

        if start + five_deg in images:
            self._background_images = (start, start + five_deg)
        else:
            self._background_images = (start, end)

        return wedges

    def _index_prepare(self):
        """Run spot finding (via the parent class) and report per-image
        spot counts for the sweep."""
        logger.notice(banner("Spotfinding %s" % self.get_indexer_sweep_name()))
        super()._index_prepare()

        reflections_file = spot_xds_to_reflection_file(
            self._indxr_payload["SPOT.XDS"],
            working_directory=self.get_working_directory(),
        )
        refl = flex.reflection_table.from_file(reflections_file)
        logger.info(spot_counts_per_image_plot(refl))

    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        self._index_remove_masked_regions()

        if self._i_or_ii is None:
            self._i_or_ii = self.decide_i_or_ii()
            logger.debug("Selecting I or II, chose %s", self._i_or_ii)

        idxref = self.Idxref()

        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # set the phi start etc correctly
        idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])

        if self._i_or_ii == "i":
            # wedge-based indexing: first wedge defines the starting frame
            blocks = self._index_select_images_i()
            for block in blocks[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame
                )
                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)
                idxref.add_spot_range(block[0], block[1])
            for block in blocks[1:]:
                idxref.add_spot_range(block[0], block[1])
        else:
            # all-image indexing: use the full image ranges
            for block in self._indxr_images[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame
                )
                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)
                idxref.add_spot_range(block[0], block[1])
            for block in self._indxr_images[1:]:
                idxref.add_spot_range(block[0], block[1])

        # FIXME need to also be able to pass in the known unit
        # cell and lattice if already available e.g. from
        # the helper... indirectly
        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)
            logger.debug("Set lattice: %s", self._indxr_input_lattice)
            logger.debug("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)
            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        # FIXED need to set the beam centre here - this needs to come
        # from the input .xinfo object or header, and be converted
        # to the XDS frame... done.
        # NOTE(review): to_xds is already imported at module level; this
        # local import is redundant.
        from dxtbx.serialize.xds import to_xds

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    logger.debug("XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif (
                    "insufficient percentage (< 70%)" in str(e)
                    or "insufficient percentage (< 50%)" in str(e)
                ) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()

                    # compare solutions against the user-supplied target cell
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (
                            math.fabs((cell[j] - original_cell[j]) / original_cell[j])
                            > 0.02
                            and check
                        ):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (
                            math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                            and check
                        ):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                    logger.debug("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                    e
                ) or "insufficient percentage (< 50%)" in str(e):
                    logger.debug("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...
        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        (
            self._indxr_lattice,
            self._indxr_cell,
            self._indxr_mosaic,
        ) = idxref.get_indexing_solution()

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        # this information gets lost when re-creating the models from the
        # XDS results - however is not refined so can simply copy from the
        # input - https://github.com/xia2/xia2/issues/372
        models.get_detector()[0].set_thickness(
            converter.get_detector()[0].get_thickness()
        )

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )
        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

    def decide_i_or_ii(self):
        """Run both trial indexing modes and choose between wedge-based ("i")
        and all-image ("ii") indexing; any failure falls back to "ii"."""
        logger.debug("Testing II or I indexing")

        try:
            fraction_etc_i = self.test_i()
            fraction_etc_ii = self.test_ii()

            if not fraction_etc_i and fraction_etc_ii:
                return "ii"
            if fraction_etc_i and not fraction_etc_ii:
                return "i"

            logger.debug("I: %.2f %.2f %.2f" % fraction_etc_i)
            logger.debug("II: %.2f %.2f %.2f" % fraction_etc_ii)

            # prefer "i" only if it indexes a larger fraction with smaller
            # rmsd and rms phi than "ii"
            if (
                fraction_etc_i[0] > fraction_etc_ii[0]
                and fraction_etc_i[1] < fraction_etc_ii[1]
                and fraction_etc_i[2] < fraction_etc_ii[2]
            ):
                return "i"
            return "ii"
        except Exception as e:
            logger.debug(str(e), exc_info=True)
            return "ii"

    def test_i(self):
        """Trial IDXREF run using the wedge-based image selection."""
        idxref = self.Idxref()

        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])

        # set the phi start etc correctly

        blocks = self._index_select_images_i()

        for block in blocks[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in blocks[1:]:
            idxref.add_spot_range(block[0], block[1])

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        idxref.run()

        return idxref.get_fraction_rmsd_rmsphi()

    def test_ii(self):
        """Trial IDXREF run using the full image range."""
        idxref = self.Idxref()

        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])

        for block in self._indxr_images[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        idxref.run()

        return idxref.get_fraction_rmsd_rmsphi()
def spot_xds_to_reflection_file(spot_xds, working_directory):
    """Convert a SPOT.XDS file into a DIALS reflection file.

    Runs the ImportXDS wrapper in ``working_directory`` and returns the path
    of the reflection file it produces.
    """
    converter = ImportXDS()
    converter.set_working_directory(working_directory)
    # assign a numbered log file before running
    auto_logfiler(converter)
    converter.set_spot_xds(spot_xds)
    converter.run()
    return converter.get_reflection_filename()
| bsd-3-clause | d91db4d70414ebf876d9528c6e219313 | 34.671018 | 87 | 0.576197 | 3.789736 | false | false | false | false |
xia2/xia2 | src/xia2/cli/multiplex.py | 1 | 5586 | from __future__ import annotations
import logging
import random
import sys
import numpy as np
import iotbx.phil
from dials.array_family import flex
from dials.util.exclude_images import exclude_image_ranges_for_scaling
from dials.util.multi_dataset_handling import (
assign_unique_identifiers,
parse_multiple_datasets,
)
from dials.util.options import ArgumentParser, flatten_experiments, flatten_reflections
from dials.util.version import dials_version
import xia2.Handlers.Streams
from xia2.Applications.xia2_main import write_citations
from xia2.Handlers.Citations import Citations
from xia2.Modules.MultiCrystal import ScaleAndMerge
logger = logging.getLogger("xia2.multiplex")
help_message = """
xia2.multiplex performs symmetry analysis, scaling and merging of multi-crystal data
sets, as well as analysis of various pathologies that typically affect multi-crystal
data sets, including non-isomorphism, radiation damage and preferred orientation.
It uses a number of DIALS programs internally, including dials.cosym,
dials.two_theta_refine, dials.scale and dials.symmetry:
- Preliminary filtering of datasets using hierarchical unit cell clustering
- Laue group determination and resolution of indexing ambiguities with dials.cosym
- Determination of "best" overall unit cell with dials.two_theta_refine
- Initial round of scaling with dials.scale
- Estimation of resolution limit with dials.estimate_resolution
- Final round of scaling after application of the resolution limit
- Analysis of systematic absences with dials.symmetry
- Optional ΔCC½ filtering to remove outlier data sets
- Analysis of non-isomorphism, radiation damage and preferred orientation
For further details, and to cite usage, please see:
`Gildea, R. J. et al. (2022) Acta Cryst. D78, 752-769 <https://doi.org/10.1107/S2059798322004399>`_.
Examples use cases
------------------
Multiple integrated experiments and reflections in combined files::
xia2.multiplex integrated.expt integrated.refl
Integrated experiments and reflections in separate input files::
xia2.multiplex integrated_1.expt integrated_1.refl \\
integrated_2.expt integrated_2.refl
Override the automatic space group determination and resolution estimation::
xia2.multiplex space_group=C2 resolution.d_min=2.5 \\
integrated_1.expt integrated_1.refl \\
integrated_2.expt integrated_2.refl
Filter potential outlier data sets using the ΔCC½ method::
xia2.multiplex filtering.method=deltacchalf \\
integrated.expt integrated.refl
"""
phil_scope = iotbx.phil.parse(
"""
include scope xia2.Modules.MultiCrystal.ScaleAndMerge.phil_scope
include scope dials.util.exclude_images.phil_scope
seed = 42
.type = int(value_min=0)
output {
log = xia2.multiplex.log
.type = str
}
""",
process_includes=True,
)
def run(args=None):
    """Run xia2.multiplex: symmetry analysis, scaling and merging of
    multi-crystal data sets.

    Args:
        args: Optional list of command-line arguments; defaults to
            sys.argv[1:] (bound at call time, not at import time).
    """
    if args is None:
        args = sys.argv[1:]

    Citations.cite("xia2.multiplex")

    usage = "xia2.multiplex [options] [param.phil] integrated.expt integrated.refl"

    # Create the parser
    parser = ArgumentParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    # Parse the command line
    params, options = parser.parse_args(args=args, show_diff_phil=False)

    # Configure the logging
    xia2.Handlers.Streams.setup_logging(
        logfile=params.output.log, verbose=options.verbose
    )

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    # Try to load the models and data
    if len(params.input.experiments) == 0:
        logger.info("No Experiments found in the input")
        parser.print_help()
        return
    if len(params.input.reflections) == 0:
        logger.info("No reflection data found in the input")
        parser.print_help()
        return
    # direct check instead of the previous try/assert/except AssertionError;
    # sys.exit raises SystemExit itself, so no "raise" is needed
    if len(params.input.reflections) != len(params.input.experiments):
        sys.exit(
            "The number of input reflections files does not match the "
            "number of input experiments"
        )

    if params.seed is not None:
        flex.set_random_seed(params.seed)
        np.random.seed(params.seed)
        random.seed(params.seed)

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)
    if len(experiments) < 2:
        sys.exit("xia2.multiplex requires a minimum of two experiments")
    reflections = parse_multiple_datasets(reflections)
    experiments, reflections = assign_unique_identifiers(experiments, reflections)
    reflections, experiments = exclude_image_ranges_for_scaling(
        reflections, experiments, params.exclude_images
    )

    # merge all reflection tables into one for downstream processing
    reflections_all = flex.reflection_table()
    assert len(reflections) in (1, len(experiments))
    for refl in reflections:
        reflections_all.extend(refl)
    reflections_all.assert_experiment_identifiers_are_consistent(experiments)

    if params.identifiers is not None:
        # allow comma-separated identifier lists on the command line
        identifiers = []
        for identifier in params.identifiers:
            identifiers.extend(identifier.split(","))
        params.identifiers = identifiers
    try:
        ScaleAndMerge.MultiCrystalScale(experiments, reflections_all, params)
    except ValueError as e:
        sys.exit(str(e))
    write_citations(program="xia2.multiplex")
| bsd-3-clause | fa51e8b0539aefe4e0a84ba33f1cccbf | 31.643275 | 100 | 0.727159 | 3.605943 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/Dials/ExportMMCIF.py | 1 | 3007 | from __future__ import annotations
import logging
from xia2.Driver.DriverFactory import DriverFactory
logger = logging.getLogger("xia2.Wrappers.Dials.ExportMMCIF")
def ExportMMCIF(DriverType=None):
    """A factory for ExportMMCIFWrapper classes."""

    DriverInstance = DriverFactory.Driver(DriverType)

    class ExportMMCIFWrapper(DriverInstance.__class__):
        """Wrapper for dials.export, writing scaled data as an mmCIF file."""

        def __init__(self):
            DriverInstance.__class__.__init__(self)
            self.set_executable("dials.export")
            # input experiments/reflections files (set by caller before run())
            self._experiments_filename = None
            self._reflections_filename = None
            # output mmCIF path
            self._filename = "scaled.mmcif"
            # reflections below this partiality are excluded on export
            self._partiality_threshold = 0.4
            self._combine_partials = True
            # which intensity to export (passed straight to dials.export)
            self._intensity_choice = "scale"
            # optional compression: "gz", "bz2", "xz" or None (uncompressed)
            self._compress = None
            # mmCIF/PDB deposition format version
            self._pdb_version = "v5_next"

        def set_intensity_choice(self, choice):
            self._intensity_choice = choice

        def set_partiality_threshold(self, partiality_threshold):
            self._partiality_threshold = partiality_threshold

        def set_compression(self, compression=None):
            "Set a compression type: gz bz2 xz or None(uncompressed)"
            self._compress = compression

        def set_pdb_version(self, version):
            self._pdb_version = version

        def set_combine_partials(self, combine_partials):
            self._combine_partials = combine_partials

        def set_experiments_filename(self, experiments_filename):
            self._experiments_filename = experiments_filename

        def get_experiments_filename(self):
            return self._experiments_filename

        def set_reflections_filename(self, reflections_filename):
            self._reflections_filename = reflections_filename

        def get_reflections_filename(self):
            return self._reflections_filename

        def set_filename(self, filename):
            self._filename = filename

        def get_filename(self):
            return self._filename

        def run(self):
            """Assemble the command line and run dials.export."""
            logger.debug("Running dials.export")

            self.clear_command_line()
            self.add_command_line("experiments=%s" % self._experiments_filename)
            self.add_command_line("reflections=%s" % self._reflections_filename)
            self.add_command_line("format=mmcif")
            self.add_command_line("mmcif.hklout=%s" % self._filename)
            if self._combine_partials:
                self.add_command_line("combine_partials=true")
            if self._compress:
                self.add_command_line("mmcif.compress=%s" % self._compress)
            self.add_command_line(
                "partiality_threshold=%s" % self._partiality_threshold
            )
            self.add_command_line("pdb_version=%s" % self._pdb_version)
            self.add_command_line("intensity=%s" % self._intensity_choice)
            self.start()
            self.close_wait()
            self.check_for_errors()

    return ExportMMCIFWrapper()
| bsd-3-clause | 1542cb58bf65573e62bcdb3fcb3c8638 | 34.797619 | 80 | 0.615896 | 4.170596 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/Dials/SearchBeamPosition.py | 1 | 2368 | from __future__ import annotations
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
logger = logging.getLogger("xia2.Wrappers.Dials.SearchBeamPosition")
def SearchBeamPosition(DriverType=None):
    """A factory for SearchBeamPosition classes."""

    DriverInstance = DriverFactory.Driver(DriverType)

    class SearchBeamPositionWrapper(DriverInstance.__class__):
        """Wrapper for dials.search_beam_position."""

        def __init__(self):
            DriverInstance.__class__.__init__(self)
            self.set_executable("dials.search_beam_position")
            # input experiments (sweep) and strong-spot reflection files
            self._sweep_filename = None
            self._spot_filename = None
            # path of the optimised experiments file written by the program
            self._optimized_filename = None
            # optional extra phil parameter file
            self._phil_file = None
            # optional (start, end) image range restriction
            self._image_range = None

        def set_sweep_filename(self, sweep_filename):
            self._sweep_filename = sweep_filename

        def set_spot_filename(self, spot_filename):
            self._spot_filename = spot_filename

        def set_phil_file(self, phil_file):
            self._phil_file = phil_file

        def set_image_range(self, image_range):
            self._image_range = image_range

        def get_optimized_experiments_filename(self):
            return self._optimized_filename

        def run(self):
            """Assemble the command line and run dials.search_beam_position."""
            logger.debug("Running %s", self.get_executable())

            self.clear_command_line()
            self.add_command_line(self._sweep_filename)
            self.add_command_line(self._spot_filename)
            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            self.set_cpu_threads(nproc)
            self.add_command_line("nproc=%i" % nproc)

            if self._image_range:
                self.add_command_line("image_range=%d,%d" % self._image_range)
            if self._phil_file is not None:
                self.add_command_line(self._phil_file)

            # tag the output with the xpid so parallel invocations don't clash
            self._optimized_filename = os.path.join(
                self.get_working_directory(), "%d_optimised.expt" % self.get_xpid()
            )
            self.add_command_line("output.experiments=%s" % self._optimized_filename)

            self.start()
            self.close_wait()
            self.check_for_errors()
            self.get_all_output()

            assert os.path.exists(self._optimized_filename), self._optimized_filename

    return SearchBeamPositionWrapper()
| bsd-3-clause | 2089ce536eca690670e343d7a9cf39e0 | 32.352113 | 85 | 0.61402 | 3.9401 | false | false | false | false |
xia2/xia2 | src/xia2/Experts/Filenames.py | 1 | 1106 | # An expert who knows about how file names are structured on a number of
# platforms... this handles them mostly as strings, which of course they
# are...
from __future__ import annotations
import os
def windows_environment_vars_to_unix(token):
    """Transmogrify windows environment tokens (e.g. %WINDIR%) to
    the UNIX form ($WINDIR) for python environment token replacement."""
    if token.count("%") % 2:
        raise RuntimeError("must have even number of % tokens")
    # splitting on "%" leaves the variable names at the odd indices:
    # "a%B%c" -> ["a", "B", "c"] -> "a" + "$B" + "c"
    pieces = token.split("%")
    return "".join(
        piece if index % 2 == 0 else "$%s" % piece
        for index, piece in enumerate(pieces)
    )
def expand_path(path):
    """Expand the input to give a full path.

    On Windows, %VAR% tokens are first rewritten to $VAR so that
    os.path.expandvars can substitute them; None passes straight through.
    """
    if path is None:
        return None
    if os.name == "nt":
        path = windows_environment_vars_to_unix(path)
    return os.path.expandvars(os.path.expanduser(path))
| bsd-3-clause | 57edacfafa2d83f9bea229650aa3bea9 | 24.136364 | 72 | 0.61302 | 3.921986 | false | false | false | false |
xia2/xia2 | src/xia2/Wrappers/XIA/Report.py | 1 | 1815 | from __future__ import annotations
import logging
import os
import shutil
from xia2.Driver.DriverFactory import DriverFactory
logger = logging.getLogger("xia2.Wrappers.XIA.Report")
def Report(DriverType=None):
    """A factory for ReportWrapper classes."""

    DriverInstance = DriverFactory.Driver(DriverType)

    class ReportWrapper(DriverInstance.__class__):
        """Wrapper for xia2.report, generating an HTML report from a scaled MTZ."""

        def __init__(self):
            DriverInstance.__class__.__init__(self)
            self.set_executable("xia2.report")
            # input scaled MTZ file (must be set before run())
            self._mtz_filename = None
            # desired final location of the HTML report (optional)
            self._html_filename = None
            # minimum completeness passed to the chef analysis (optional)
            self._chef_min_completeness = None

        def set_mtz_filename(self, mtz_filename):
            self._mtz_filename = mtz_filename

        def set_html_filename(self, html_filename):
            self._html_filename = html_filename

        def set_chef_min_completeness(self, min_completeness):
            self._chef_min_completeness = min_completeness

        def run(self):
            """Run xia2.report and move the HTML output into place if requested."""
            logger.debug("Running xia2.report")
            assert self._mtz_filename is not None
            self.clear_command_line()
            self.add_command_line(self._mtz_filename)
            if self._chef_min_completeness is not None:
                self.add_command_line(
                    "chef_min_completeness=%s" % self._chef_min_completeness
                )
            self.start()
            self.close_wait()
            self.check_for_errors()
            # xia2.report always writes this fixed name in the working directory
            html_filename = os.path.join(
                self.get_working_directory(), "xia2-report.html"
            )
            assert os.path.exists(html_filename)
            if self._html_filename is None:
                self._html_filename = html_filename
            else:
                # caller asked for a specific location - move the report there
                shutil.move(html_filename, self._html_filename)

    return ReportWrapper()
| bsd-3-clause | b87403e7cab66d628c1867d84280c7d2 | 30.293103 | 76 | 0.591736 | 4.069507 | false | false | false | false |
xia2/xia2 | src/xia2/Applications/xia2_helpers.py | 1 | 4081 | from __future__ import annotations
import glob
import logging
import os
import shutil
import uuid
from xia2.Driver.DriverFactory import DriverFactory
from xia2.lib.bits import auto_logfiler
from xia2.Wrappers.XIA.Integrate import Integrate as XIA2Integrate
logger = logging.getLogger("xia2.Applications.xia2_helpers")
def process_one_sweep(args):
    """Run xia2 integration for a single sweep in an isolated scratch directory.

    ``args`` is a one-element sequence wrapping an object with attributes
    command_line_args, nproc, crystal_id, wavelength_id, sweep_id, failover
    and driver_type.

    Returns:
        (success, output, xsweep_dict): success flag, captured per-sweep log
        text (or None on failure) and the serialised XSweep dict (or None).
    """
    assert len(args) == 1
    args = args[0]

    # stop_after = args.stop_after
    command_line_args = args.command_line_args
    nproc = args.nproc
    crystal_id = args.crystal_id
    wavelength_id = args.wavelength_id
    sweep_id = args.sweep_id
    failover = args.failover
    driver_type = args.driver_type

    # run this sweep under the requested driver, restoring the default after
    default_driver_type = DriverFactory.get_driver_type()
    DriverFactory.set_driver_type(driver_type)

    curdir = os.path.abspath(os.curdir)

    # drop any -xinfo argument (and its value): each sweep is selected
    # explicitly via sweep.id below
    if "-xinfo" in command_line_args:
        idx = command_line_args.index("-xinfo")
        del command_line_args[idx + 1]
        del command_line_args[idx]

    xia2_integrate = XIA2Integrate()
    # import tempfile
    # tmpdir = tempfile.mkdtemp(dir=curdir)
    tmpdir = os.path.join(curdir, str(uuid.uuid4()))
    os.makedirs(tmpdir)
    xia2_integrate.set_working_directory(tmpdir)
    xia2_integrate.add_command_line_args(args.command_line_args)
    xia2_integrate.set_phil_file(os.path.join(curdir, "xia2-working.phil"))
    xia2_integrate.add_command_line_args(["sweep.id=%s" % sweep_id])
    xia2_integrate.set_nproc(nproc)
    xia2_integrate.set_njob(1)
    xia2_integrate.set_mp_mode("serial")
    auto_logfiler(xia2_integrate)

    sweep_tmp_dir = os.path.join(tmpdir, crystal_id, wavelength_id, sweep_id)
    sweep_target_dir = os.path.join(curdir, crystal_id, wavelength_id, sweep_id)

    output = None
    success = False
    xsweep_dict = None
    try:
        xia2_integrate.run()
        output = get_sweep_output_only(xia2_integrate.get_all_output())
        success = True
    except Exception as e:
        logger.warning("Processing sweep %s failed: %s", sweep_id, str(e))
        if not failover:
            raise
    finally:
        # move results into place even on failure, rewriting any absolute
        # scratch paths inside json/expt files so they remain valid at the
        # final location
        from xia2.Schema.XProject import XProject

        xia2_json = os.path.join(tmpdir, "xia2.json")
        json_files = glob.glob(os.path.join(sweep_tmp_dir, "*", "*.json"))
        json_files.extend(glob.glob(os.path.join(sweep_tmp_dir, "*", "*.expt")))
        if os.path.exists(xia2_json):
            json_files.append(xia2_json)
        import fileinput

        # in-place substitution of the scratch path with the target path
        for line in fileinput.FileInput(files=json_files, inplace=1):
            line = line.replace(sweep_tmp_dir, sweep_target_dir)
            print(line)
        if os.path.exists(xia2_json):
            new_json = os.path.join(curdir, "xia2-%s.json" % sweep_id)
            shutil.copyfile(xia2_json, new_json)
            move_output_folder(sweep_tmp_dir, sweep_target_dir)
            if success:
                # recover the processed sweep record from the rewritten json
                xinfo = XProject.from_json(new_json)
                xcryst = list(xinfo.get_crystals().values())[0]
                xsweep = xcryst.get_xwavelength(wavelength_id).get_sweeps()[0]
                xsweep_dict = xsweep.to_dict()
            shutil.rmtree(tmpdir, ignore_errors=True)
        # catch-all cleanup of the scratch directory
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir, ignore_errors=True)
        DriverFactory.set_driver_type(default_driver_type)
    return success, output, xsweep_dict
def get_sweep_output_only(all_output):
    """Return only the per-sweep portion of captured xia2 output.

    Keeps the lines strictly after the first line starting with
    "Command line: " and stops before any line starting with
    "Processing took "; returns "" if no sweep section is found.
    """
    lines_iter = iter(all_output)
    # skip forward to the start marker, bailing out at the end marker
    for text in lines_iter:
        if text.startswith("Processing took "):
            return ""
        if text.startswith("Command line: "):
            break
    else:
        return ""
    # collect everything up to (but excluding) the end marker
    kept = []
    for text in lines_iter:
        if text.startswith("Processing took "):
            break
        kept.append(text)
    return "".join(kept)
return "".join(sweep_lines)
def move_output_folder(sweep_tmp_dir, sweep_target_dir):
    """Relocate a completed xia2 sweep processing folder.

    Moves ``sweep_tmp_dir`` to ``sweep_target_dir``, first removing any
    previous contents at the target location.
    """
    # a stale target from an earlier run would make shutil.move nest the
    # source inside it, so clear it out first
    if os.path.exists(sweep_target_dir):
        shutil.rmtree(sweep_target_dir)
    shutil.move(sweep_tmp_dir, sweep_target_dir)
| bsd-3-clause | 8e959efca58d1a30bd1af4d716660ef8 | 31.388889 | 80 | 0.650821 | 3.198276 | false | false | false | false |
xia2/xia2 | tests/Modules/Indexer/test_DIALS_indexer.py | 1 | 2528 | from __future__ import annotations
import os
import sys
from unittest import mock
import pytest
from dxtbx.model import ExperimentList
from xia2.Handlers.Phil import PhilIndex
from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XSample import XSample
from xia2.Schema.XSweep import XSweep
from xia2.Schema.XWavelength import XWavelength
def _exercise_dials_indexer(dials_data, tmp_path):
    """Index the centroid_test_data sweep with DialsIndexer and check the
    solution, beam centre, JSON round-tripping and lattice elimination."""
    PhilIndex.params.xia2.settings.multiprocessing.nproc = 1
    template = dials_data("centroid_test_data", pathlib=True) / "centroid_####.cbf"

    indexer = DialsIndexer()
    indexer.set_working_directory(os.fspath(tmp_path))
    experiments = ExperimentList.from_templates([template])
    imageset = experiments.imagesets()[0]
    indexer.add_indexer_imageset(imageset)

    # minimal XProject-style hierarchy around the sweep
    cryst = XCrystal("CRYST1", None)
    wav = XWavelength("WAVE1", cryst, imageset.get_beam().get_wavelength())
    samp = XSample("X1", cryst)
    directory, image = os.path.split(imageset.get_path(1))
    sweep = XSweep("SWEEP1", wav, samp, directory=directory, image=image)
    indexer.set_indexer_sweep(sweep)

    indexer.index()

    # expected values are for the known centroid_test_data tP solution
    assert indexer.get_indexer_cell() == pytest.approx(
        (42.20, 42.20, 39.68, 90, 90, 90), rel=1e-3
    )
    solution = indexer.get_solution()
    assert solution["rmsd"] == pytest.approx(0.09241, abs=1e-3)
    assert solution["metric"] == pytest.approx(0.34599, abs=5e-3)
    assert solution["number"] == 9
    assert solution["lattice"] == "tP"

    beam_centre = indexer.get_indexer_beam_centre()
    assert beam_centre == pytest.approx((219.8758, 212.6103), abs=1e-3)
    print(indexer.get_indexer_experiment_list()[0].crystal)
    print(indexer.get_indexer_experiment_list()[0].detector)

    # test serialization of indexer
    json_str = indexer.as_json()
    indexer2 = DialsIndexer.from_json(string=json_str)
    indexer2.index()

    assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
    assert indexer.get_indexer_beam_centre() == pytest.approx(
        indexer2.get_indexer_beam_centre()
    )

    # eliminating the tP solution should drop both indexers to oC
    indexer.eliminate()
    indexer2.eliminate()

    assert indexer.get_indexer_cell() == pytest.approx(indexer2.get_indexer_cell())
    assert indexer.get_indexer_lattice() == "oC"
    assert indexer2.get_indexer_lattice() == "oC"
def test_dials_indexer_serial(ccp4, dials_data, run_in_tmp_path):
    """Run the DIALS indexer exercise with an emptied sys.argv."""
    with mock.patch.object(sys, "argv", []):
        _exercise_dials_indexer(dials_data, run_in_tmp_path)
| bsd-3-clause | 747a733386ecd09ee68a59d85107dfa8 | 33.162162 | 83 | 0.704509 | 3.15212 | false | true | false | false |
xia2/xia2 | tests/Modules/Scaler/test_CCP4ScalerA.py | 1 | 3230 | from __future__ import annotations
import os
import sys
from unittest import mock
def test_ccp4_scalerA(regression_test, ccp4, dials_data, run_in_tmp_path):
    """End-to-end exercise of CCP4ScalerA (index/refine/integrate/scale) on
    the centroid_test_data sweep, including scaler JSON (de)serialisation."""
    from xia2.Handlers.Phil import PhilIndex

    PhilIndex.params.xia2.settings.multiprocessing.nproc = 1

    template = dials_data("centroid_test_data", pathlib=True) / "centroid_####.cbf"
    tmpdir = str(run_in_tmp_path)

    from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
    from xia2.Modules.Integrater.DialsIntegrater import DialsIntegrater
    from xia2.Modules.Refiner.DialsRefiner import DialsRefiner
    from xia2.Modules.Scaler.CCP4ScalerA import CCP4ScalerA

    indexer = DialsIndexer()
    indexer.set_working_directory(tmpdir)

    from dxtbx.model import ExperimentList

    experiments = ExperimentList.from_templates([template])
    imageset = experiments.imagesets()[0]
    indexer.add_indexer_imageset(imageset)

    from xia2.Schema.XCrystal import XCrystal
    from xia2.Schema.XProject import XProject
    from xia2.Schema.XSample import XSample
    from xia2.Schema.XSweep import XSweep
    from xia2.Schema.XWavelength import XWavelength

    # minimal project/crystal/wavelength/sample hierarchy around the sweep
    proj = XProject(name="AUTOMATIC")
    cryst = XCrystal("CRYST1", proj)
    wav = XWavelength("WAVE1", cryst, imageset.get_beam().get_wavelength())
    cryst.add_wavelength(wav)
    samp = XSample("X1", cryst)
    directory, image = os.path.split(imageset.get_path(1))
    # empty argv so xia2's internal command-line parsing sees no stray args
    with mock.patch.object(sys, "argv", []):
        sweep = XSweep("SWEEP1", wav, samp, directory=directory, image=image)
        indexer.set_indexer_sweep(sweep)

        refiner = DialsRefiner()
        refiner.set_working_directory(tmpdir)
        refiner.add_refiner_indexer(sweep.get_epoch(1), indexer)

        integrater = DialsIntegrater()
        integrater.set_output_format("hkl")
        integrater.set_working_directory(tmpdir)
        integrater.setup_from_image(imageset.get_path(1))
        integrater.set_integrater_refiner(refiner)
        # integrater.set_integrater_indexer(indexer)
        integrater.set_integrater_sweep(sweep)
        integrater.set_integrater_sweep_name("SWEEP1")
        integrater.set_integrater_project_info("AUTOMATIC", "CRYST1", "WAVE1")

        scaler = CCP4ScalerA(base_path=run_in_tmp_path)
        scaler.add_scaler_integrater(integrater)
        scaler.set_scaler_xcrystal(cryst)
        scaler.set_scaler_project_info("AUTOMATIC", "CRYST1")

        _check_scaler_files_exist(scaler)

        # test serialization of scaler
        json_str = scaler.as_json()
        # print json_str
        scaler2 = CCP4ScalerA.from_json(string=json_str)
        scaler2.set_scaler_xcrystal(cryst)

        _check_scaler_files_exist(scaler2)

        # forcing a rescale should regenerate the same outputs
        scaler2.set_scaler_done(False)
        _check_scaler_files_exist(scaler2)

        scaler2._scalr_integraters = {}  # XXX
        scaler2.add_scaler_integrater(integrater)
        scaler2.set_scaler_prepare_done(False)
        _check_scaler_files_exist(scaler2)
def _check_scaler_files_exist(scaler):
merged = scaler.get_scaled_merged_reflections()
for filetype in ("mtz", "sca", "sca_unmerged"):
assert filetype in merged
if isinstance(merged[filetype], str):
files = [merged[filetype]]
else:
files = merged[filetype].values()
for f in files:
assert os.path.isfile(f)
| bsd-3-clause | 22a1767b0b88281332b338bb0291cc60 | 33.731183 | 83 | 0.711765 | 3.166667 | false | true | false | false |
xia2/xia2 | src/xia2/Wrappers/CCP4/Ctruncate.py | 1 | 6043 | from __future__ import annotations
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Citations import Citations
from xia2.lib.bits import transpose_loggraph
logger = logging.getLogger("xia2.Wrappers.CCP4.Ctruncate")
def Ctruncate(DriverType=None):
    """A factory for CtruncateWrapper classes.

    Returns an instance of a wrapper class built on top of the Driver type
    selected by ``DriverType`` (``None`` selects the default driver).
    """

    DriverInstance = DriverFactory.Driver(DriverType)

    class CtruncateWrapper(DriverInstance.__class__):
        """A wrapper for Ctruncate, using the regular Driver."""

        def __init__(self):
            # generic things
            DriverInstance.__class__.__init__(self)

            Citations.cite("ccp4")

            # ctruncate lives in the CCP4 binary directory ($CBIN).
            self.set_executable(os.path.join(os.environ.get("CBIN", ""), "ctruncate"))

            self._anomalous = False
            self._nres = 0

            # Parsed from ctruncate's log output after a successful run.
            self._b_factor = 0.0
            self._moments = None

            # numbers of reflections in and out, and number of absences
            # counted
            self._nref_in = 0
            self._nref_out = 0
            self._nabsent = 0

            self._xmlout = None

        def set_hklin(self, hklin):
            # Input MTZ reflection file path.
            self._hklin = hklin

        def set_hklout(self, hklout):
            # Output MTZ reflection file path.
            self._hklout = hklout

        def set_nres(self, nres):
            # Number of residues, used by ctruncate for scaling estimates.
            self._nres = nres

        def set_anomalous(self, anomalous):
            # If True, also process the anomalous I(+)/I(-) columns.
            self._anomalous = anomalous

        def get_xmlout(self):
            return self._xmlout

        def truncate(self):
            """Actually perform the truncation procedure.

            Runs the ctruncate binary, raises ``RuntimeError`` on failure
            (removing any partial output file), then parses the reflection
            count, Wilson B factor and acentric moments from the log.
            """

            if not self._hklin:
                raise RuntimeError("hklin not defined")
            if not self._hklout:
                raise RuntimeError("hklout not defined")

            self.add_command_line("-hklin")
            self.add_command_line(self._hklin)
            self.add_command_line("-hklout")
            self.add_command_line(self._hklout)

            if self._nres:
                self.add_command_line("-nres")
                self.add_command_line("%d" % self._nres)

            if self._anomalous:
                self.add_command_line("-colano")
                self.add_command_line("/*/*/[I(+),SIGI(+),I(-),SIGI(-)]")
                self.add_command_line("-colin")
                self.add_command_line("/*/*/[IMEAN,SIGIMEAN]")

            # Unique XML output name per process id to avoid clashes when
            # several wrappers share a working directory.
            self._xmlout = os.path.join(
                self.get_working_directory(), "%d_truncate.xml" % self.get_xpid()
            )
            self.add_command_line("-xmlout")
            self.add_command_line(self._xmlout)

            self.start()
            self.close_wait()

            try:
                self.check_for_errors()
            except RuntimeError as e:
                # Best-effort clean-up of a partial output file before
                # re-raising a uniform error.
                try:
                    os.remove(self._hklout)
                except Exception:
                    pass
                logger.debug(str(e))
                raise RuntimeError("ctruncate failure")

            # Scrape summary statistics from the textual log output.
            nref = 0
            for record in self.get_all_output():
                if "Number of reflections:" in record:
                    nref = int(record.split()[-1])
                if "Estimate of Wilson B factor:" in record:
                    self._b_factor = float(record.split(":")[1].split()[0])

            # ctruncate does not report absences separately here, so in/out
            # counts are taken to be equal and absences zero.
            self._nref_in, self._nref_out = nref, nref
            self._nabsent = 0

            # The moments table name varies between ctruncate versions; try
            # the known variants in order of preference.
            moments = None
            results = self.parse_ccp4_loggraph()
            if "Acentric moments of E using Truncate method" in results:
                moments = transpose_loggraph(
                    results["Acentric moments of E using Truncate method"]
                )
            elif "Acentric moments of I" in results:
                moments = transpose_loggraph(results["Acentric moments of I"])
            elif "Acentric moments of E" in results:
                moments = transpose_loggraph(results["Acentric moments of E"])
            else:
                logger.debug("Acentric moments of E/I not found")
            self._moments = moments

        def get_b_factor(self):
            return self._b_factor

        def get_moments(self):
            return self._moments

        def get_nref_in(self):
            return self._nref_in

        def get_nref_out(self):
            return self._nref_out

        def get_nabsent(self):
            return self._nabsent

        def parse_ccp4_loggraph(self):
            """Look through the standard output of the program for
            CCP4 loggraph text. When this is found store it in a
            local dictionary to allow exploration.

            A $TABLE block is terminated once four '$$' delimiters have
            been seen; tokens between the delimiters give the column
            names (token 1) and the data rows (token 3).
            """

            # reset the loggraph store
            self._loggraph = {}

            output = self.get_all_output()

            for i in range(len(output)):
                line = output[i]
                if "$TABLE" in line:
                    n_dollar = line.count("$$")
                    current = line.split(":")[1].replace(">", "").strip()
                    self._loggraph[current] = {}
                    self._loggraph[current]["columns"] = []
                    self._loggraph[current]["data"] = []

                    loggraph_info = ""

                    # NOTE(review): the first line's '$$' count is added
                    # twice (once above, once in the loop) - harmless for
                    # typical $TABLE lines which contain no '$$', but worth
                    # confirming against real ctruncate output.
                    while n_dollar < 4:
                        n_dollar += line.count("$$")
                        loggraph_info += line
                        if n_dollar == 4:
                            break
                        # Manual advance; the enclosing for loop will
                        # revisit these lines, but they cannot contain
                        # another '$TABLE' so no table is parsed twice.
                        i += 1
                        line = output[i]

                    tokens = loggraph_info.split("$$")
                    self._loggraph[current]["columns"] = tokens[1].split()

                    if len(tokens) < 4:
                        raise RuntimeError('loggraph "%s" broken' % current)

                    data = tokens[3].split("\n")

                    columns = len(self._loggraph[current]["columns"])

                    for record in data:
                        record = record.split()
                        # Only keep complete rows; headers/blank lines are
                        # dropped by this length check.
                        if len(record) == columns:
                            self._loggraph[current]["data"].append(record)

            return self._loggraph

    return CtruncateWrapper()
| bsd-3-clause | a96b13ad11d2df448e77ef61b185bf2d | 29.675127 | 86 | 0.500083 | 4.304131 | false | false | false | false |
xia2/xia2 | tests/command_line/test_to_shelx.py | 1 | 1108 | from __future__ import annotations
import subprocess
def test_to_shelx(dials_data, tmp_path):
    """Check that xia2.to_shelx writes SHELX .hkl/.ins files from a scaled MTZ.

    First generates an unmerged MTZ with dials.export, then runs
    xia2.to_shelx twice: once with just a compound formula, and once with an
    explicit unit cell supplied via ``--cell``.
    """
    l_cyst = dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)

    # First create an unmerged mtz.
    expt = l_cyst / "scaled_30.expt"
    refls = l_cyst / "scaled_30.refl"
    result = subprocess.run(
        ["dials.export", expt, refls, "mtz.hklout=scaled.mtz"], cwd=tmp_path
    )
    # BUG FIX: the original `assert not result.returncode or result.stderr`
    # would have accepted a *failing* run whenever stderr was truthy. Since
    # output is not captured, stderr is always None here, so requiring both
    # a zero return code and empty stderr preserves today's behaviour while
    # staying correct if output capture is ever enabled.
    assert not result.returncode and not result.stderr

    # now test the program
    args = ["xia2.to_shelx", tmp_path / "scaled.mtz", "lcys", "C3H7NO2S"]
    result = subprocess.run(args, cwd=tmp_path)
    assert not result.returncode and not result.stderr
    assert (tmp_path / "lcys.hkl").is_file()
    assert (tmp_path / "lcys.ins").is_file()

    # now test the program with '--cell' option
    args = [
        "xia2.to_shelx",
        tmp_path / "scaled.mtz",
        "lcyst",
        "C3H7NO2S",
        f"--cell={expt}",
    ]
    result = subprocess.run(args, cwd=tmp_path)
    assert not result.returncode and not result.stderr
    assert (tmp_path / "lcyst.hkl").is_file()
    assert (tmp_path / "lcyst.ins").is_file()
| bsd-3-clause | 00fce639100d3f1b1bc69636ba2f2860 | 30.657143 | 76 | 0.620036 | 2.885417 | false | true | false | false |
menpo/menpo | menpo/visualize/textutils.py | 2 | 7332 | import sys
from collections import deque
from datetime import datetime
from time import time
def progress_bar_str(percentage, bar_length=20, bar_marker="=", show_bar=True):
    r"""
    Returns an `str` of the specified progress percentage. The percentage is
    represented either in the form of a progress bar or in the form of a
    percentage number. It can be combined with the :func:`print_dynamic`
    function.

    Parameters
    ----------
    percentage : `float`
        The progress percentage to be printed. Negative values raise;
        values greater than ``1`` are clamped to ``1``.
    bar_length : `int`, optional
        Defines the length of the bar in characters.
    bar_marker : `str`, optional
        Defines the marker character that will be used to fill the bar.
    show_bar : `bool`, optional
        If ``True``, the `str` includes the bar followed by the percentage,
        e.g. ``'[=====     ] 50%'``. If ``False``, the `str` includes only
        the percentage, e.g. ``'50%'``.

    Returns
    -------
    progress_str : `str`
        The progress percentage string that can be printed.

    Raises
    ------
    ValueError
        ``percentage`` is negative
    ValueError
        ``bar_length`` must be an integer >= ``1``
    ValueError
        ``bar_marker`` must be a string of length 1
    """
    if percentage < 0:
        raise ValueError("percentage is not in the range [0, 1]")
    # Over-complete progress is silently clamped rather than rejected.
    percentage = min(percentage, 1)
    if not isinstance(bar_length, int) or bar_length < 1:
        raise ValueError("bar_length must be an integer >= 1")
    if not isinstance(bar_marker, str) or len(bar_marker) != 1:
        raise ValueError("bar_marker must be a string of length 1")

    # %d truncates towards zero, matching the historical output format.
    percent_str = "%d%%" % (percentage * 100)
    if not show_bar:
        return percent_str
    n_markers = int(percentage * bar_length)
    bar = (bar_marker * n_markers).ljust(bar_length)
    return "[%s] %s" % (bar, percent_str)
def print_dynamic(str_to_print):
    r"""
    Prints dynamically the provided `str`: a carriage return moves the
    cursor back to the start of the line, the padded string overwrites any
    previous output, and the buffer is flushed immediately.

    Parameters
    ----------
    str_to_print : `str`
        The string to print.
    """
    # Pad to 80 columns so shorter strings fully overwrite longer ones.
    padded = str_to_print.ljust(80)
    stream = sys.stdout
    stream.write("\r{}".format(padded))
    stream.flush()
def bytes_str(num):
    r"""
    Converts bytes to a human readable format. For example: ::

        print_bytes(12345) returns '12.06 KB'
        print_bytes(123456789) returns '117.74 MB'

    Parameters
    ----------
    num : `int`
        The size in bytes.

    Raises
    ------
    ValueError
        num must be int >= 0
    """
    if not isinstance(num, int) or num < 0:
        raise ValueError("num must be int >= 0")
    units = ["bytes", "KB", "MB", "GB"]
    value = num
    idx = 0
    # Divide down by 1024 until the value fits the current unit (or we run
    # out of named units, in which case everything left is reported as TB).
    while idx < len(units) and value >= 1024.0:
        value /= 1024.0
        idx += 1
    unit = units[idx] if idx < len(units) else "TB"
    return "{0:3.2f} {1:s}".format(value, unit)
def print_progress(
    iterable,
    prefix="",
    n_items=None,
    offset=0,
    show_bar=True,
    show_count=True,
    show_eta=True,
    end_with_newline=True,
    min_seconds_between_updates=0.1,
):
    r"""
    Print the remaining time needed to compute over an iterable.

    To use, wrap an existing iterable with this function before processing in
    a for loop (see example).

    The estimate of the remaining time is based on a moving average of the last
    100 items completed in the loop.

    Parameters
    ----------
    iterable : `iterable`
        An iterable that will be processed. The iterable is passed through by
        this function, with the time taken for each complete iteration logged.
    prefix : `str`, optional
        If provided a string that will be prepended to the progress report at
        each level.
    n_items : `int`, optional
        Allows for ``iterator`` to be a generator whose length will be assumed
        to be `n_items`. If not provided, then ``iterator`` needs to be
        `Sizable`.
    offset : `int`, optional
        Useful in combination with ``n_items`` - report back the progress as
        if `offset` items have already been handled. ``n_items`` will be left
        unchanged.
    show_bar : `bool`, optional
        If False, The progress bar (e.g. [=========      ]) will be hidden.
    show_count : `bool`, optional
        If False, The item count (e.g. (4/25)) will be hidden.
    show_eta : `bool`, optional
        If False, The estimated time to finish (e.g. - 00:00:03 remaining)
        will be hidden.
    end_with_newline : `bool`, optional
        If False, there will be no new line added at the end of the dynamic
        printing. This means the next print statement will overwrite the
        dynamic report presented here. Useful if you want to follow up a
        print_progress with a second print_progress, where the second
        overwrites the first on the same line.
    min_seconds_between_updates : `float`, optional
        The number of seconds that have to pass between two print updates.
        This allows ``print_progress`` to be used on fast iterations without
        incurring a significant overhead. Set to ``0`` to disable this
        throttling.

    Raises
    ------
    ValueError
        ``offset`` provided without ``n_items``

    Examples
    --------
    This for loop: ::

        from time import sleep
        for i in print_progress(range(100)):
            sleep(1)

    prints a progress report of the form: ::

        [=============       ] 70% (7/10) - 00:00:03 remaining
    """
    if n_items is None and offset != 0:
        raise ValueError(
            "offset can only be set when n_items has been" " manually provided."
        )
    if prefix != "":
        prefix += ": "
        # Shorter bar when a prefix is shown, to keep the line within width.
        bar_length = 10
    else:
        bar_length = 20
    n = n_items if n_items is not None else len(iterable)
    # Bounded deque: the ETA is a moving average over (at most) the last 100
    # per-item durations.
    timings = deque([], 100)
    time1 = time()
    last_update_time = 0
    for i, x in enumerate(iterable, 1 + offset):
        yield x
        time2 = time()
        timings.append(time2 - time1)
        time1 = time2
        remaining = n - i
        # Throttle: skip the (comparatively expensive) terminal update if
        # the last one was too recent.
        if time2 - last_update_time < min_seconds_between_updates:
            continue
        last_update_time = time2
        # Mean seconds/item * items remaining, rendered as HH:MM:SS.
        # NOTE(review): utcfromtimestamp is deprecated since Python 3.12 -
        # consider datetime.fromtimestamp(..., tz=timezone.utc).
        duration = datetime.utcfromtimestamp(sum(timings) / len(timings) * remaining)
        bar_str = progress_bar_str(i / n, bar_length=bar_length, show_bar=show_bar)
        count_str = " ({}/{})".format(i, n) if show_count else ""
        eta_str = (
            " - {} remaining".format(duration.strftime("%H:%M:%S")) if show_eta else ""
        )
        print_dynamic("{}{}{}{}".format(prefix, bar_str, count_str, eta_str))
    # the iterable has now finished - to make it clear redraw the progress with
    # a done message. We also hide the eta at this stage.
    count_str = " ({}/{})".format(n, n) if show_count else ""
    bar_str = progress_bar_str(1, bar_length=bar_length, show_bar=show_bar)
    print_dynamic("{}{}{} - done.".format(prefix, bar_str, count_str))
    if end_with_newline:
        print("")
| bsd-3-clause | 5c4db0c457060413f0ed768e63030789 | 31.878924 | 87 | 0.592199 | 3.948304 | false | false | false | false |
menpo/menpo | menpo/transform/homogeneous/base.py | 2 | 15465 | from warnings import warn
import numpy as np
from menpo.base import Vectorizable, MenpoDeprecationWarning
from menpo.transform.base import (
Alignment,
ComposableTransform,
VComposable,
VInvertible,
)
class HomogFamilyAlignment(Alignment):
    r"""
    Simple subclass of Alignment that adds the ability to create a copy of an
    alignment class without the alignment behavior.

    Note that subclasses should inherit from :map:`HomogFamilyAlignment` first
    to have the correct copy behavior.
    """

    def as_non_alignment(self):
        r"""
        Returns a copy of this transform without its alignment nature.

        Returns
        -------
        transform : :map:`Homogeneous` but not :map:`Alignment` subclass
            A version of this transform with the same transform behavior but
            without the alignment logic.
        """
        # Abstract - each aligning subclass knows its non-aligning twin.
        raise NotImplementedError()

    def copy(self):
        r"""
        Generate an efficient copy of this :map:`HomogFamilyAlignment`.

        Returns
        -------
        new_transform : ``type(self)``
            A copy of this object
        """
        clone = self.__class__.__new__(self.__class__)
        # Shallow-copy every attribute, then take a fresh copy of just the
        # h_matrix so the clone cannot mutate our matrix in place.
        clone.__dict__ = dict(self.__dict__)
        clone._h_matrix = clone._h_matrix.copy()
        return clone

    def pseudoinverse(self):
        r"""
        The pseudoinverse of the transform - that is, the transform that
        results from swapping source and target, or more formally, negating
        the transforms parameters. If the transform has a true inverse this
        is returned instead.

        Returns
        -------
        transform : ``type(self)``
            The inverse of this transform.
        """
        inverse = self.copy()
        inverse._h_matrix = self._h_matrix_pseudoinverse()
        # Inverting the matrix swaps the roles of source and target.
        inverse._source, inverse._target = inverse._target, inverse._source
        return inverse
class Homogeneous(ComposableTransform, Vectorizable, VComposable, VInvertible):
    r"""
    A simple ``n``-dimensional homogeneous transformation.

    Adds a unit homogeneous coordinate to points, performs the dot
    product, re-normalizes by division by the homogeneous coordinate,
    and returns the result.

    Can be composed with another :map:`Homogeneous`, so long as the
    dimensionality matches.

    Parameters
    ----------
    h_matrix : ``(n_dims + 1, n_dims + 1)`` `ndarray`
        The homogeneous matrix defining this transform.
    copy : `bool`, optional
        If ``False``, avoid copying ``h_matrix``. Useful for performance.
    skip_checks : `bool`, optional
        If ``True``, avoid sanity checks on the ``h_matrix``. Useful for
        performance.
    """

    def __init__(self, h_matrix, copy=True, skip_checks=False):
        self._h_matrix = None
        # Delegate setting to the most specialized setter method possible
        self._set_h_matrix(h_matrix, copy=copy, skip_checks=skip_checks)

    @classmethod
    def init_identity(cls, n_dims):
        r"""
        Creates an identity matrix Homogeneous transform.

        Parameters
        ----------
        n_dims : `int`
            The number of dimensions.

        Returns
        -------
        identity : :class:`Homogeneous`
            The identity matrix transform.
        """
        return Homogeneous(np.eye(n_dims + 1))

    @property
    def h_matrix_is_mutable(self):
        r"""Deprecated
        ``True`` iff :meth:`set_h_matrix` is permitted on this type of
        transform.

        If this returns ``False`` calls to :meth:`set_h_matrix` will raise
        a ``NotImplementedError``.

        :type: `bool`
        """
        warn(
            "the public API for mutable operations is deprecated "
            "and will be removed in a future version of Menpo. "
            "Create a new transform instead.",
            MenpoDeprecationWarning,
        )
        return False

    def from_vector(self, vector):
        """
        Build a new instance of the object from its vectorized state.

        ``self`` is used to fill out the missing state required to rebuild a
        full object from it's standardized flattened state. This is the default
        implementation, which is a ``deepcopy`` of the object followed by a call
        to :meth:`from_vector_inplace()`. This method can be overridden for a
        performance benefit if desired.

        Parameters
        ----------
        vector : ``(n_parameters,)`` `ndarray`
            Flattened representation of the object.

        Returns
        -------
        transform : :class:`Homogeneous`
            An new instance of this class.
        """
        # avoid the deepcopy with an efficient copy
        self_copy = self.copy()
        self_copy._from_vector_inplace(vector)
        return self_copy

    def __str__(self):
        rep = self._transform_str() + "\n"
        rep += str(self.h_matrix)
        return rep

    def _transform_str(self):
        r"""
        A string representation explaining what this homogeneous transform
        does. Has to be implemented by base classes.

        Returns
        -------
        string : `str`
            String representation of transform.
        """
        return "Homogeneous"

    @property
    def h_matrix(self):
        r"""
        The homogeneous matrix defining this transform.

        :type: ``(n_dims + 1, n_dims + 1)`` `ndarray`
        """
        return self._h_matrix

    def set_h_matrix(self, value, copy=True, skip_checks=False):
        r"""Deprecated
        Deprecated - do not use this method - you are better off just creating
        a new transform!

        Updates ``h_matrix``, optionally performing sanity checks.

        Note that it won't always be possible to manually specify the
        ``h_matrix`` through this method, specifically if changing the
        ``h_matrix`` could change the nature of the transform. See
        :attr:`h_matrix_is_mutable` for how you can discover if the
        ``h_matrix`` is allowed to be set for a given class.

        Parameters
        ----------
        value : `ndarray`
            The new homogeneous matrix to set.
        copy : `bool`, optional
            If ``False``, do not copy the h_matrix. Useful for performance.
        skip_checks : `bool`, optional
            If ``True``, skip checking. Useful for performance.

        Raises
        ------
        NotImplementedError
            If :attr:`h_matrix_is_mutable` returns ``False``.
        """
        warn(
            "the public API for mutable operations is deprecated "
            "and will be removed in a future version of Menpo. "
            "Create a new transform instead.",
            MenpoDeprecationWarning,
        )
        if self.h_matrix_is_mutable:
            self._set_h_matrix(value, copy=copy, skip_checks=skip_checks)
        else:
            raise NotImplementedError(
                "h_matrix cannot be set on {}".format(self._transform_str())
            )

    def _set_h_matrix(self, value, copy=True, skip_checks=False):
        r"""
        Actually updates the ``h_matrix``, optionally performing sanity checks.

        Called by :meth:`set_h_matrix` on classes that have
        :attr:`h_matrix_is_mutable` as ``True``.

        Every subclass should invoke this method internally when the
        ``h_matrix`` needs to be set in order to get the most sanity checking
        possible.

        Parameters
        ----------
        value : `ndarray`
            The new homogeneous matrix to set
        copy : `bool`, optional
            If ``False``, do not copy the h_matrix. Useful for performance.
        skip_checks : `bool`, optional
            If ``True``, skip checking. Useful for performance.
        """
        # Note: skip_checks is unused here - this base class performs no
        # validation; subclasses override to add checks.
        if copy:
            value = value.copy()
        self._h_matrix = value

    @property
    def n_dims(self):
        r"""
        The dimensionality of the data the transform operates on.

        :type: `int`
        """
        return self.h_matrix.shape[1] - 1

    @property
    def n_dims_output(self):
        r"""
        The output of the data from the transform.

        :type: `int`
        """
        # doesn't have to be a square homogeneous matrix...
        return self.h_matrix.shape[0] - 1

    def _apply(self, x, **kwargs):
        # convert to homogeneous (append a unit coordinate to each point)
        h_x = np.hstack([x, np.ones([x.shape[0], 1])])
        # apply the transform
        h_y = h_x.dot(self.h_matrix.T)
        # normalize by the homogeneous coordinate and drop it
        return (h_y / h_y[:, -1][:, None])[:, :-1]

    def _as_vector(self):
        # The parameterization is simply the flattened h_matrix.
        return self.h_matrix.ravel()

    def _from_vector_inplace(self, vector):
        """
        Update the state of this object from a vector form.

        Parameters
        ----------
        vector : ``(n_parameters,)`` `ndarray`
            Flattened representation of this object
        """
        self._set_h_matrix(
            vector.reshape(self.h_matrix.shape), copy=True, skip_checks=True
        )

    @property
    def composes_inplace_with(self):
        r"""
        :class:`Homogeneous` can swallow composition with any other
        :class:`Homogeneous`, subclasses will have to override and be more
        specific.
        """
        return Homogeneous

    def compose_after_from_vector_inplace(self, vector):
        # Build a transform from the vector, then compose it in after self.
        self.compose_after_inplace(self.from_vector(vector))

    @property
    def composes_with(self):
        r"""
        Any Homogeneous can compose with any other Homogeneous.
        """
        return Homogeneous

    # noinspection PyProtectedMember
    def _compose_before(self, t):
        r"""
        Chains an Homogeneous family transform with another transform of the
        same family, producing a new transform that is the composition of
        the two.

        .. note::

            The type of the returned transform is always the first common
            ancestor between self and transform.

            Any Alignment will be lost.

        Parameters
        ----------
        t : :class:`Homogeneous`
            Transform to be applied **after** self

        Returns
        -------
        transform : :class:`Homogeneous`
            The resulting homogeneous transform.
        """
        # note that this overload of the basic _compose_before is just to
        # deal with the complexities of maintaining the correct class of
        # transform upon composition
        if isinstance(t, type(self)):
            # He is a subclass of me - I can swallow him.
            # What if I'm an Alignment though? Rules of composition state we
            # have to produce a non-Alignment result. Nasty, but we check
            # here to save a lot of repetition.
            if isinstance(self, HomogFamilyAlignment):
                new_self = self.as_non_alignment()
            else:
                new_self = self.copy()
            new_self._compose_before_inplace(t)
        elif isinstance(self, type(t)):
            # I am a subclass of him - he can swallow me
            new_self = t._compose_after(self)
        elif isinstance(self, Similarity) and isinstance(t, Similarity):
            # we're both in the Similarity family
            new_self = Similarity(self.h_matrix)
            new_self._compose_before_inplace(t)
        elif isinstance(self, Affine) and isinstance(t, Affine):
            # we're both in the Affine family
            new_self = Affine(self.h_matrix)
            new_self._compose_before_inplace(t)
        else:
            # at least one of us is Homogeneous
            new_self = Homogeneous(self.h_matrix)
            new_self._compose_before_inplace(t)
        return new_self

    # noinspection PyProtectedMember
    def _compose_after(self, t):
        r"""
        Chains an Homogeneous family transform with another transform of the
        same family, producing a new transform that is the composition of
        the two.

        .. note::

            The type of the returned transform is always the first common
            ancestor between self and transform.

            Any Alignment will be lost.

        Parameters
        ----------
        t : :class:`Homogeneous`
            Transform to be applied **before** self

        Returns
        -------
        transform : :class:`Homogeneous`
            The resulting homogeneous transform.
        """
        # note that this overload of the basic _compose_after is just to
        # deal with the complexities of maintaining the correct class of
        # transform upon composition
        if isinstance(t, type(self)):
            # He is a subclass of me - I can swallow him.
            # What if I'm an Alignment though? Rules of composition state we
            # have to produce a non-Alignment result. Nasty, but we check
            # here to save a lot of repetition.
            if isinstance(self, HomogFamilyAlignment):
                new_self = self.as_non_alignment()
            else:
                new_self = self.copy()
            new_self._compose_after_inplace(t)
        elif isinstance(self, type(t)):
            # I am a subclass of him - he can swallow me
            new_self = t._compose_before(self)
        elif isinstance(self, Similarity) and isinstance(t, Similarity):
            # we're both in the Similarity family
            new_self = Similarity(self.h_matrix)
            new_self._compose_after_inplace(t)
        elif isinstance(self, Affine) and isinstance(t, Affine):
            # we're both in the Affine family
            new_self = Affine(self.h_matrix)
            new_self._compose_after_inplace(t)
        else:
            # at least one of us is Homogeneous
            new_self = Homogeneous(self.h_matrix)
            new_self._compose_after_inplace(t)
        return new_self

    def _compose_before_inplace(self, transform):
        # Compose machinery will guarantee this is only invoked in the right
        # circumstances (e.g. the types will match) so we don't need to block
        # the setting of the matrix
        self._set_h_matrix(
            np.dot(transform.h_matrix, self.h_matrix), copy=False, skip_checks=True
        )

    def _compose_after_inplace(self, transform):
        # Compose machinery will guarantee this is only invoked in the right
        # circumstances (e.g. the types will match) so we don't need to block
        # the setting of the matrix
        self._set_h_matrix(
            np.dot(self.h_matrix, transform.h_matrix), copy=False, skip_checks=True
        )

    @property
    def has_true_inverse(self):
        r"""
        The pseudoinverse is an exact inverse.

        :type: ``True``
        """
        return True

    def pseudoinverse(self):
        r"""
        The pseudoinverse of the transform - that is, the transform that
        results from swapping `source` and `target`, or more formally, negating
        the transforms parameters. If the transform has a true inverse this
        is returned instead.

        :type: :class:`Homogeneous`
        """
        # Skip the checks as we know inverse of a homogeneous is a homogeneous
        return self.__class__(
            self._h_matrix_pseudoinverse(), copy=False, skip_checks=True
        )

    def _h_matrix_pseudoinverse(self):
        # For an invertible homogeneous matrix this is the exact inverse.
        return np.linalg.inv(self.h_matrix)
from .affine import Affine
from .similarity import Similarity
| bsd-3-clause | 8e8e0a8012ab9e64e76c519abba99ef9 | 32.546638 | 83 | 0.589848 | 4.43759 | false | false | false | false |
menpo/menpo | menpo/landmark/labels/human/face_3d.py | 2 | 3003 | from collections import OrderedDict
import numpy as np
from ..base import labeller_func, validate_input, connectivity_from_array
@labeller_func(group_label="face_bu3dfe_83")
def face_bu3dfe_83_to_face_bu3dfe_83(pcloud):
    r"""
    Apply the BU-3DFE (Binghamton University 3D Facial Expression)
    Database 83-point facial semantic labels.

    The semantic labels applied are as follows:

      - right_eye
      - left_eye
      - right_eyebrow
      - left_eyebrow
      - right_nose
      - left_nose
      - nostrils
      - outer_mouth
      - inner_mouth
      - jaw

    References
    ----------
    .. [1] http://www.cs.binghamton.edu/~lijun/Research/3DFE/3DFE_Analysis.html
    """
    from menpo.shape import LabelledPointUndirectedGraph

    n_expected_points = 83
    validate_input(pcloud, n_expected_points)

    # Each region is (label, start, stop, close_loop) over the 83 points.
    # The order of this table defines the order of labels in the mapping.
    regions = [
        ("right_eye", 0, 8, True),
        ("left_eye", 8, 16, True),
        ("right_eyebrow", 16, 26, True),
        ("left_eyebrow", 26, 36, True),
        ("right_nose", 36, 39, False),
        ("left_nose", 39, 42, False),
        ("nostrils", 42, 48, False),
        ("outer_mouth", 48, 60, True),
        ("inner_mouth", 60, 68, True),
        ("jaw", 68, 83, False),
    ]
    mapping = OrderedDict(
        (label, np.arange(start, stop)) for label, start, stop, _ in regions
    )
    closed = {label: close_loop for label, _, _, close_loop in regions}

    # The edge rows are stacked in the historical order, which places the
    # nostril edges between the right- and left-nose edges (note this differs
    # from the label ordering above and is preserved for compatibility).
    stack_order = [
        "right_eye",
        "left_eye",
        "right_eyebrow",
        "left_eyebrow",
        "right_nose",
        "nostrils",
        "left_nose",
        "outer_mouth",
        "inner_mouth",
        "jaw",
    ]
    all_connectivity = np.vstack(
        [
            connectivity_from_array(mapping[label], close_loop=True)
            if closed[label]
            else connectivity_from_array(mapping[label])
            for label in stack_order
        ]
    )

    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points, all_connectivity, mapping
    )

    return new_pcloud, mapping
| bsd-3-clause | 87512535718446445c12df79e0147f65 | 31.641304 | 80 | 0.668665 | 3.487805 | false | false | false | false |
menpo/menpo | menpo/transform/base/composable.py | 2 | 11232 | from menpo.transform.base import Transform
from functools import reduce
class ComposableTransform(Transform):
r"""
:map:`Transform` subclass that enables native composition, such that the
behavior of multiple :map:`Transform` s is composed together in a natural
way.
"""
    @property
    def composes_inplace_with(self):
        r"""
        The :map:`Transform` s that this transform composes inplace with
        **natively** (i.e. no :map:`TransformChain` will be produced).

        An attempt to compose inplace against any type that is not an instance
        of this property on this class will result in an `Exception`.

        :type: :map:`Transform` or `tuple` of :map:`Transform` s
        """
        # Abstract - concrete subclasses must return the type (or tuple of
        # types) they can natively compose with in place.
        raise NotImplementedError()
    @property
    def composes_with(self):
        r"""
        The :map:`Transform` s that this transform composes with **natively**
        (i.e. no :map:`TransformChain` will be produced).

        If native composition is not possible, falls back to producing a
        :map:`TransformChain`.

        By default, this is the same list as :attr:`composes_inplace_with`.

        :type: :map:`Transform` or `tuple` of :map:`Transform` s
        """
        return self.composes_inplace_with
def compose_before(self, transform):
r"""
A :map:`Transform` that represents **this** transform composed
**before** the given transform::
c = a.compose_before(b)
c.apply(p) == b.apply(a.apply(p))
``a`` and ``b`` are left unchanged.
An attempt is made to perform native composition, but will fall back
to a :map:`TransformChain` as a last resort. See :attr:`composes_with`
for a description of how the mode of composition is decided.
Parameters
----------
transform : :map:`Transform`
Transform to be applied **after** ``self``
Returns
-------
transform : :map:`Transform` or :map:`TransformChain`
If the composition was native, a single new :map:`Transform` will
be returned. If not, a :map:`TransformChain` is returned instead.
"""
if isinstance(transform, self.composes_with):
return self._compose_before(transform)
else:
# best we can do is a TransformChain, let Transform handle that.
return Transform.compose_before(self, transform)
def compose_after(self, transform):
r"""
A :map:`Transform` that represents **this** transform
composed **after** the given transform::
c = a.compose_after(b)
c.apply(p) == a.apply(b.apply(p))
``a`` and ``b`` are left unchanged.
This corresponds to the usual mathematical formalism for the compose
operator, ``o``.
An attempt is made to perform native composition, but will fall back
to a :map:`TransformChain` as a last resort. See :attr:`composes_with`
for a description of how the mode of composition is decided.
Parameters
----------
transform : :map:`Transform`
Transform to be applied **before** ``self``
Returns
-------
transform : :map:`Transform` or :map:`TransformChain`
If the composition was native, a single new :map:`Transform` will
be returned. If not, a :map:`TransformChain` is returned instead.
"""
if isinstance(transform, self.composes_with):
return self._compose_after(transform)
else:
# best we can do is a TransformChain, let Transform handle that.
return Transform.compose_after(self, transform)
def compose_before_inplace(self, transform):
r"""
Update ``self`` so that it represents **this** transform composed
**before** the given transform::
a_orig = a.copy()
a.compose_before_inplace(b)
a.apply(p) == b.apply(a_orig.apply(p))
``a`` is permanently altered to be the result of the composition.
``b`` is left unchanged.
Parameters
----------
transform : :attr:`composes_inplace_with`
Transform to be applied **after** ``self``
Raises
------
ValueError
If ``transform`` isn't an instance of :attr:`composes_inplace_with`
"""
if isinstance(transform, self.composes_inplace_with):
self._compose_before_inplace(transform)
else:
raise ValueError(
"{} can only compose inplace with {} - not "
"{}".format(type(self), self.composes_inplace_with, type(transform))
)
def compose_after_inplace(self, transform):
r"""
Update ``self`` so that it represents **this** transform composed
**after** the given transform::
a_orig = a.copy()
a.compose_after_inplace(b)
a.apply(p) == a_orig.apply(b.apply(p))
``a`` is permanently altered to be the result of the composition. ``b``
is left unchanged.
Parameters
----------
transform : :attr:`composes_inplace_with`
Transform to be applied **before** ``self``
Raises
------
ValueError
If ``transform`` isn't an instance of :attr:`composes_inplace_with`
"""
if isinstance(transform, self.composes_inplace_with):
self._compose_after_inplace(transform)
else:
raise ValueError(
"{} can only compose inplace with {} - not "
"{}".format(type(self), self.composes_inplace_with, type(transform))
)
def _compose_before(self, transform):
r"""
Naive implementation of composition, ``self.copy()`` and then
:meth:``compose_before_inplace``. Apply this transform **first**.
Parameters
----------
transform : :map:`ComposableTransform`
Transform to be applied **after** ``self``
Returns
-------
transform : :map:`ComposableTransform`
The resulting transform.
"""
# naive approach - copy followed by the inplace operation
self_copy = self.copy()
self_copy._compose_before_inplace(transform)
return self_copy
def _compose_after(self, transform):
r"""
Naive implementation of composition, ``self.copy()`` and then
:meth:``compose_after_inplace``. Apply this transform **second**.
Parameters
----------
transform : :map:`ComposableTransform`
Transform to be applied **before** ``self``
Returns
-------
transform : :map:`ComposableTransform`
The resulting transform.
"""
# naive approach - copy followed by the inplace operation
self_copy = self.copy()
self_copy._compose_after_inplace(transform)
return self_copy
def _compose_before_inplace(self, transform):
r"""
Specialised inplace composition. This should be overridden to provide
specific cases of composition as defined in
:attr:`composes_inplace_with`.
Parameters
----------
transform : :attr:`composes_inplace_with`
Transform to be applied **after** ``self``
"""
raise NotImplementedError()
def _compose_after_inplace(self, transform):
r"""
Specialised inplace composition. This should be overridden to provide
specific cases of composition as defined in
:attr:`composes_inplace_with`.
Parameters
----------
transform : :attr:`composes_inplace_with`
Transform to be applied **before** ``self``
"""
raise NotImplementedError()
class VComposable(object):
    r"""
    Mix-in for :map:`Vectorizable` :map:`ComposableTransform` s.

    Mix this into a :map:`ComposableTransform` that is also
    :map:`Vectorizable` to add ``from_vector`` flavoured composition hooks
    to the interface. These hooks exist so they can be tuned for
    performance.
    """

    def compose_after_from_vector_inplace(self, vector):
        r"""
        Specialised inplace composition directly against a parameter vector.
        Subclasses override this when the updated transform state can be
        derived purely from ``vector``.

        Parameters
        ----------
        vector : ``(n_parameters,)`` `ndarray`
            Vector to update the transform state with.
        """
        raise NotImplementedError()
class TransformChain(ComposableTransform):
    r"""
    A chain of transforms applied efficiently one after the other.

    This class is the natural product of generic composition. Note that
    objects may know how to compose themselves more efficiently - such
    objects implement :map:`ComposableTransform` or :map:`VComposable`.

    Parameters
    ----------
    transforms : `list` of :map:`Transform`
        The transforms to be applied, first element first: each transform's
        output feeds the next one until the chain is exhausted.
    """

    def __init__(self, transforms):
        # TODO Should TransformChain copy on input?
        self.transforms = transforms

    def _apply(self, x, **kwargs):
        r"""
        Apply each transform in the chain to the array ``x``, in order.

        Parameters
        ----------
        x : ``(n_points, n_dims)`` `ndarray`
            The array to transform.

        Returns
        -------
        transformed : ``(n_points, n_dims_output)`` `ndarray`
            The array after passing through the whole chain of transforms.
        """
        # Thread the array through each transform in turn. kwargs are not
        # forwarded to the individual transforms.
        current = x
        for member in self.transforms:
            current = member._apply(current)
        return current

    @property
    def composes_inplace_with(self):
        r"""
        A chain composes inplace with any :map:`Transform` **natively**
        (i.e. no new :map:`TransformChain` will be produced) - it simply
        grows.

        :type: :map:`Transform` or `tuple` of :map:`Transform` s
        """
        return Transform

    def _compose_before_inplace(self, transform):
        r"""
        Specialised inplace composition: extend the chain at its end.

        Parameters
        ----------
        transform : :map:`ComposableTransform`
            Transform to be applied **after** ``self``
        """
        self.transforms.append(transform)

    def _compose_after_inplace(self, transform):
        r"""
        Specialised inplace composition: prepend to the chain.

        Parameters
        ----------
        transform : :map:`ComposableTransform`
            Transform to be applied **before** ``self``
        """
        self.transforms.insert(0, transform)
| bsd-3-clause | 7f1f6fce8b173898788ec101bb273f1b | 33.243902 | 84 | 0.593305 | 4.577017 | false | false | false | false |
menpo/menpo | menpo/transform/homogeneous/rotation.py | 2 | 19578 | # Parts of this code taken from:
#
# Copyright (c) 2006-2015, Christoph Gohlke
# Copyright (c) 2006-2015, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from .base import HomogFamilyAlignment
from .affine import DiscreteAffine
from .similarity import Similarity
def optimal_rotation_matrix(source, target, allow_mirror=False):
    r"""
    Find the rotation that optimally aligns `source` to `target` via an SVD
    of their correlation matrix (the Kabsch algorithm).

    Parameters
    ----------
    source: :map:`PointCloud`
        The source points to be aligned
    target: :map:`PointCloud`
        The target points to be aligned
    allow_mirror : `bool`, optional
        If ``True``, the Kabsch algorithm check is not performed, and
        mirroring of the Rotation matrix is permitted.

    Returns
    -------
    rotation : `ndarray`
        The optimal square rotation matrix.
    """
    u, _, vt = np.linalg.svd(target.points.T.dot(source.points))
    rotation = u.dot(vt)
    if not allow_mirror and np.linalg.det(rotation) < 0:
        # Kabsch correction: negate the last singular direction so the
        # result is a proper rotation (det == +1) rather than a reflection.
        correction = np.eye(u.shape[0])
        correction[-1, -1] = -1.0
        rotation = u.dot(correction.dot(vt))
    return rotation
# TODO build rotations about axis, euler angles etc
# see http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle
# for details
class Rotation(DiscreteAffine, Similarity):
    r"""
    Abstract `n_dims` rotation transform.

    Parameters
    ----------
    rotation_matrix : ``(n_dims, n_dims)`` `ndarray`
        A valid, square rotation matrix
    skip_checks : `bool`, optional
        If ``True`` avoid sanity checks on ``rotation_matrix`` for performance.
    """

    def __init__(self, rotation_matrix, skip_checks=False):
        # Embed the rotation in an (n_dims + 1) homogeneous matrix.
        h_matrix = np.eye(rotation_matrix.shape[0] + 1)
        Similarity.__init__(self, h_matrix, copy=False, skip_checks=True)
        self.set_rotation_matrix(rotation_matrix, skip_checks=skip_checks)

    @classmethod
    def init_identity(cls, n_dims):
        r"""
        Creates an identity transform.

        Parameters
        ----------
        n_dims : `int`
            The number of dimensions.

        Returns
        -------
        identity : :class:`Rotation`
            The identity matrix transform.
        """
        return Rotation(np.eye(n_dims))

    @classmethod
    def init_from_2d_ccw_angle(cls, theta, degrees=True):
        r"""
        Convenience constructor for 2D CCW rotations about the origin.

        Parameters
        ----------
        theta : `float`
            The angle of rotation about the origin
        degrees : `bool`, optional
            If ``True`` theta is interpreted as a degree. If ``False``, theta is
            interpreted as radians.

        Returns
        -------
        rotation : :map:`Rotation`
            A 2D rotation transform.
        """
        if degrees:
            theta = np.deg2rad(theta)
        return Rotation(
            np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]),
            skip_checks=True,
        )

    @classmethod
    def init_3d_from_quaternion(cls, q):
        r"""
        Convenience constructor for 3D rotations based on quaternion parameters.

        Parameters
        ----------
        q : ``(4,)`` `ndarray`
            The quaternion parameters.

        Returns
        -------
        rotation : :map:`Rotation`
            A 3D rotation transform.
        """
        r = cls.init_identity(n_dims=3)
        return r.from_vector(q)

    @classmethod
    def init_from_3d_ccw_angle_around_x(cls, theta, degrees=True):
        r"""
        Convenience constructor for 3D CCW rotations around the x axis

        Parameters
        ----------
        theta : `float`
            The angle of rotation about the origin
        degrees : `bool`, optional
            If ``True`` theta is interpreted as a degree. If ``False``, theta is
            interpreted as radians.

        Returns
        -------
        rotation : :map:`Rotation`
            A 3D rotation transform.
        """
        if degrees:
            theta = np.deg2rad(theta)
        return Rotation(
            np.array(
                [
                    [1, 0, 0],
                    [0, np.cos(theta), -np.sin(theta)],
                    [0, np.sin(theta), np.cos(theta)],
                ]
            ),
            skip_checks=True,
        )

    @classmethod
    def init_from_3d_ccw_angle_around_y(cls, theta, degrees=True):
        r"""
        Convenience constructor for 3D CCW rotations around the y axis

        Parameters
        ----------
        theta : `float`
            The angle of rotation about the origin
        degrees : `bool`, optional
            If ``True`` theta is interpreted as a degree. If ``False``, theta is
            interpreted as radians.

        Returns
        -------
        rotation : :map:`Rotation`
            A 3D rotation transform.
        """
        if degrees:
            theta = np.deg2rad(theta)
        return Rotation(
            np.array(
                [
                    [np.cos(theta), 0, np.sin(theta)],
                    [0, 1, 0],
                    [-np.sin(theta), 0, np.cos(theta)],
                ]
            ),
            skip_checks=True,
        )

    @classmethod
    def init_from_3d_ccw_angle_around_z(cls, theta, degrees=True):
        r"""
        Convenience constructor for 3D CCW rotations around the z axis

        Parameters
        ----------
        theta : `float`
            The angle of rotation about the origin
        degrees : `bool`, optional
            If ``True`` theta is interpreted as a degree. If ``False``, theta is
            interpreted as radians.

        Returns
        -------
        rotation : :map:`Rotation`
            A 3D rotation transform.
        """
        if degrees:
            theta = np.deg2rad(theta)
        return Rotation(
            np.array(
                [
                    [np.cos(theta), -np.sin(theta), 0],
                    [np.sin(theta), np.cos(theta), 0],
                    [0, 0, 1],
                ]
            ),
            skip_checks=True,
        )

    @property
    def rotation_matrix(self):
        r"""
        The rotation matrix.

        :type: ``(n_dims, n_dims)`` `ndarray`
        """
        return self.linear_component

    def set_rotation_matrix(self, value, skip_checks=False):
        r"""
        Sets the rotation matrix.

        Parameters
        ----------
        value : ``(n_dims, n_dims)`` `ndarray`
            The new rotation matrix.
        skip_checks : `bool`, optional
            If ``True`` avoid sanity checks on ``value`` for performance.

        Raises
        ------
        ValueError
            If ``value`` is not a square 2D matrix, or its dimensionality
            does not match this transform's.
        """
        if not skip_checks:
            shape = value.shape
            # BUGFIX: this test previously used `and`, which (a) let
            # non-square 2D matrices pass validation and (b) raised an
            # IndexError (via shape[1]) for 1D input. A rotation matrix must
            # be 2D *and* square, so reject a failure of either condition.
            if len(shape) != 2 or shape[0] != shape[1]:
                raise ValueError("You need to provide a square rotation matrix")
            # The update better be the same size
            elif self.n_dims != shape[0]:
                raise ValueError(
                    "Trying to update the rotation " "matrix to a different dimension"
                )
        # TODO actually check I am a valid rotation
        # TODO slightly dodgy here accessing _h_matrix
        self._h_matrix[:-1, :-1] = value

    def _transform_str(self):
        axis, radians_of_rotation = self.axis_and_angle_of_rotation()
        # A (None, None) result means the rotation is (numerically) the
        # identity - nothing to describe.
        if axis is None:
            return "NO OP"
        degrees_of_rotation = np.rad2deg(radians_of_rotation)
        message = "CCW Rotation of {:.1f} degrees " "about {}".format(
            degrees_of_rotation, axis
        )
        return message

    def axis_and_angle_of_rotation(self):
        r"""
        Abstract method for computing the axis and angle of rotation.

        Returns
        -------
        axis : ``(n_dims,)`` `ndarray`
            The unit vector representing the axis of rotation
        angle_of_rotation : `float`
            The angle in radians of the rotation about the axis. The angle is
            signed in a right handed sense.
        """
        if self.n_dims == 2:
            return self._axis_and_angle_of_rotation_2d()
        elif self.n_dims == 3:
            return self._axis_and_angle_of_rotation_3d()
        # For any other dimensionality there is (implicitly) no result.

    def _axis_and_angle_of_rotation_2d(self):
        r"""
        Decomposes this Rotation's rotation matrix into a angular rotation
        The rotation is considered in a right handed sense. The axis is, by
        definition, `[0, 0, 1]`.

        Returns
        -------
        axis : ``(2,)`` `ndarray`
            The vector representing the axis of rotation
        angle_of_rotation : `float`
            The angle in radians of the rotation about the axis.
        """
        axis = np.array([0, 0, 1])
        test_vector = np.array([1, 0])
        transformed_vector = np.dot(self.rotation_matrix, test_vector)
        # NOTE(review): np.arccos only returns values in [0, pi], so the
        # reported angle is always non-negative - CW and CCW rotations of
        # equal magnitude are indistinguishable here. Preserved as-is for
        # backwards compatibility; confirm before relying on the sign.
        angle_of_rotation = np.arccos(np.dot(transformed_vector, test_vector))
        return axis, angle_of_rotation

    def _axis_and_angle_of_rotation_3d(self):
        r"""
        Decomposes this 3D rotation's rotation matrix into a angular rotation
        about an axis. The rotation is considered in a right handed sense.

        Returns
        -------
        axis : ``(3,)`` `ndarray`
            A unit vector, the axis about which the rotation takes place
        angle_of_rotation : `float`
            The angle in radians of the rotation about the `axis`.
            The angle is signed in a right handed sense.

        References
        ----------
        .. [1] http://en.wikipedia.org/wiki/Rotation_matrix#Determining_the_axis
        """
        # The rotation axis is the eigenvector with a real, unit eigenvalue.
        eval_, evec = np.linalg.eig(self.rotation_matrix)
        real_eval_mask = np.isreal(eval_)
        real_eval = np.real(eval_[real_eval_mask])
        evec_with_real_eval = np.real_if_close(evec[:, real_eval_mask])
        # Accept eigenvalues with magnitude within `error` of 1.
        error = 1e-7
        below_margin = np.abs(real_eval) < (1 + error)
        above_margin = (1 - error) < np.abs(real_eval)
        re_unit_eval_mask = np.logical_and(below_margin, above_margin)
        evec_with_real_unitary_eval = evec_with_real_eval[:, re_unit_eval_mask]
        # all the eigenvectors with real unitary eigenvalues are now all
        # equally 'valid' if multiple remain that probably means that this
        # rotation is actually a no op (i.e. rotate by 360 degrees about any
        # axis is an invariant transform) but need to check this. For now,
        # just take the first
        if evec_with_real_unitary_eval.shape[1] != 1:
            # TODO confirm that multiple eigenvalues of 1 means the rotation
            # does nothing
            return None, None
        axis = evec_with_real_unitary_eval[:, 0]
        axis /= np.sqrt((axis ** 2).sum())  # normalize to unit vector
        # to find the angle of rotation, build a new unit vector perpendicular
        # to the axis, and see how it rotates
        axis_temp_vector = axis - np.random.rand(axis.size)
        perpendicular_vector = np.cross(axis, axis_temp_vector)
        perpendicular_vector /= np.sqrt((perpendicular_vector ** 2).sum())
        transformed_vector = np.dot(self.rotation_matrix, perpendicular_vector)
        angle_of_rotation = np.arccos(np.dot(transformed_vector, perpendicular_vector))
        # Recover the sign of the angle from the chirality of the rotated
        # perpendicular vector relative to the axis.
        chirality_of_rotation = np.dot(
            axis, np.cross(perpendicular_vector, transformed_vector)
        )
        if chirality_of_rotation < 0:
            angle_of_rotation *= -1.0
        return axis, angle_of_rotation

    @property
    def n_parameters(self):
        r"""
        Number of parameters of Rotation. Only 3D rotations are currently
        supported.

        Returns
        -------
        n_parameters : `int`
            The transform parameters. Only 3D rotations are currently
            supported which are parametrized with quaternions.

        Raises
        ------
        DimensionalityError, NotImplementedError
            Non-3D Rotations are not yet vectorizable
        """
        if self.n_dims == 3:
            # Quaternion parameters
            return 4
        else:
            raise NotImplementedError("Non-3D Rotations are not yet " "vectorizable")

    def _as_vector(self):
        r"""
        Return the parameters of the transform as a 1D array. These parameters
        are parametrised as quaternions. Only 3D transforms are currently
        supported.

        Returns
        -------
        q : ``(4,)`` `ndarray`
            The 4 quaternion parameters.

        Raises
        ------
        DimensionalityError, NotImplementedError
            Non-3D Rotations are not yet vectorizable
        """
        if self.n_dims == 3:
            m00 = self.h_matrix[0, 0]
            m01 = self.h_matrix[0, 1]
            m02 = self.h_matrix[0, 2]
            m10 = self.h_matrix[1, 0]
            m11 = self.h_matrix[1, 1]
            m12 = self.h_matrix[1, 2]
            m20 = self.h_matrix[2, 0]
            m21 = self.h_matrix[2, 1]
            m22 = self.h_matrix[2, 2]
            # symmetric matrix K
            K = np.array(
                [
                    [m00 - m11 - m22, 0.0, 0.0, 0.0],
                    [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                    [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                    [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
                ]
            )
            K /= 3.0
            # Quaternion is eigenvector of K that corresponds to largest
            # eigenvalue
            w, V = np.linalg.eigh(K)
            q = V[[3, 0, 1, 2], np.argmax(w)]
            # Canonicalize: keep the scalar component non-negative (q and -q
            # represent the same rotation).
            if q[0] < 0.0:
                q = -q
            return q
        else:
            raise NotImplementedError("Non-3D Rotations are not yet " "vectorizable")

    def _from_vector_inplace(self, p):
        r"""
        Update this transform inplace from the given parameters expressed as
        quaternions. Currently only 3D rotations are supported.

        Parameters
        ----------
        p : ``(4,)`` `ndarray`
            The array of quaternion parameters.

        Raises
        ------
        DimensionalityError, NotImplementedError
            Non-3D Rotations are not yet vectorizable
        ValueError
            Expected 4 quaternion parameters; got {} instead.
        """
        if self.n_dims == 3:
            if len(p) == 4:
                n = np.dot(p, p)
                # epsilon for testing whether a number is close to zero
                if n < np.finfo(float).eps * 4.0:
                    # NOTE(review): returning a value from an inplace method
                    # is suspicious - for near-zero quaternions the rotation
                    # state is left untouched and this return value is
                    # discarded by callers. Preserved as-is for backwards
                    # compatibility.
                    return np.identity(4)
                # Build the rotation matrix from the (normalized) quaternion
                # outer product.
                p = p * np.sqrt(2.0 / n)
                p = np.outer(p, p)
                rotation = np.array(
                    [
                        [1.0 - p[2, 2] - p[3, 3], p[1, 2] - p[3, 0], p[1, 3] + p[2, 0]],
                        [p[1, 2] + p[3, 0], 1.0 - p[1, 1] - p[3, 3], p[2, 3] - p[1, 0]],
                        [p[1, 3] - p[2, 0], p[2, 3] + p[1, 0], 1.0 - p[1, 1] - p[2, 2]],
                    ]
                )
                self.set_rotation_matrix(rotation, skip_checks=True)
            else:
                raise ValueError(
                    "Expected 4 quaternion parameters; got {} "
                    "instead.".format(len(p))
                )
        else:
            raise NotImplementedError("Non-3D rotations are not yet " "vectorizable")

    @property
    def composes_inplace_with(self):
        r"""
        :class:`Rotation` can swallow composition with any other
        :class:`Rotation`.
        """
        return Rotation

    def pseudoinverse(self):
        r"""
        The inverse rotation matrix.

        :type: :class:`Rotation`
        """
        # For a true rotation the inverse equals the transpose; the general
        # matrix inverse is used here, mirroring the historic behaviour.
        return Rotation(np.linalg.inv(self.rotation_matrix), skip_checks=True)
class AlignmentRotation(HomogFamilyAlignment, Rotation):
    r"""
    Constructs an :class:`Rotation` by finding the optimal rotation transform to
    align `source` to `target`.

    Parameters
    ----------
    source : :map:`PointCloud`
        The source pointcloud instance used in the alignment
    target : :map:`PointCloud`
        The target pointcloud instance used in the alignment
    allow_mirror : `bool`, optional
        If ``True``, the Kabsch algorithm check is not performed, and mirroring
        of the Rotation matrix is permitted.
    """

    def __init__(self, source, target, allow_mirror=False):
        HomogFamilyAlignment.__init__(self, source, target)
        # NOTE: Rotation.__init__ calls self.set_rotation_matrix, which
        # dispatches to the override below and therefore syncs the target
        # before self.allow_mirror is assigned. _sync_target_from_state does
        # not appear to read allow_mirror, so this ordering is presumably
        # safe - confirm before reordering these statements.
        Rotation.__init__(
            self, optimal_rotation_matrix(source, target, allow_mirror=allow_mirror)
        )
        self.allow_mirror = allow_mirror

    def set_rotation_matrix(self, value, skip_checks=False):
        r"""
        Sets the rotation matrix, and re-syncs the target to reflect the
        newly set state.

        Parameters
        ----------
        value : ``(n_dims, n_dims)`` `ndarray`
            The new rotation matrix.
        skip_checks : `bool`, optional
            If ``True`` avoid sanity checks on ``value`` for performance.
        """
        Rotation.set_rotation_matrix(self, value, skip_checks=skip_checks)
        self._sync_target_from_state()

    def _sync_state_from_target(self):
        # Recompute the optimal rotation for the (changed) target. The base
        # class method is invoked directly to avoid the override above
        # re-triggering a target sync.
        r = optimal_rotation_matrix(
            self.source, self.target, allow_mirror=self.allow_mirror
        )
        Rotation.set_rotation_matrix(self, r, skip_checks=True)

    def as_non_alignment(self):
        r"""
        Returns a copy of this rotation without its alignment nature.

        Returns
        -------
        transform : :map:`Rotation`
            A version of this rotation with the same transform behavior but
            without the alignment logic.
        """
        return Rotation(self.rotation_matrix, skip_checks=True)
| bsd-3-clause | 91b8037e9bb11e9887acb671171d6692 | 33.712766 | 88 | 0.558944 | 4.119949 | false | false | false | false |
menpo/menpo | menpo/transform/base/__init__.py | 2 | 9702 | import warnings
import numpy as np
from menpo.base import Copyable, MenpoDeprecationWarning
class Transform(Copyable):
    r"""
    Abstract base for any spatial transform.

    Subclasses implement :meth:`_apply`; this class supplies the unified
    public interface - :meth:`apply` (non-destructive) and the deprecated
    :meth:`apply_inplace`.

    All Transforms support basic composition into a :map:`TransformChain`,
    in two orderings::

        (a.compose_after(b)).apply(x) == a.apply(b.apply(x))   # maths `o`
        (a.compose_before(b)).apply(x) == b.apply(a.apply(x))  # pipeline

    The second form lets a long pipeline be built incrementally, e.g.
    rescaling a :map:`PointCloud` ``p`` about its mean and then translating
    it elsewhere::

        t = Translation(-p.centre)  # translate to centre
        s = Scale(2.0)  # rescale
        move = Translate([10, 0 ,0])  # budge along the x axis
        t.compose(s).compose(-t).compose(move)

    For native composition, see :map:`ComposableTransform` and the
    :map:`VComposable` mix-in. For inversion, see :map:`Invertible` and
    :map:`VInvertible`. For alignment, see :map:`Alignment`.
    """

    @property
    def n_dims(self):
        r"""
        The dimensionality of the data the transform operates on; ``None``
        if the transform is not dimension specific.

        :type: `int` or ``None``
        """
        return None

    @property
    def n_dims_output(self):
        r"""
        The dimensionality of the transformed output; ``None`` if the
        output is not dimension specific.

        :type: `int` or ``None``
        """
        # By default a transform is assumed to preserve dimensionality.
        return self.n_dims

    def _apply(self, x, **kwargs):
        r"""
        Transform the array ``x`` and return the result.

        This is the single place where actual transformation work happens -
        subclasses must implement it. Both :meth:`apply` and
        :meth:`apply_inplace` route through here.

        Parameters
        ----------
        x : ``(n_points, n_dims)`` `ndarray`
            The array to be transformed.
        kwargs : `dict`
            Subclass-specific options.

        Returns
        -------
        transformed : ``(n_points, n_dims_output)`` `ndarray`
            The transformed array
        """
        raise NotImplementedError()

    def apply_inplace(self, *args, **kwargs):
        r"""
        Deprecated as public supported API, use the non-mutating `apply()`
        instead.

        For internal performance-specific uses, see `_apply_inplace()`.
        """
        warnings.warn(
            "the public API for inplace operations is deprecated "
            "and will be removed in a future version of Menpo. "
            "Use .apply() instead.",
            MenpoDeprecationWarning,
        )
        return self._apply_inplace(*args, **kwargs)

    def _apply_inplace(self, x, **kwargs):
        r"""
        Destructively transform the :map:`Transformable` ``x``.

        This inplace operation should be used sparingly, by internal APIs
        where copying the transformed object would be expensive. Nothing is
        returned, as the operation happens in place. Any ``kwargs`` are
        passed through to :meth:`_apply`.

        Parameters
        ----------
        x : :map:`Transformable`
            The :map:`Transformable` object to be transformed.
        kwargs : `dict`
            Passed through to :meth:`_apply`.

        Raises
        ------
        ValueError
            If ``x`` is not :map:`Transformable`.
        """

        def _curried_apply(array):
            # Single-argument view of _apply with kwargs bound.
            return self._apply(array, **kwargs)

        try:
            x._transform_inplace(_curried_apply)
        except AttributeError:
            raise ValueError(
                "apply_inplace can only be used on Transformable" " objects."
            )

    def apply(self, x, batch_size=None, **kwargs):
        r"""
        Transform ``x`` non-destructively.

        If ``x`` is :map:`Transformable`, it is handed this transform and
        returns a transformed copy of itself. Otherwise ``x`` is assumed to
        be an `ndarray` and a transformed array is returned. Any ``kwargs``
        are passed through to :meth:`_apply`.

        Parameters
        ----------
        x : :map:`Transformable` or ``(n_points, n_dims)`` `ndarray`
            The array or object to be transformed.
        batch_size : `int`, optional
            If not ``None``, array data is fed through the transform
            ``batch_size`` points at a time - useful when large intermediate
            matrices would otherwise be built.
        kwargs : `dict`
            Passed through to :meth:`_apply`.

        Returns
        -------
        transformed : ``type(x)``
            The transformed object or array
        """

        def _curried_apply(array):
            # Single-argument view of the batched apply with kwargs bound.
            return self._apply_batched(array, batch_size, **kwargs)

        try:
            return x._transform(_curried_apply)
        except AttributeError:
            # Not Transformable - treat x as a raw array.
            return self._apply_batched(x, batch_size, **kwargs)

    def _apply_batched(self, x, batch_size, **kwargs):
        # Apply in one go, or in batch_size-sized chunks stacked back up.
        if batch_size is None:
            return self._apply(x, **kwargs)
        n_points = x.shape[0]
        chunks = [
            self._apply(x[start : start + batch_size], **kwargs)
            for start in range(0, n_points, batch_size)
        ]
        return np.vstack(chunks)

    def compose_before(self, transform):
        r"""
        Returns a :map:`TransformChain` that represents **this** transform
        composed **before** the given transform::

            c = a.compose_before(b)
            c.apply(p) == b.apply(a.apply(p))

        ``a`` and ``b`` are left unchanged.

        Parameters
        ----------
        transform : :map:`Transform`
            Transform to be applied **after** self

        Returns
        -------
        transform : :map:`TransformChain`
            The resulting transform chain.
        """
        return TransformChain([self, transform])

    def compose_after(self, transform):
        r"""
        Returns a :map:`TransformChain` that represents **this** transform
        composed **after** the given transform::

            c = a.compose_after(b)
            c.apply(p) == a.apply(b.apply(p))

        ``a`` and ``b`` are left unchanged. This matches the usual
        mathematical compose operator, `o`.

        Parameters
        ----------
        transform : :map:`Transform`
            Transform to be applied **before** self

        Returns
        -------
        transform : :map:`TransformChain`
            The resulting transform chain.
        """
        return TransformChain([transform, self])
class Transformable(Copyable):
    r"""
    Interface for objects that know how to be transformed by the
    :map:`Transform` interface.

    When ``Transform.apply_inplace`` is called on an object,
    :meth:`_transform_inplace` is invoked with the transform's
    :meth:`_apply` function, allowing the object to define how it should
    transform itself.
    """

    def _transform_inplace(self, transform):
        r"""
        Apply the given transform function to ``self`` inplace.

        Parameters
        ----------
        transform : `function`
            Function that applies a transformation to the transformable object.

        Returns
        -------
        transformed : ``type(self)``
            The transformed object, having been transformed in place.
        """
        raise NotImplementedError()

    def _transform(self, transform):
        r"""
        Non-destructive counterpart of :meth:`_transform_inplace`: returns
        a transformed copy and leaves this object as it was.

        Parameters
        ----------
        transform : `function`
            Function that applies a transformation to the transformable object.

        Returns
        -------
        transformed : ``type(self)``
            A copy of the object, transformed.
        """
        transformed = self.copy()
        # Destructively transform the copy - self stays untouched.
        transformed._transform_inplace(transform)
        return transformed
from .alignment import Alignment
from .composable import TransformChain, ComposableTransform, VComposable
from .invertible import Invertible, VInvertible
| bsd-3-clause | 0abd1be4f921a6c340a218afc2570f48 | 31.125828 | 80 | 0.589569 | 4.50627 | false | false | false | false |
menpo/menpo | menpo/image/interpolation.py | 2 | 5037 | import numpy as np
from menpo.transform import Homogeneous
# Homogeneous transform that swaps the first two axes of 2D points (a
# permutation matrix in homogeneous coordinates), used to convert between
# (x, y) and (y, x) point orderings.
xy_yx = Homogeneous(np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]))
def scipy_interpolation(pixels, points_to_sample, mode="constant", order=1, cval=0.0):
    r"""
    Sample an image at the given points using scipy's
    ``map_coordinates`` function.

    Parameters
    ----------
    pixels : ``(n_channels, M, N, ...)`` `ndarray`
        The image to be sampled from, the first axis containing channel
        information
    points_to_sample : ``(n_points, n_dims)`` `ndarray`
        The points which should be sampled from pixels
    mode : ``{constant, nearest, reflect, wrap}``, optional
        Points outside the boundaries of the input are filled according to the
        given mode
    order : `int,` optional
        The order of the spline interpolation. The order has to be in the
        range [0, 5].
    cval : `float`, optional
        The value that should be used for points that are sampled from
        outside the image bounds if mode is ``constant``.

    Returns
    -------
    sampled_image : ``(n_channels, n_points)`` `ndarray`
        The pixel information sampled at each of the points, per channel.
    """
    from scipy.ndimage import map_coordinates  # expensive

    # map_coordinates uses the opposite (n_dims, n_points) convention to
    # ours, hence the transpose.
    coords = points_to_sample.T
    # Sample every channel independently (channels are always on axis 0)
    # and stack the per-channel results back together.
    per_channel = [
        map_coordinates(channel_pixels, coords, mode=mode, order=order, cval=cval)
        for channel_pixels in pixels
    ]
    return np.asarray(per_channel)
try:
    import cv2

    def _mode_to_opencv(mode):
        # Translate a menpo boundary-mode name into the OpenCV border flag.
        border_flags = {
            "nearest": cv2.BORDER_REPLICATE,
            "constant": cv2.BORDER_CONSTANT,
        }
        if mode not in border_flags:
            raise ValueError(
                'Unknown mode "{}", must be one of (nearest, constant)'.format(mode)
            )
        return border_flags[mode]

    def _order_to_opencv(order):
        # Translate a spline order (0 or 1) into the OpenCV interpolation flag.
        interp_flags = {0: cv2.INTER_NEAREST, 1: cv2.INTER_LINEAR}
        if order not in interp_flags:
            raise ValueError(
                'Unsupported order "{}", must be one of (0, 1)'.format(order)
            )
        return interp_flags[order]

    def cv2_perspective_interpolation(
        pixels, template_shape, h_transform, mode="constant", order=1, cval=0.0
    ):
        r"""
        Fast perspective warping via OpenCV.

        The warp must be expressible as a homogeneous transform, which makes
        operations such as scaling much faster than the generic scipy path.
        Fewer modes and orders are supported than the generic interpolation
        methods.

        Parameters
        ----------
        pixels : ``(n_channels, M, N, ...)`` `ndarray`
            The image to be sampled from, channels on the first axis.
        template_shape : `tuple`
            The shape of the new image that will be sampled.
        h_transform : homogeneous transform
            The transform to apply (inverse-warped by OpenCV).
        mode : ``{constant, nearest}``, optional
            How points outside the image boundary are filled.
        order : `int`, optional
            Interpolation order, in the range [0, 1].
        cval : `float`, optional
            Fill value for out-of-bounds samples when ``mode='constant'``.

        Returns
        -------
        sampled_image : `ndarray`
            The pixel information sampled at each of the points.
        """
        # OpenCV works in (x, y) ordering - conjugate the transform with the
        # axis swap so the matrix acts on OpenCV-style coordinates.
        matrix = xy_yx.compose_before(h_transform).compose_before(xy_yx).h_matrix
        # OpenCV requires a tuple size, given as (W, H).
        template_shape = tuple(template_shape)
        cv_size = template_shape[::-1]
        # OpenCV does not support boolean arrays - round-trip through uint8.
        was_bool = pixels.dtype == bool
        source = pixels.astype(np.uint8) if was_bool else pixels
        warped = np.empty((pixels.shape[0],) + template_shape, dtype=source.dtype)
        flags = _order_to_opencv(order) + cv2.WARP_INVERSE_MAP
        border = _mode_to_opencv(mode)
        for channel in range(pixels.shape[0]):
            cv2.warpPerspective(
                source[channel],
                matrix,
                cv_size,
                dst=warped[channel],
                flags=flags,
                borderMode=border,
                borderValue=cval,
            )
        # Undo the uint8 round-trip if the input was boolean.
        if was_bool:
            warped = warped.astype(bool)
        return warped

except ImportError:
    pass
| bsd-3-clause | 6bf8dccab3cd617e6a51e5dbca784594 | 33.979167 | 87 | 0.588048 | 4.215063 | false | false | false | false |
menpo/menpo | menpo/image/base.py | 2 | 131364 | from typing import Iterable, Optional
from warnings import warn
import PIL.Image as PILImage
import numpy as np
from menpo.base import MenpoDeprecationWarning, Vectorizable, copy_landmarks_and_path
from menpo.landmark import Landmarkable
from menpo.shape import PointCloud, bounding_box
from menpo.transform import (
AlignmentUniformScale,
Homogeneous,
NonUniformScale,
Rotation,
Translation,
scale_about_centre,
transform_about_centre,
)
from menpo.visualize.base import ImageViewer, LandmarkableViewable, Viewable
from .interpolation import scipy_interpolation
try:
from .interpolation import cv2_perspective_interpolation
except ImportError:
warn("Falling back to scipy interpolation for affine warps")
cv2_perspective_interpolation = None # type: ignore
from .patches import (
extract_patches_with_slice,
set_patches,
extract_patches_by_sampling,
)
# Module-level cache for the greyscale luminosity coefficients. They are
# invariant, so they only need computing once per process; presumably they are
# populated lazily by the greyscale-conversion code, which is outside this
# chunk - confirm against the full file.
_greyscale_luminosity_coef: Optional[np.ndarray] = None
class ImageBoundaryError(ValueError):
    r"""
    Raised when a requested crop extends beyond the image boundary.

    The instance records both the crop that was asked for and the closest
    crop that would have stayed inside the image.

    Parameters
    ----------
    requested_min : ``(d,)`` `ndarray`
        The per-dimension minimum index requested for the crop.
    requested_max : ``(d,)`` `ndarray`
        The per-dimension maximum index requested for the crop.
    snapped_min : ``(d,)`` `ndarray`
        The per-dimension minimum index usable if the crop were constrained
        to the image boundaries.
    snapped_max : ``(d,)`` `ndarray`
        The per-dimension maximum index usable if the crop were constrained
        to the image boundaries.
    """

    def __init__(self, requested_min, requested_max, snapped_min, snapped_max):
        # No message args - callers inspect the attributes below instead.
        super().__init__()
        self.requested_min = requested_min
        self.requested_max = requested_max
        self.snapped_min = snapped_min
        self.snapped_max = snapped_max
def indices_for_image_of_shape(shape):
    r"""
    The indices of every pixel in an image of the given shape (channel
    information excluded).

    Parameters
    ----------
    shape : `tuple`
        The spatial shape of the image.

    Returns
    -------
    indices : ``(n_pixels, n_dims)`` `ndarray`
        One row per pixel, in row-major order.
    """
    grid = np.indices(shape)
    return grid.reshape(len(shape), -1).T
def normalize_pixels_range(pixels, error_on_unknown_type=True):
    r"""
    Scale integer pixel values into the menpo floating point range [0, 1].

    This is the single place that pixel-range normalisation is handled.
    Only ``uint8`` and ``uint16`` inputs are currently rescaled.

    Parameters
    ----------
    pixels : `ndarray`
        The pixels to normalize into the floating point range.
    error_on_unknown_type : `bool`, optional
        If ``True``, raise a ``ValueError`` for any dtype other than
        uint8/uint16. If ``False``, such input is returned untouched.

    Returns
    -------
    normalized_pixels : `ndarray`
        The pixels scaled into [0, 1] (float64), or the input unchanged.

    Raises
    ------
    ValueError
        If ``pixels`` is an unknown type and ``error_on_unknown_type==True``
    """
    peak_values = {np.dtype(np.uint8): 255.0, np.dtype(np.uint16): 65535.0}
    max_range = peak_values.get(pixels.dtype)
    if max_range is None:
        if error_on_unknown_type:
            raise ValueError(
                "Unexpected dtype ({}) - normalisation range "
                "is unknown".format(pixels.dtype)
            )
        # Unknown dtype but errors suppressed - pass through untouched.
        return pixels
    # Multiplying by the reciprocal is faster than dividing and
    # automatically promotes the result to float64.
    return pixels * (1.0 / max_range)
def denormalize_pixels_range(pixels, out_dtype):
    """
    Denormalize the given pixels array into the range of the given out dtype.

    If the given pixels are floating point or boolean then the values
    are scaled appropriately and cast to the output dtype. If the pixels
    are already the correct dtype they are immediately returned.
    Floating point pixels must be in the range [0, 1].
    Currently uint8 and uint16 output dtypes are supported.

    Parameters
    ----------
    pixels : `ndarray`
        The pixels to denormalize.
    out_dtype : `np.dtype`
        The numpy data type to output and scale the values into.

    Returns
    -------
    out_pixels : `ndarray`
        Will be in the correct range and will have type ``out_dtype``.

    Raises
    ------
    ValueError
        Pixels are floating point and range outside [0, 1]
    ValueError
        Input pixels dtype not in the set {float32, float64, bool}.
    ValueError
        Output dtype not in the set {uint8, uint16}
    """
    in_dtype = pixels.dtype
    if in_dtype == out_dtype:
        return pixels

    # np.issubdtype replaces np.issubclass_, which was deprecated and later
    # removed from NumPy (gone in >= 2.0). It additionally handles dtype
    # instances (e.g. np.dtype('float32')) as well as scalar types, and
    # subsumes the old `in_dtype == float` special case.
    if np.issubdtype(in_dtype, np.floating):
        if np.issubdtype(out_dtype, np.floating):
            # float -> float is a straight cast; no range change needed.
            return pixels.astype(out_dtype)
        p_min = pixels.min()
        p_max = pixels.max()
        if p_min < 0.0 or p_max > 1.0:
            raise ValueError(
                "Unexpected input range [{}, {}] - pixels "
                "must be in the range [0, 1]".format(p_min, p_max)
            )
    elif in_dtype != bool:
        raise ValueError(
            "Unexpected input dtype ({}) - only float32, float64 "
            "and bool supported".format(in_dtype)
        )

    if out_dtype == np.uint8:
        max_range = 255.0
    elif out_dtype == np.uint16:
        max_range = 65535.0
    else:
        raise ValueError(
            "Unexpected output dtype ({}) - normalisation range "
            "is unknown".format(out_dtype)
        )

    # Scale up then truncate-cast into the integer output range.
    return (pixels * max_range).astype(out_dtype)
def channels_to_back(pixels):
    r"""
    Move the channel axis of an image from the front to the back.

    Always returns a C-contiguous `ndarray`, since the menpo :map:`Image`
    containers do not support channels at the back.

    Parameters
    ----------
    pixels : `ndarray`
        The pixels with channels on axis 0.

    Returns
    -------
    rolled_pixels : `ndarray`
        The pixels with channels on the last axis.
    """
    backed = np.moveaxis(pixels, 0, -1)
    return np.require(backed, dtype=pixels.dtype, requirements=["C"])
def channels_to_front(pixels):
    r"""
    Move the channel axis of an image from the back (the convention in most
    other imaging packages) to the front (the menpo convention).

    Parameters
    ----------
    pixels : ``(H, W, C)`` `buffer`
        Pixel data with channels on the last axis. Anything that is not
        already an `ndarray` is converted first.

    Returns
    -------
    pixels : ``(C, H, W)`` `ndarray`
        C-contiguous array with channels on axis 0.
    """
    if not isinstance(pixels, np.ndarray):
        pixels = np.array(pixels)
    fronted = np.moveaxis(pixels, -1, 0)
    return np.require(fronted, dtype=pixels.dtype, requirements=["C"])
class Image(Vectorizable, Landmarkable, Viewable, LandmarkableViewable):
    r"""
    An n-dimensional image.

    Images are n-dimensional homogeneous regular arrays of data. Each
    spatially distinct location in the array is referred to as a `pixel`.
    At a pixel, ``k`` distinct pieces of information can be stored. Each
    datum at a pixel is referred to as being in a `channel`. All pixels in
    the image have the same number of channels, and all channels have the
    same data-type (`float64`).

    Parameters
    ----------
    image_data : ``(C, M, N ..., Q)`` `ndarray`
        Array representing the image pixels, with the first axis being
        channels.
    copy : `bool`, optional
        If ``False``, the ``image_data`` will not be copied on assignment.
        Note that this will miss out on additional checks. Further note that we
        still demand that the array is C-contiguous - if it isn't, a copy will
        be generated anyway.
        In general, this should only be used if you know what you are doing.

    Raises
    ------
    Warning
        If ``copy=False`` cannot be honoured
    ValueError
        If the pixel array is malformed
    """
def __init__(self, image_data, copy=True):
    r"""
    Store the pixel array, optionally avoiding a copy.

    Parameters
    ----------
    image_data : ``(C, M, N ..., Q)`` `ndarray`
        Pixels with channels on the first axis. A plain 2D array is
        promoted to a single-channel ``(1, M, N)`` image.
    copy : `bool`, optional
        If ``False``, try to adopt ``image_data`` directly. A copy is
        still made (with a warning) when the array is not C-contiguous.

    Raises
    ------
    Warning
        If ``copy=False`` cannot be honoured.
    ValueError
        If the pixel array has fewer than 2 dimensions.
    """
    super(Image, self).__init__()
    if copy or not image_data.flags.c_contiguous:
        honoured = copy
        image_data = np.array(image_data, copy=True, order="C")
        if not honoured:
            warn(
                "The copy flag was NOT honoured. A copy HAS been made. "
                "Please ensure the data you pass is C-contiguous."
            )
    # Promote an implicit single-channel 2D image; reshape keeps the data
    # C-contiguous.
    if image_data.ndim == 2:
        image_data = image_data.reshape((1,) + image_data.shape)
    if image_data.ndim < 2:
        raise ValueError(
            "Pixel array has to be 2D (implicitly 1 channel, "
            "2D shape) or 3D+ (n_channels, 2D+ shape) "
            " - a {}D array "
            "was provided".format(image_data.ndim)
        )
    self.pixels = image_data
@classmethod
def init_blank(cls, shape, n_channels=1, fill=0, dtype=float):
    r"""
    Build a constant-valued image of the given shape.

    Parameters
    ----------
    shape : `tuple` or `list`
        The spatial shape of the image. Floating point values are rounded
        up to the nearest integer.
    n_channels : `int`, optional
        The number of channels to create the image with.
    fill : `int`, optional
        The value to fill all pixels with.
    dtype : numpy data type, optional
        The data type of the image.

    Returns
    -------
    blank_image : :map:`Image`
        A new image of the requested size.
    """
    # Round up and force a tuple so `+` concatenates with the channel axis.
    shape = tuple(int(dim) for dim in np.ceil(shape))
    full_shape = (n_channels,) + shape
    if fill == 0:
        pixels = np.zeros(full_shape, dtype=dtype)
    else:
        # `ones * fill` (rather than np.full) deliberately lets a
        # non-integral fill promote the result dtype.
        pixels = np.ones(full_shape, dtype=dtype) * fill
    # Freshly allocated here, so no copy is required.
    return cls(pixels, copy=False)
@classmethod
def init_from_rolled_channels(cls, pixels):
    r"""
    Deprecated alias - please use the equivalent
    ``init_from_channels_at_back`` method.
    """
    warn(
        "This method is no longer supported and will be removed in a "
        "future version of Menpo. "
        "Use .init_from_channels_at_back instead.",
        MenpoDeprecationWarning,
    )
    return cls.init_from_channels_at_back(pixels)
@classmethod
def init_from_channels_at_back(cls, pixels):
    r"""
    Build an Image from pixels whose channel axis is last.

    This layout is common in other frameworks, so this is a convenient way
    to construct a menpo Image from such data. A copy is always made since
    the data must be rearranged into channels-first layout.

    Parameters
    ----------
    pixels : ``(M, N ..., Q, C)`` `ndarray`
        Pixel array with channels on the last axis. A plain 2D array is
        treated as a single channel.

    Returns
    -------
    image : :map:`Image`
        A new image with the FIRST axis as the channels.

    Raises
    ------
    ValueError
        If ``pixels`` has fewer than 2 dimensions plus channels.
    """
    if pixels.ndim == 2:
        # Implicit single channel - append a channel axis.
        pixels = np.expand_dims(pixels, -1)
    if pixels.ndim < 2:
        raise ValueError(
            "Pixel array has to be 2D "
            "(2D shape, implicitly 1 channel) "
            "or 3D+ (2D+ shape, n_channels) "
            " - a {}D array "
            "was provided".format(pixels.ndim)
        )
    return cls(channels_to_front(pixels))
@classmethod
def init_from_pointcloud(
    cls,
    pointcloud,
    group=None,
    boundary=0,
    n_channels=1,
    fill=0,
    dtype=float,
    return_transform=False,
):
    r"""
    Create an Image just large enough to contain the given pointcloud.

    The pointcloud is translated so that its minimum bound (minus
    ``boundary``) sits at the origin, and the image extent is the
    pointcloud's range plus ``boundary`` on every side (so a boundary of 5
    adds 10 pixels per dimension in total).

    Parameters
    ----------
    pointcloud : :map:`PointCloud`
        Pointcloud to place inside the newly created image.
    group : `str`, optional
        If not ``None``, attach the translated pointcloud to the image as
        a landmark group under this key.
    boundary : `float`
        Optional padding added to the pointcloud bounds. Default ``0``
        gives the tightest possible containing image.
    n_channels : `int`, optional
        The number of channels to create the image with.
    fill : `int`, optional
        The value to fill all pixels with.
    dtype : numpy data type, optional
        The data type of the image.
    return_transform : `bool`, optional
        If ``True``, also return the :map:`Transform` used to adjust the
        pointcloud.

    Returns
    -------
    image : ``type(cls)`` Image or subclass
        A new image sized to the pointcloud, optionally with landmarks.
    transform : :map:`Transform`
        Only returned when ``return_transform`` is ``True``.
    """
    minimum = pointcloud.bounds(boundary=boundary)[0]
    shift_to_origin = Translation(-minimum)
    centred_pc = shift_to_origin.apply(pointcloud)
    new_image = cls.init_blank(
        centred_pc.range(boundary=boundary),
        n_channels=n_channels,
        fill=fill,
        dtype=dtype,
    )
    if group is not None:
        new_image.landmarks[group] = centred_pc
    if return_transform:
        return new_image, shift_to_origin
    return new_image
def as_masked(self, mask=None, copy=True):
    r"""
    Return a copy of this image with an attached mask behavior.

    Parameters
    ----------
    mask : ``(self.shape)`` `ndarray` or :map:`BooleanImage`
        A custom mask for the new image, or ``None`` (see the
        :map:`MaskedImage` constructor for how this is handled).
    copy : `bool`, optional
        If ``False``, the produced :map:`MaskedImage` shares pixels with
        ``self``. Only suggested for performance.

    Returns
    -------
    masked_image : :map:`MaskedImage`
        Same pixels and landmarks as this image, but with a mask.
    """
    from menpo.image import MaskedImage

    masked = MaskedImage(self.pixels, mask=mask, copy=copy)
    return copy_landmarks_and_path(self, masked)
@property
def n_dims(self):
    r"""
    The number of spatial dimensions in the image (minimum 2).

    :type: `int`
    """
    # pixels carries one extra leading axis for channels.
    return self.pixels.ndim - 1
@property
def n_pixels(self):
    r"""
    Total number of pixels in the image ``(prod(shape),)``.

    :type: `int`
    """
    # Total elements divided by channels == elements of one channel.
    return self.pixels.size // self.n_channels
@property
def n_elements(self):
    r"""
    Total number of data points in the image
    ``(prod(shape) * n_channels)``.

    :type: `int`
    """
    return self.pixels.size
@property
def n_channels(self):
    r"""
    The number of channels at each pixel in the image.

    :type: `int`
    """
    # Channels live on the leading axis.
    return len(self.pixels)
@property
def width(self):
    r"""
    The width of the image - by image semantics, the size of the **last**
    dimension.

    :type: `int`
    """
    return self.shape[-1]
@property
def height(self):
    r"""
    The height of the image - by image semantics, the size of the
    **second to last** dimension.

    :type: `int`
    """
    return self.shape[-2]
@property
def shape(self):
    r"""
    The spatial shape of the image (the channel axis is excluded).

    :type: `tuple`
    """
    # Drop the leading channel axis.
    return self.pixels.shape[1:]
def bounds(self):
    r"""
    The bounds of the image. The minimum is always all zeros; the maximum
    is the largest valid **index** per dimension, e.g.
    ``((0, 0), (self.height - 1, self.width - 1))`` for a 2D image.

    This is akin to nearest-neighbour sampling: the true maximum subpixel
    value is arbitrarily close to ``self.height`` from below, but this
    value at least allows sampling without floating point worries.

    :type: `tuple`
    """
    lower = (0,) * self.n_dims
    upper = tuple(extent - 1 for extent in self.shape)
    return lower, upper
def diagonal(self):
    r"""
    The Euclidean length of this image's diagonal.

    :type: `float`
    """
    return np.sqrt((np.asarray(self.shape) ** 2).sum())
def centre(self):
    r"""
    The geometric centre of the image - the subpixel in the middle.
    Useful for aligning shapes and images.

    :type: (``n_dims``,) `ndarray`
    """
    return np.asarray(self.shape, dtype=np.double) * 0.5
def _str_shape(self):
    # Human-readable shape: 2D images use the "W x H" convention, higher
    # dimensional images just join their extents.
    if self.n_dims == 2:
        return "{}W x {}H".format(self.width, self.height)
    if self.n_dims > 2:
        return " x ".join(str(dim) for dim in self.shape)
def indices(self):
    r"""
    The indices of all pixels in this image, one row per pixel.

    :type: (``n_pixels``, ``n_dims``) ndarray
    """
    return indices_for_image_of_shape(self.shape)
def _as_vector(self, keep_channels=False):
    r"""
    The vectorized form of this image.

    Parameters
    ----------
    keep_channels : `bool`, optional
        If ``True``, return shape ``(n_channels, n_pixels)``; otherwise a
        fully flattened ``(n_channels * n_pixels,)`` vector.

    Returns
    -------
    vec : `ndarray`
        Flattened representation of all pixel and channel information.
    """
    if keep_channels:
        return self.pixels.reshape(self.n_channels, -1)
    return self.pixels.ravel()
def from_vector(self, vector, n_channels=None, copy=True):
    r"""
    Build a new image from a flattened vector, reshaped to this image's
    spatial shape. Landmarks are transferred in the process.

    The ``n_channels`` argument is useful when adding an extra channel to
    an image while maintaining the shape - for example, when calculating
    the gradient.

    Parameters
    ----------
    vector : ``(n_parameters,)`` `ndarray`
        A flattened vector of all pixels and channels of an image.
    n_channels : `int`, optional
        If given, assume ``vector`` matches this image's shape but with a
        possibly different number of channels.
    copy : `bool`, optional
        If ``False``, do not copy the vector when creating the new image.

    Returns
    -------
    image : :map:`Image`
        New image of the same shape with the specified channel count.

    Raises
    ------
    Warning
        If the ``copy=False`` flag cannot be honored
    """
    if n_channels is None:
        n_channels = self.n_channels
    reshaped = vector.reshape((n_channels,) + self.shape)
    new_image = Image(reshaped, copy=copy)
    new_image.landmarks = self.landmarks
    return new_image
def _from_vector_inplace(self, vector, copy=True):
    r"""
    Update this image in place from a flattened vector.

    Parameters
    ----------
    vector : ``(n_pixels,)`` `bool ndarray`
        Flattened pixel data; must reshape to ``self.pixels.shape``.
    copy : `bool`, optional
        If ``False``, try to adopt the reshaped vector directly as the
        pixels. A copy (with a warning) is still made when the data is
        not C-contiguous.

    Raises
    ------
    Warning
        If the ``copy=False`` flag cannot be honored

    Note
    ----
    For :map:`BooleanImage` this rebuilds the boolean image **itself**
    from boolean values - the mask is not interpreted here, in contrast
    to :map:`MaskedImage`, where only the masked region is used in
    :meth:`from_vector_inplace` and :meth:`as_vector`.
    """
    reshaped = vector.reshape(self.pixels.shape)
    if not copy and reshaped.flags.c_contiguous:
        # Safe to adopt the caller's buffer directly.
        self.pixels = reshaped
        return
    if not copy:
        warn(
            "The copy flag was NOT honoured. A copy HAS been made. "
            "Please ensure the data you pass is C-contiguous."
        )
    self.pixels = np.array(reshaped, copy=True, order="C", dtype=reshaped.dtype)
def extract_channels(self, channels):
    r"""
    A copy of this image containing only the requested channel(s).

    Parameters
    ----------
    channels : `int` or `[int]`
        The channel index or `list` of channel indices to retain.

    Returns
    -------
    image : `type(self)`
        A copy of this image with only the channels requested.
    """
    # A scalar index would drop the channel axis - wrap it in a list.
    if not isinstance(channels, list):
        channels = [channels]
    new_image = self.copy()
    new_image.pixels = self.pixels[channels]
    return new_image
def as_histogram(self, keep_channels=True, bins="unique"):
    r"""
    Histogram binning of the values of this image.

    Parameters
    ----------
    keep_channels : `bool`, optional
        If ``False``, return a single histogram over all channels. If
        ``True``, return a `list` of histograms, one per channel.
    bins : ``{unique}``, positive `int` or sequence of scalars, optional
        If ``'unique'``, the bins are centred on the unique values of each
        channel. If a positive `int`, the number of bins. If a sequence of
        scalars, the bin centres themselves.

    Returns
    -------
    hist : `ndarray` or `list` with ``n_channels`` `ndarrays` inside
        The histogram(s): an `ndarray` when ``keep_channels=False``,
        otherwise a `list` with ``len(hist) == n_channels``.
    bin_edges : `ndarray` or `list` with `n_channels` `ndarrays` inside
        The bins' edges for the histogram(s) above.

    Raises
    ------
    ValueError
        Bins can be either 'unique', positive int or a sequence of scalars.

    Examples
    --------
    Visualizing the histogram when a list of array bin edges is provided:

    >>> hist, bin_edges = image.as_histogram()
    >>> for k in range(len(hist)):
    >>>     plt.subplot(1,len(hist),k)
    >>>     width = 0.7 * (bin_edges[k][1] - bin_edges[k][0])
    >>>     centre = (bin_edges[k][:-1] + bin_edges[k][1:]) / 2
    >>>     plt.bar(centre, hist[k], align='center', width=width)
    """
    # Validate the bins specification. A boolean flag (rather than the old
    # `bins = 0` sentinel) records the 'unique' request - comparing the
    # sentinel with `bins == 0` crashed with "truth value of an array is
    # ambiguous" whenever an ndarray of bin edges was passed, even though
    # a sequence of scalars is a documented input.
    use_unique = False
    if isinstance(bins, str):
        if bins != "unique":
            # NOTE: previously this message read "...int ora sequence..."
            # due to a missing space in the broken-up string literal.
            raise ValueError(
                "Bins can be either 'unique', positive int or a "
                "sequence of scalars."
            )
        use_unique = True
    elif isinstance(bins, int) and bins < 1:
        raise ValueError(
            "Bins can be either 'unique', positive int or a " "sequence of scalars."
        )
    # compute histogram
    vec = self.as_vector(keep_channels=keep_channels)
    if vec.ndim == 1 or vec.shape[0] == 1:
        channel_bins = np.unique(vec) if use_unique else bins
        hist, bin_edges = np.histogram(vec, bins=channel_bins)
    else:
        hist = []
        bin_edges = []
        for ch in range(vec.shape[0]):
            channel_bins = np.unique(vec[ch, :]) if use_unique else bins
            h_tmp, c_tmp = np.histogram(vec[ch, :], bins=channel_bins)
            hist.append(h_tmp)
            bin_edges.append(c_tmp)
    return hist, bin_edges
def _view_2d(
    self,
    figure_id=None,
    new_figure=False,
    channels=None,
    interpolation="bilinear",
    cmap_name=None,
    alpha=1.0,
    render_axes=False,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    axes_x_limits=None,
    axes_y_limits=None,
    axes_x_ticks=None,
    axes_y_ticks=None,
    figure_size=(7, 7),
):
    r"""
    View the image with the default image viewer. Appears on the Image as
    ``view`` when the Image is 2D.

    Parameters
    ----------
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    channels : `int` or `list` of `int` or ``all`` or ``None``
        The channel(s) to render; ``all`` renders every channel in
        subplots. ``None`` renders RGB images in RGB mode and is
        equivalent to ``all`` otherwise.
    interpolation : `str`, optional
        Image interpolation used for rendering, e.g. ``bilinear``
        (smooth) or ``nearest`` (pixelated); any matplotlib
        interpolation name is accepted.
    cmap_name : `str`, optional
        If ``None``, single channel images default to greyscale and three
        channel images to rgb colormaps.
    alpha : `float`, optional
        Alpha blending value between 0 (transparent) and 1 (opaque).
    render_axes : `bool`, optional
        If ``True``, the axes will be rendered.
    axes_font_name, axes_font_size, axes_font_style, axes_font_weight
        Standard matplotlib axes font options.
    axes_x_limits, axes_y_limits : `float` or (`float`, `float`) or ``None``
        Axis limits, or padding percentage when a single `float`;
        automatic when ``None``.
    axes_x_ticks, axes_y_ticks : `list` or `tuple` or ``None``, optional
        The ticks of the x / y axis.
    figure_size : (`float`, `float`) `tuple` or ``None``, optional
        The size of the figure in inches.

    Returns
    -------
    viewer : `ImageViewer`
        The image viewing object.
    """
    viewer = ImageViewer(
        figure_id, new_figure, self.n_dims, self.pixels, channels=channels
    )
    return viewer.render(
        interpolation=interpolation,
        cmap_name=cmap_name,
        alpha=alpha,
        render_axes=render_axes,
        axes_font_name=axes_font_name,
        axes_font_size=axes_font_size,
        axes_font_style=axes_font_style,
        axes_font_weight=axes_font_weight,
        axes_x_limits=axes_x_limits,
        axes_y_limits=axes_y_limits,
        axes_x_ticks=axes_x_ticks,
        axes_y_ticks=axes_y_ticks,
        figure_size=figure_size,
    )
def _view_landmarks_2d(
    self,
    channels=None,
    group=None,
    with_labels=None,
    without_labels=None,
    figure_id=None,
    new_figure=False,
    interpolation="bilinear",
    cmap_name=None,
    alpha=1.0,
    render_lines=True,
    line_colour=None,
    line_style="-",
    line_width=1,
    render_markers=True,
    marker_style="o",
    marker_size=5,
    marker_face_colour=None,
    marker_edge_colour=None,
    marker_edge_width=1.0,
    render_numbering=False,
    numbers_horizontal_align="center",
    numbers_vertical_align="bottom",
    numbers_font_name="sans-serif",
    numbers_font_size=10,
    numbers_font_style="normal",
    numbers_font_weight="normal",
    numbers_font_colour="k",
    render_legend=False,
    legend_title="",
    legend_font_name="sans-serif",
    legend_font_style="normal",
    legend_font_size=10,
    legend_font_weight="normal",
    legend_marker_scale=None,
    legend_location=2,
    legend_bbox_to_anchor=(1.05, 1.0),
    legend_border_axes_pad=None,
    legend_n_columns=1,
    legend_horizontal_spacing=None,
    legend_vertical_spacing=None,
    legend_border=True,
    legend_border_padding=None,
    legend_shadow=False,
    legend_rounded_corners=False,
    render_axes=False,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    axes_x_limits=None,
    axes_y_limits=None,
    axes_x_ticks=None,
    axes_y_ticks=None,
    figure_size=(7, 7),
):
    """
    Visualize the landmarks on top of the image. Appears on the Image as
    ``view_landmarks`` when the Image is 2D.

    This is a thin delegating wrapper: every keyword is forwarded
    unchanged to :map:`view_image_landmarks` (with ``masked=False``),
    which performs the actual rendering.

    Parameters
    ----------
    channels : `int` or `list` of `int` or ``all`` or ``None``
        The channel(s) to render; ``all`` renders every channel in
        subplots. ``None`` renders RGB images in RGB mode and is
        equivalent to ``all`` otherwise.
    group : `str` or ``None``, optional
        The landmark group to visualize. If ``None`` and more than one
        group exists, an error is raised.
    with_labels : ``None`` or `str` or `list` of `str`, optional
        If not ``None``, show only the given label(s). Must **not** be
        combined with ``without_labels``.
    without_labels : ``None`` or `str` or `list` of `str`, optional
        If not ``None``, show all except the given label(s). Must **not**
        be combined with ``with_labels``.
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    interpolation, cmap_name, alpha
        Image rendering options (see ``view``).
    render_lines, line_colour, line_style, line_width
        Landmark connectivity (edge) rendering options.
    render_markers, marker_style, marker_size, marker_face_colour,
    marker_edge_colour, marker_edge_width
        Landmark point (marker) rendering options.
    render_numbering, numbers_horizontal_align, numbers_vertical_align,
    numbers_font_name, numbers_font_size, numbers_font_style,
    numbers_font_weight, numbers_font_colour
        Landmark numbering rendering options.
    render_legend, legend_title, legend_font_name, legend_font_style,
    legend_font_size, legend_font_weight, legend_marker_scale,
    legend_location, legend_bbox_to_anchor, legend_border_axes_pad,
    legend_n_columns, legend_horizontal_spacing, legend_vertical_spacing,
    legend_border, legend_border_padding, legend_shadow,
    legend_rounded_corners
        Legend rendering options (standard matplotlib semantics).
    render_axes, axes_font_name, axes_font_size, axes_font_style,
    axes_font_weight, axes_x_limits, axes_y_limits, axes_x_ticks,
    axes_y_ticks
        Axes rendering options.
    figure_size : (`float`, `float`) `tuple` or ``None``, optional
        The size of the figure in inches.

    Raises
    ------
    ValueError
        If both ``with_labels`` and ``without_labels`` are passed.
    ValueError
        If the landmark manager doesn't contain the provided group label.
    """
    from menpo.visualize import view_image_landmarks

    # The third positional argument (False) is the `masked` flag - plain
    # Images are never masked.
    return view_image_landmarks(
        self,
        channels,
        False,
        group,
        with_labels,
        without_labels,
        figure_id,
        new_figure,
        interpolation,
        cmap_name,
        alpha,
        render_lines,
        line_colour,
        line_style,
        line_width,
        render_markers,
        marker_style,
        marker_size,
        marker_face_colour,
        marker_edge_colour,
        marker_edge_width,
        render_numbering,
        numbers_horizontal_align,
        numbers_vertical_align,
        numbers_font_name,
        numbers_font_size,
        numbers_font_style,
        numbers_font_weight,
        numbers_font_colour,
        render_legend,
        legend_title,
        legend_font_name,
        legend_font_style,
        legend_font_size,
        legend_font_weight,
        legend_marker_scale,
        legend_location,
        legend_bbox_to_anchor,
        legend_border_axes_pad,
        legend_n_columns,
        legend_horizontal_spacing,
        legend_vertical_spacing,
        legend_border,
        legend_border_padding,
        legend_shadow,
        legend_rounded_corners,
        render_axes,
        axes_font_name,
        axes_font_size,
        axes_font_style,
        axes_font_weight,
        axes_x_limits,
        axes_y_limits,
        axes_x_ticks,
        axes_y_ticks,
        figure_size,
    )
def crop(
self,
min_indices,
max_indices,
constrain_to_boundary=False,
return_transform=False,
):
r"""
Return a cropped copy of this image using the given minimum and
maximum indices. Landmarks are correctly adjusted so they maintain
their position relative to the newly cropped image.
Parameters
----------
min_indices : ``(n_dims,)`` `ndarray`
The minimum index over each dimension.
max_indices : ``(n_dims,)`` `ndarray`
The maximum index over each dimension.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
cropped_image : `type(self)`
A new instance of self, but cropped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
``min_indices`` and ``max_indices`` both have to be of length
``n_dims``. All ``max_indices`` must be greater than
``min_indices``.
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
min_indices = np.floor(min_indices)
max_indices = np.ceil(max_indices)
if not (min_indices.size == max_indices.size == self.n_dims):
raise ValueError(
"Both min and max indices should be 1D numpy arrays of"
" length n_dims ({})".format(self.n_dims)
)
elif not np.all(max_indices > min_indices):
raise ValueError("All max indices must be greater that the min " "indices")
min_bounded = self.constrain_points_to_bounds(min_indices)
max_bounded = self.constrain_points_to_bounds(max_indices)
all_max_bounded = np.all(min_bounded == min_indices)
all_min_bounded = np.all(max_bounded == max_indices)
if not (constrain_to_boundary or all_max_bounded or all_min_bounded):
# points have been constrained and the user didn't want this -
raise ImageBoundaryError(min_indices, max_indices, min_bounded, max_bounded)
new_shape = (max_bounded - min_bounded).astype(int)
return self.warp_to_shape(
new_shape,
Translation(min_bounded),
order=0,
warp_landmarks=True,
return_transform=return_transform,
)
def crop_to_pointcloud(
self, pointcloud, boundary=0, constrain_to_boundary=True, return_transform=False
):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with an optional ``n_pixel`` boundary.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the bounds of the pointcloud.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
min_indices, max_indices = pointcloud.bounds(boundary=boundary)
return self.crop(
min_indices,
max_indices,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_landmarks(
self, group=None, boundary=0, constrain_to_boundary=True, return_transform=False
):
r"""
Return a copy of this image cropped so that it is bounded around a set
of landmarks with an optional ``n_pixel`` boundary
Parameters
----------
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to its landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud(
pc,
boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_pointcloud_proportion(
self,
pointcloud,
boundary_proportion,
minimum=True,
constrain_to_boundary=True,
return_transform=False,
):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with a border proportional to the pointcloud spread or range.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the pointclouds' per-dimension range; if ``False`` w.r.t.
the maximum value of the pointclouds' per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the border proportional to
the pointcloud spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
if minimum:
boundary = boundary_proportion * np.min(pointcloud.range())
else:
boundary = boundary_proportion * np.max(pointcloud.range())
return self.crop_to_pointcloud(
pointcloud,
boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_landmarks_proportion(
self,
boundary_proportion,
group=None,
minimum=True,
constrain_to_boundary=True,
return_transform=False,
):
r"""
Crop this image to be bounded around a set of landmarks with a
border proportional to the landmark spread or range.
Parameters
----------
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the landmarks' per-dimension range; if ``False`` w.r.t. the
maximum value of the landmarks' per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
This image, cropped to its landmarks with a border proportional to
the landmark spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud_proportion(
pc,
boundary_proportion,
minimum=minimum,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def constrain_points_to_bounds(self, points):
r"""
Constrains the points provided to be within the bounds of this image.
Parameters
----------
points : ``(d,)`` `ndarray`
Points to be snapped to the image boundaries.
Returns
-------
bounded_points : ``(d,)`` `ndarray`
Points snapped to not stray outside the image edges.
"""
bounded_points = points.copy()
# check we don't stray under any edges
bounded_points[bounded_points < 0] = 0
# check we don't stray over any edges
shape = np.array(self.shape)
over_image = (shape - bounded_points) < 0
bounded_points[over_image] = shape[over_image]
return bounded_points
def extract_patches(
self,
patch_centers,
patch_shape=(16, 16),
sample_offsets=None,
as_single_array=True,
order=0,
mode="constant",
cval=0.0,
):
r"""
Extract a set of patches from an image. Given a set of patch centers
and a patch size, patches are extracted from within the image, centred
on the given coordinates. Sample offsets denote a set of offsets to
extract from within a patch. This is very useful if you want to extract
a dense set of features around a set of landmarks and simply sample the
same grid of patches around the landmarks.
If sample offsets are used, to access the offsets for each patch you
need to slice the resulting `list`. So for 2 offsets, the first centers
offset patches would be ``patches[:2]``.
Currently only 2D images are supported.
Note that the default is nearest neighbour sampling for the patches
which is achieved via slicing and is much more efficient than using
sampling/interpolation. Note that a significant performance decrease
will be measured if the ``order`` or ``mode`` parameters are modified
from ``order = 0`` and ``mode = 'constant'`` as internally sampling
will be used rather than slicing.
Parameters
----------
patch_centers : :map:`PointCloud`
The centers to extract patches around.
patch_shape : ``(1, n_dims)`` `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according to
the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside the
image boundaries.
Returns
-------
patches : `list` or `ndarray`
Returns the extracted patches. Returns a list if
``as_single_array=True`` and an `ndarray` if
``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
"""
if self.n_dims != 2:
raise ValueError(
"Only two dimensional patch extraction is " "currently supported."
)
if order == 0 and mode == "constant":
# Fast path using slicing
single_array = extract_patches_with_slice(
self.pixels,
patch_centers.points,
patch_shape,
offsets=sample_offsets,
cval=cval,
)
else:
single_array = extract_patches_by_sampling(
self.pixels,
patch_centers.points,
patch_shape,
offsets=sample_offsets,
order=order,
mode=mode,
cval=cval,
)
if as_single_array:
return single_array
else:
return [Image(o, copy=False) for p in single_array for o in p]
def extract_patches_around_landmarks(
self,
group=None,
patch_shape=(16, 16),
sample_offsets=None,
as_single_array=True,
):
r"""
Extract patches around landmarks existing on this image. Provided the
group label and optionally the landmark label extract a set of patches.
See `extract_patches` for more information.
Currently only 2D images are supported.
Parameters
----------
group : `str` or ``None``, optional
The landmark group to use as patch centres.
patch_shape : `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
Returns
-------
patches : `list` or `ndarray`
Returns the extracted patches. Returns a list if
``as_single_array=True`` and an `ndarray` if
``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
"""
return self.extract_patches(
self.landmarks[group],
patch_shape=patch_shape,
sample_offsets=sample_offsets,
as_single_array=as_single_array,
)
    def set_patches(self, patches, patch_centers, offset=None, offset_index=None):
        r"""
        Set the values of a group of patches into the correct regions of a copy
        of this image. Given an array of patches and a set of patch centers,
        the patches' values are copied in the regions of the image that are
        centred on the coordinates of the given centers.
        The patches argument can have any of the two formats that are returned
        from the `extract_patches()` and `extract_patches_around_landmarks()`
        methods. Specifically it can be:
            1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
            2. `list` of ``n_center * n_offset`` :map:`Image` objects
        Currently only 2D images are supported.
        Parameters
        ----------
        patches : `ndarray` or `list`
            The values of the patches. It can have any of the two formats that
            are returned from the `extract_patches()` and
            `extract_patches_around_landmarks()` methods. Specifically, it can
            either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
            `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
            objects.
        patch_centers : :map:`PointCloud`
            The centers to set the patches around.
        offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
            The offset to apply on the patch centers within the image.
            If ``None``, then ``(0, 0)`` is used.
        offset_index : `int` or ``None``, optional
            The offset index within the provided `patches` argument, thus the
            index of the second dimension from which to sample. If ``None``,
            then ``0`` is used.
        Raises
        ------
        ValueError
            If image is not 2D
        ValueError
            If offset does not have shape (1, 2)
        """
        # parse arguments
        if self.n_dims != 2:
            raise ValueError(
                "Only two dimensional patch insertion is " "currently supported."
            )
        # Normalise `offset` to a (1, 2) intp array: default (0, 0), or wrap
        # a user-supplied tuple/list in an extra leading axis.
        if offset is None:
            offset = np.zeros([1, 2], dtype=np.intp)
        elif isinstance(offset, tuple) or isinstance(offset, list):
            offset = np.asarray([offset])
        offset = np.require(offset, dtype=np.intp)
        if not offset.shape == (1, 2):
            raise ValueError(
                "The offset must be a tuple, a list or a "
                "numpy.array with shape (1, 2)."
            )
        if offset_index is None:
            offset_index = 0
        # if patches is a list, convert it to array
        if isinstance(patches, list):
            patches = _convert_patches_list_to_single_array(
                patches, patch_centers.n_points
            )
        copy = self.copy()
        # set patches
        # NOTE: this resolves to the *module-level* `set_patches` helper
        # (same name as this method - inside the body, the name is looked up
        # at module scope, not on the class). Its return value is discarded,
        # so it presumably writes into `copy.pixels` in place - confirm
        # against the helper's definition.
        set_patches(patches, copy.pixels, patch_centers.points, offset, offset_index)
        return copy
def set_patches_around_landmarks(
self, patches, group=None, offset=None, offset_index=None
):
r"""
Set the values of a group of patches around the landmarks existing in a
copy of this image. Given an array of patches, a group and a label, the
patches' values are copied in the regions of the image that are
centred on the coordinates of corresponding landmarks.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
group : `str` or ``None`` optional
The landmark group to use as patch centres.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
return self.set_patches(
patches, self.landmarks[group], offset=offset, offset_index=offset_index
)
def warp_to_mask(
self,
template_mask,
transform,
warp_landmarks=True,
order=1,
mode="constant",
cval=0.0,
batch_size=None,
return_transform=False,
):
r"""
Return a copy of this image warped into a different reference space.
Note that warping into a mask is slower than warping into a full image.
If you don't need a non-linear mask, consider :meth:``warp_to_shape``
instead.
Parameters
----------
template_mask : :map:`BooleanImage`
Defines the shape of the result, and what pixels should be sampled.
transform : :map:`Transform`
Transform **from the template space back to this image**.
Defines, for each pixel location on the template, which pixel
location should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
value can cause warping to become much slower, particular for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : :map:`MaskedImage`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
if self.n_dims != transform.n_dims:
raise ValueError(
"Trying to warp a {}D image with a {}D transform "
"(they must match)".format(self.n_dims, transform.n_dims)
)
template_points = template_mask.true_indices()
points_to_sample = transform.apply(template_points, batch_size=batch_size)
sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval)
# set any nan values to 0
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_image = self._build_warp_to_mask(template_mask, sampled)
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, "path"):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def _build_warp_to_mask(self, template_mask, sampled_pixel_values):
r"""
Builds the warped image from the template mask and sampled pixel values.
Overridden for :map:`BooleanImage` as we can't use the usual
:meth:`from_vector_inplace` method. All other :map:`Image` classes
share the :map:`Image` implementation.
Parameters
----------
template_mask : :map:`BooleanImage` or 2D `bool ndarray`
Mask for warping.
sampled_pixel_values : ``(n_true_pixels_in_mask,)`` `ndarray`
Sampled value to rebuild the masked image from.
"""
from menpo.image import MaskedImage
warped_image = MaskedImage.init_blank(
template_mask.shape, n_channels=self.n_channels, mask=template_mask
)
warped_image._from_vector_inplace(sampled_pixel_values.ravel())
return warped_image
def sample(self, points_to_sample, order=1, mode="constant", cval=0.0):
r"""
Sample this image at the given sub-pixel accurate points. The input
PointCloud should have the same number of dimensions as the image e.g.
a 2D PointCloud for a 2D multi-channel image. A numpy array will be
returned the has the values for every given point across each channel
of the image.
Parameters
----------
points_to_sample : :map:`PointCloud`
Array of points to sample from the image. Should be
`(n_points, n_dims)`
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
Returns
-------
sampled_pixels : (`n_points`, `n_channels`) `ndarray`
The interpolated values taken across every channel of the image.
"""
# The public interface is a PointCloud, but when this is used internally
# a numpy array is passed. So let's just treat the PointCloud as a
# 'special case' and not document the ndarray ability.
if isinstance(points_to_sample, PointCloud):
points_to_sample = points_to_sample.points
return scipy_interpolation(
self.pixels, points_to_sample, order=order, mode=mode, cval=cval
)
    def warp_to_shape(
        self,
        template_shape,
        transform,
        warp_landmarks=True,
        order=1,
        mode="constant",
        cval=0.0,
        batch_size=None,
        return_transform=False,
    ):
        """
        Return a copy of this image warped into a different reference space.
        Parameters
        ----------
        template_shape : `tuple` or `ndarray`
            Defines the shape of the result, and what pixel indices should be
            sampled (all of them).
        transform : :map:`Transform`
            Transform **from the template_shape space back to this image**.
            Defines, for each index on template_shape, which pixel location
            should be sampled from on this image.
        warp_landmarks : `bool`, optional
            If ``True``, result will have the same landmark dictionary
            as self, but with each landmark updated to the warped position.
        order : `int`, optional
            The order of interpolation. The order has to be in the range [0,5]
            =========      ====================
            Order          Interpolation
            =========      ====================
            0              Nearest-neighbor
            1              Bi-linear *(default)*
            2              Bi-quadratic
            3              Bi-cubic
            4              Bi-quartic
            5              Bi-quintic
            =========      ====================
        mode : ``{constant, nearest, reflect, wrap}``, optional
            Points outside the boundaries of the input are filled according
            to the given mode.
        cval : `float`, optional
            Used in conjunction with mode ``constant``, the value outside
            the image boundaries.
        batch_size : `int` or ``None``, optional
            This should only be considered for large images. Setting this
            value can cause warping to become much slower, particular for
            cached warps such as Piecewise Affine. This size indicates
            how many points in the image should be warped at a time, which
            keeps memory usage low. If ``None``, no batching is used and all
            points are warped at once.
        return_transform : `bool`, optional
            This argument is for internal use only. If ``True``, then the
            :map:`Transform` object is also returned.
        Returns
        -------
        warped_image : `type(self)`
            A copy of this image, warped.
        transform : :map:`Transform`
            The transform that was used. It only applies if
            `return_transform` is ``True``.
        """
        template_shape = np.array(template_shape, dtype=int)
        if (
            isinstance(transform, Homogeneous)
            and order in range(2)
            and self.n_dims == 2
            and cv2_perspective_interpolation is not None
        ):
            # Fast path: a 2D Homogeneous transform with nearest/bi-linear
            # interpolation (order 0 or 1) can be delegated to OpenCV's
            # optimised perspective warp, when the cv2 backend is available.
            warped_pixels = cv2_perspective_interpolation(
                self.pixels,
                template_shape,
                transform,
                order=order,
                mode=mode,
                cval=cval,
            )
        else:
            # General path: explicitly transform every template index and
            # sample this image at those (sub-pixel) locations.
            template_points = indices_for_image_of_shape(template_shape)
            points_to_sample = transform.apply(template_points, batch_size=batch_size)
            sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval)
            # set any nan values to 0
            # (seems that map_coordinates can produce nan values)
            sampled[np.isnan(sampled)] = 0
            # Reshape the flat samples back into channels-first image layout.
            warped_pixels = sampled.reshape((self.n_channels,) + tuple(template_shape))
        # Shared rebuild: wraps pixels and maps landmarks/path across.
        return self._build_warp_to_shape(
            warped_pixels, transform, warp_landmarks, return_transform
        )
def _build_warp_to_shape(
self, warped_pixels, transform, warp_landmarks, return_transform
):
# factored out common logic from the different paths we can take in
# warp_to_shape. Rebuilds an image post-warp, adjusting landmarks
# as necessary.
warped_image = Image(warped_pixels, copy=False)
# warp landmarks if requested.
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, "path"):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
    def rescale(
        self, scale, round="ceil", order=1, warp_landmarks=True, return_transform=False
    ):
        r"""
        Return a copy of this image, rescaled by a given factor.
        Landmarks are rescaled appropriately.
        Parameters
        ----------
        scale : `float` or `tuple` of `floats`
            The scale factor. If a tuple, the scale to apply to each dimension.
            If a single `float`, the scale will be applied uniformly across
            each dimension.
        round: ``{ceil, floor, round}``, optional
            Rounding function to be applied to floating point shapes.
        order : `int`, optional
            The order of interpolation. The order has to be in the range [0,5]
            =========      ====================
            Order          Interpolation
            =========      ====================
            0              Nearest-neighbor
            1              Bi-linear *(default)*
            2              Bi-quadratic
            3              Bi-cubic
            4              Bi-quartic
            5              Bi-quintic
            =========      ====================
        warp_landmarks : `bool`, optional
            If ``True``, result will have the same landmark dictionary
            as self, but with each landmark updated to the warped position.
        return_transform : `bool`, optional
            If ``True``, then the :map:`Transform` object that was used to
            perform the rescale is also returned.
        Returns
        -------
        rescaled_image : ``type(self)``
            A copy of this image, rescaled.
        transform : :map:`Transform`
            The transform that was used. It only applies if
            `return_transform` is ``True``.
        Raises
        ------
        ValueError:
            If less scales than dimensions are provided.
            If any scale is less than or equal to 0.
        """
        # Pythonic way of converting to list if we are passed a single float
        # (a scalar has no len(), so the TypeError below signals "broadcast
        # this single scale across all dimensions").
        try:
            if len(scale) < self.n_dims:
                raise ValueError(
                    "Must provide a scale per dimension."
                    "{} scales were provided, {} were expected.".format(
                        len(scale), self.n_dims
                    )
                )
        except TypeError:  # Thrown when len() is called on a float
            scale = [scale] * self.n_dims
        # Make sure we have a numpy array
        scale = np.asarray(scale)
        for s in scale:
            if s <= 0:
                raise ValueError("Scales must be positive floats.")
        transform = NonUniformScale(scale)
        # use the scale factor to make the template mask bigger
        # while respecting the users rounding preference.
        template_shape = round_image_shape(transform.apply(self.shape), round)
        # due to image indexing, we can't just apply the pseudoinverse
        # transform to achieve the scaling we want though!
        # Consider a 3x rescale on a 2x4 image. Looking at each dimension:
        #    H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x
        #    W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x
        # => need to make the correct scale per dimension!
        shape = np.array(self.shape, dtype=float)
        # scale factors = max_index_after / current_max_index
        # (note that max_index = length - 1, as 0 based)
        scale_factors = (scale * shape - 1) / (shape - 1)
        inverse_transform = NonUniformScale(scale_factors).pseudoinverse()
        # for rescaling we enforce that mode is nearest to avoid num. errors
        return self.warp_to_shape(
            template_shape,
            inverse_transform,
            warp_landmarks=warp_landmarks,
            order=order,
            mode="nearest",
            return_transform=return_transform,
        )
def rescale_to_diagonal(
self, diagonal, round="ceil", warp_landmarks=True, return_transform=False
):
r"""
Return a copy of this image, rescaled so that the it's diagonal is a
new size.
Parameters
----------
diagonal: `int`
The diagonal size of the new image.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : type(self)
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
return self.rescale(
diagonal / self.diagonal(),
round=round,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rescale_to_pointcloud(
self,
pointcloud,
group=None,
round="ceil",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rescaled so that the scale of a
particular group of landmarks matches the scale of the passed
reference pointcloud.
Parameters
----------
pointcloud: :map:`PointCloud`
The reference pointcloud to which the landmarks specified by
``group`` will be scaled to match.
group : `str`, optional
The key of the landmark set that should be used. If ``None``,
and if there is only one set of landmarks, this set will be used.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
pc = self.landmarks[group]
scale = AlignmentUniformScale(pc, pointcloud).as_vector().copy()
return self.rescale(
scale,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rescale_landmarks_to_diagonal_range(
self,
diagonal_range,
group=None,
round="ceil",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rescaled so that the ``diagonal_range`` of
the bounding box containing its landmarks matches the specified
``diagonal_range`` range.
Parameters
----------
diagonal_range: ``(n_dims,)`` `ndarray`
The diagonal_range range that we want the landmarks of the returned
image to have.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
round : ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
x, y = self.landmarks[group].range()
scale = diagonal_range / np.sqrt(x ** 2 + y ** 2)
return self.rescale(
scale,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def resize(self, shape, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, resized to a particular shape.
All image information (landmarks, and mask in the case of
:map:`MaskedImage`) is resized appropriately.
Parameters
----------
shape : `tuple`
The new shape to resize to.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the resize is also returned.
Returns
-------
resized_image : ``type(self)``
A copy of this image, resized.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If the number of dimensions of the new shape does not match
the number of dimensions of the image.
"""
shape = np.asarray(shape, dtype=float)
if len(shape) != self.n_dims:
raise ValueError(
"Dimensions must match."
"{} dimensions provided, {} were expected.".format(
shape.shape, self.n_dims
)
)
scales = shape / self.shape
# Have to round the shape when scaling to deal with floating point
# errors. For example, if we want (250, 250), we need to ensure that
# we get (250, 250) even if the number we obtain is 250 to some
# floating point inaccuracy.
return self.rescale(
scales,
round="round",
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def zoom(self, scale, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, zoomed about the centre point. ``scale``
values greater than 1.0 denote zooming **in** to the image and values
less than 1.0 denote zooming **out** of the image. The size of the
image will not change, if you wish to scale an image, please see
:meth:`rescale`.
Parameters
----------
scale : `float`
``scale > 1.0`` denotes zooming in. Thus the image will appear
larger and areas at the edge of the zoom will be 'cropped' out.
``scale < 1.0`` denotes zooming out. The image will be padded
by the value of ``cval``.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the zooming is also returned.
Returns
-------
zoomed_image : ``type(self)``
A copy of this image, zoomed.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
t = scale_about_centre(self, 1.0 / scale)
return self.warp_to_shape(
self.shape,
t,
order=order,
mode="nearest",
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rotate_ccw_about_centre(
self,
theta,
degrees=True,
retain_shape=False,
mode="constant",
cval=0.0,
round="round",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rotated counter-clockwise about its centre.
Note that the `retain_shape` argument defines the shape of the rotated
image. If ``retain_shape=True``, then the shape of the rotated image
will be the same as the one of current image, so some regions will
probably be cropped. If ``retain_shape=False``, then the returned image
has the correct size so that the whole area of the current image is
included.
Parameters
----------
theta : `float`
The angle of rotation about the centre.
degrees : `bool`, optional
If ``True``, `theta` is interpreted in degrees. If ``False``,
``theta`` is interpreted as radians.
retain_shape : `bool`, optional
If ``True``, then the shape of the rotated image will be the same as
the one of current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
The value to be set outside the rotated image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rotation is also returned.
Returns
-------
rotated_image : ``type(self)``
The rotated image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
Image rotation is presently only supported on 2D images
"""
if self.n_dims != 2:
raise ValueError(
"Image rotation is presently only supported on " "2D images"
)
rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
return self.transform_about_centre(
rotation,
retain_shape=retain_shape,
mode=mode,
cval=cval,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def transform_about_centre(
self,
transform,
retain_shape=False,
mode="constant",
cval=0.0,
round="round",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, transformed about its centre.
Note that the `retain_shape` argument defines the shape of the
transformed image. If ``retain_shape=True``, then the shape of the
transformed image will be the same as the one of current image, so some
regions will probably be cropped. If ``retain_shape=False``, then the
returned image has the correct size so that the whole area of the
current image is included.
.. note::
This method will not work for transforms that result in a transform
chain as :map:`TransformChain` is not invertible.
.. note::
Be careful when defining transforms for warping imgaes. All pixel
locations must fall within a valid range as expected by the
transform. Therefore, your transformation must accept 'negative'
pixel locations as the pixel locations provided to your transform
will have the object centre subtracted from them.
Parameters
----------
transform : :map:`ComposableTransform` and :map:`VInvertible` type
A composable transform. ``pseudoinverse`` will be invoked on the
resulting transform so it must implement a valid inverse.
retain_shape : `bool`, optional
If ``True``, then the shape of the sheared image will be the same as
the one of current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
The value to be set outside the sheared image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the shearing is also returned.
Returns
-------
transformed_image : ``type(self)``
The transformed image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Examples
--------
This is an example for rotating an image about its center. Let's
first load an image, create the rotation transform and then apply it ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Rotation
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create shearing transform
rot_tr = Rotation.init_from_2d_ccw_angle(45)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render rotated image
plt.subplot(132)
im.transform_about_centre(rot_tr).view_landmarks()
plt.title('Rotated')
# Render rotated image that has shape equal as original image
plt.subplot(133)
im.transform_about_centre(rot_tr, retain_shape=True).view_landmarks()
plt.title('Rotated (Retain original shape)')
Similarly, in order to apply a shear transform ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Affine
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create shearing transform
shear_tr = Affine.init_from_2d_shear(25, 10)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render sheared image
plt.subplot(132)
im.transform_about_centre(shear_tr).view_landmarks()
plt.title('Sheared')
# Render sheared image that has shape equal as original image
plt.subplot(133)
im.transform_about_centre(shear_tr,
retain_shape=True).view_landmarks()
plt.title('Sheared (Retain original shape)')
"""
if retain_shape:
shape = self.shape
applied_transform = transform_about_centre(self, transform)
else:
# Get image's bounding box coordinates
original_bbox = bounding_box((0, 0), np.array(self.shape) - 1)
# Translate to origin and apply transform
trans = Translation(-self.centre(), skip_checks=True).compose_before(
transform
)
transformed_bbox = trans.apply(original_bbox)
# Create new translation so that min bbox values go to 0
t = Translation(-transformed_bbox.bounds()[0])
applied_transform = trans.compose_before(t)
transformed_bbox = trans.apply(original_bbox)
# Output image's shape is the range of the sheared bounding box
# while respecting the users rounding preference.
shape = round_image_shape(transformed_bbox.range() + 1, round)
# Warp image
return self.warp_to_shape(
shape,
applied_transform.pseudoinverse(),
order=order,
warp_landmarks=warp_landmarks,
mode=mode,
cval=cval,
return_transform=return_transform,
)
def mirror(self, axis=1, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, mirrored/flipped about a certain axis.
Parameters
----------
axis : `int`, optional
The axis about which to mirror the image.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the mirroring is also returned.
Returns
-------
mirrored_image : ``type(self)``
The mirrored image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
axis cannot be negative
ValueError
axis={} but the image has {} dimensions
"""
# Check axis argument
if axis < 0:
raise ValueError("axis cannot be negative")
elif axis >= self.n_dims:
raise ValueError(
"axis={} but the image has {} " "dimensions".format(axis, self.n_dims)
)
# Create transform that includes ...
# ... flipping about the selected axis ...
rot_matrix = np.eye(self.n_dims)
rot_matrix[axis, axis] = -1
# ... and translating back to the image's bbox
tr_matrix = np.zeros(self.n_dims)
tr_matrix[axis] = self.shape[axis] - 1
# Create transform object
trans = Rotation(rot_matrix, skip_checks=True).compose_before(
Translation(tr_matrix, skip_checks=True)
)
# Warp image
return self.warp_to_shape(
self.shape,
trans.pseudoinverse(),
mode="nearest",
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def pyramid(self, n_levels=3, downscale=2):
r"""
Return a rescaled pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
"""
image = self.copy()
yield image
for _ in range(n_levels - 1):
image = image.rescale(1.0 / downscale)
yield image
def gaussian_pyramid(self, n_levels=3, downscale=2, sigma=None):
r"""
Return the gaussian pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
sigma : `float`, optional
Sigma for gaussian filter. Default is ``downscale / 3.`` which
corresponds to a filter mask twice the size of the scale factor
that covers more than 99% of the gaussian distribution.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
"""
from menpo.feature import gaussian_filter
if sigma is None:
sigma = downscale / 3.0
image = self.copy()
yield image
for level in range(n_levels - 1):
image = gaussian_filter(image, sigma).rescale(1.0 / downscale)
yield image
def as_greyscale(self, mode="luminosity", channel=None):
r"""
Returns a greyscale version of the image. If the image does *not*
represent a 2D RGB image, then the ``luminosity`` mode will fail.
Parameters
----------
mode : ``{average, luminosity, channel}``, optional
============== =====================================================
mode Greyscale Algorithm
============== =====================================================
average Equal average of all channels
luminosity Calculates the luminance using the CCIR 601 formula:
| .. math:: Y' = 0.2989 R' + 0.5870 G' + 0.1140 B'
channel A specific channel is chosen as the intensity value.
============== =====================================================
channel: `int`, optional
The channel to be taken. Only used if mode is ``channel``.
Returns
-------
greyscale_image : :map:`MaskedImage`
A copy of this image in greyscale.
"""
greyscale = self.copy()
if mode == "luminosity":
if self.n_dims != 2:
raise ValueError(
"The 'luminosity' mode only works on 2D RGB"
"images. {} dimensions found, "
"2 expected.".format(self.n_dims)
)
elif self.n_channels != 3:
raise ValueError(
"The 'luminosity' mode only works on RGB"
"images. {} channels found, "
"3 expected.".format(self.n_channels)
)
# Only compute the coefficients once.
global _greyscale_luminosity_coef
if _greyscale_luminosity_coef is None:
_greyscale_luminosity_coef = np.linalg.inv(
np.array(
[
[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.106, 1.703],
]
)
)[0, :]
# Compute greyscale via dot product
pixels = np.dot(_greyscale_luminosity_coef, greyscale.pixels.reshape(3, -1))
# Reshape image back to original shape (with 1 channel)
pixels = pixels.reshape(greyscale.shape)
elif mode == "average":
pixels = np.mean(greyscale.pixels, axis=0)
elif mode == "channel":
if channel is None:
raise ValueError(
"For the 'channel' mode you have to provide" " a channel index"
)
pixels = greyscale.pixels[channel]
else:
raise ValueError(
"Unknown mode {} - expected 'luminosity', "
"'average' or 'channel'.".format(mode)
)
# Set new pixels - ensure channel axis and maintain
greyscale.pixels = pixels[None, ...].astype(greyscale.pixels.dtype, copy=False)
return greyscale
def as_PILImage(self, out_dtype=np.uint8):
r"""
Return a PIL copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
pil_image : `PILImage`
PIL copy of image
Raises
------
ValueError
If image is not 2D and has 1 channel or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 is supported.
"""
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
"Can only convert greyscale or RGB 2D images. "
"Received a {} channel {}D image.".format(self.n_channels, self.n_dims)
)
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
pixels = denormalize_pixels_range(pixels, out_dtype)
return PILImage.fromarray(pixels)
def as_imageio(self, out_dtype=np.uint8):
r"""
Return an Imageio copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
imageio_image : `ndarray`
Imageio image (which is just a numpy ndarray with the channels
as the last axis).
Raises
------
ValueError
If image is not 2D and has 1 channel or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 and uint16
are supported.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .pixels_with_channels_at_back instead.",
MenpoDeprecationWarning,
)
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
"Can only convert greyscale or RGB 2D images. "
"Received a {} channel {}D image.".format(self.n_channels, self.n_dims)
)
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
return denormalize_pixels_range(pixels, out_dtype)
def pixels_range(self):
r"""
The range of the pixel values (min and max pixel values).
Returns
-------
min_max : ``(dtype, dtype)``
The minimum and maximum value of the pixels array.
"""
return self.pixels.min(), self.pixels.max()
    def rolled_channels(self):
        r"""
        Deprecated - please use the equivalent ``pixels_with_channels_at_back``
        method.

        Returns
        -------
        rolled_channels : `ndarray`
            Pixels with channels as the back (last) axis, as produced by
            ``pixels_with_channels_at_back()``.
        """
        # Thin deprecation wrapper: emit a warning, then delegate.
        warn(
            "This method is no longer supported and will be removed in a "
            "future version of Menpo. "
            "Use .pixels_with_channels_at_back() instead.",
            MenpoDeprecationWarning,
        )
        return self.pixels_with_channels_at_back()
def pixels_with_channels_at_back(self, out_dtype=None):
r"""
Returns the pixels matrix, with the channels rolled to the back axis.
This may be required for interacting with external code bases that
require images to have channels as the last axis, rather than the
Menpo convention of channels as the first axis.
If this image is single channel, the final axis is dropped.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
rolled_channels : `ndarray`
Pixels with channels as the back (last) axis. If single channel,
the last axis will be dropped.
"""
p = channels_to_back(self.pixels)
if out_dtype is not None:
p = denormalize_pixels_range(p, out_dtype=out_dtype)
return np.squeeze(p)
def __str__(self):
return "{} {}D Image with {} channel{}".format(
self._str_shape(), self.n_dims, self.n_channels, "s" * (self.n_channels > 1)
)
def has_landmarks_outside_bounds(self):
"""
Indicates whether there are landmarks located outside the image bounds.
:type: `bool`
"""
if self.has_landmarks:
for l_group in self.landmarks:
pc = self.landmarks[l_group].points
if np.any(np.logical_or(self.shape - pc < 1, pc < 0)):
return True
return False
def constrain_landmarks_to_bounds(self):
r"""
Deprecated - please use the equivalent ``constrain_to_bounds`` method
now on PointCloud, in conjunction with the new Image ``bounds()``
method. For example:
>>> im.constrain_landmarks_to_bounds() # Equivalent to below
>>> im.landmarks['test'] = im.landmarks['test'].constrain_to_bounds(im.bounds())
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .constrain_to_bounds() instead (on PointCloud).",
MenpoDeprecationWarning,
)
for l_group in self.landmarks:
l = self.landmarks[l_group]
for k in range(l.points.shape[1]):
tmp = l.points[:, k]
tmp[tmp < 0] = 0
tmp[tmp > self.shape[k] - 1] = self.shape[k] - 1
l.points[:, k] = tmp
self.landmarks[l_group] = l
def normalize_std(self, mode="all", **kwargs):
r"""
Returns a copy of this image normalized such that its
pixel values have zero mean and unit variance.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .normalize_std() instead (features package).",
MenpoDeprecationWarning,
)
return self._normalize(np.std, mode=mode)
def normalize_norm(self, mode="all", **kwargs):
r"""
Returns a copy of this image normalized such that its pixel values
have zero mean and its norm equals 1.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
unit norm.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .normalize_norm() instead (features package).",
MenpoDeprecationWarning,
)
def scale_func(pixels, axis=None):
return np.linalg.norm(pixels, axis=axis, **kwargs)
return self._normalize(scale_func, mode=mode)
    def _normalize(self, scale_func, mode="all"):
        # Shared implementation for normalize_std / normalize_norm: delegates
        # to menpo.feature.normalize, which mean-centres the pixels and then
        # divides by the statistic computed by ``scale_func`` ("all" = over
        # all channels, "per_channel" = each channel independently).
        from menpo.feature import normalize

        return normalize(self, scale_func=scale_func, mode=mode)
def rescale_pixels(self, minimum, maximum, per_channel=True):
r"""A copy of this image with pixels linearly rescaled to fit a range.
Note that the only pixels that will be considered and rescaled are those
that feature in the vectorized form of this image. If you want to use
this routine on all the pixels in a :map:`MaskedImage`, consider
using `as_unmasked()` prior to this call.
Parameters
----------
minimum: `float`
The minimal value of the rescaled pixels
maximum: `float`
The maximal value of the rescaled pixels
per_channel: `boolean`, optional
If ``True``, each channel will be rescaled independently. If
``False``, the scaling will be over all channels.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
"""
v = self.as_vector(keep_channels=True).T
if per_channel:
min_, max_ = v.min(axis=0), v.max(axis=0)
else:
min_, max_ = v.min(), v.max()
sf = ((maximum - minimum) * 1.0) / (max_ - min_)
v_new = ((v - min_) * sf) + minimum
return self.from_vector(v_new.T.ravel())
def clip_pixels(self, minimum=None, maximum=None):
r"""A copy of this image with pixels linearly clipped to fit a range.
Parameters
----------
minimum: `float`, optional
The minimal value of the clipped pixels. If None is provided, the
default value will be 0.
maximum: `float`, optional
The maximal value of the clipped pixels. If None is provided, the
default value will depend on the dtype.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
"""
if minimum is None:
minimum = 0
if maximum is None:
dtype = self.pixels.dtype
if dtype == np.uint8:
maximum = 255
elif dtype == np.uint16:
maximum = 65535
elif dtype in [np.float32, np.float64]:
maximum = 1.0
else:
m1 = "Could not recognise the dtype ({}) to set the maximum."
raise ValueError(m1.format(dtype))
copy = self.copy()
copy.pixels = copy.pixels.clip(min=minimum, max=maximum)
return copy
def rasterize_landmarks(
self,
group=None,
render_lines=True,
line_style="-",
line_colour="b",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=1,
marker_face_colour="b",
marker_edge_colour="b",
marker_edge_width=1,
backend="matplotlib",
):
r"""
This method provides the ability to rasterize 2D landmarks onto the
image. The returned image has the specified landmark groups rasterized
onto the image - which is useful for things like creating result
examples or rendering videos with annotations.
Since multiple landmark groups can be specified, all arguments can take
lists of parameters that map to the provided groups list. Therefore, the
parameters must be lists of the correct length or a single parameter to
apply to every landmark group.
Multiple backends are provided, all with different strengths. The
'pillow' backend is very fast, but not very flexible. The `matplotlib`
backend should be feature compatible with other Menpo rendering methods,
but is much slower due to the overhead of creating a figure to render
into.
Parameters
----------
group : `str` or `list` of `str`, optional
The landmark group key, or a list of keys.
render_lines : `bool`, optional
If ``True``, and the provided landmark group is a
:map:`PointDirectedGraph`, the edges are rendered.
line_style : `str`, optional
The style of the edge line. Not all backends support this argument.
line_colour : `str` or `tuple`, optional
A Matplotlib style colour or a backend dependant colour.
line_width : `int`, optional
The width of the line to rasterize.
render_markers : `bool`, optional
If ``True``, render markers at the coordinates of each landmark.
marker_style : `str`, optional
A Matplotlib marker style. Not all backends support all marker
styles.
marker_size : `int`, optional
The size of the marker - different backends use different scale
spaces so consistent output may by difficult.
marker_face_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_width : `int`, optional
The width of the marker edge. Not all backends support this.
backend : {'matplotlib', 'pillow'}, optional
The backend to use.
Returns
-------
rasterized_image : :map:`Image`
The image with the landmarks rasterized directly into the pixels.
Raises
------
ValueError
Only 2D images are supported.
ValueError
Only RGB (3-channel) or Greyscale (1-channel) images are supported.
"""
from .rasterize import rasterize_landmarks_2d
return rasterize_landmarks_2d(
self,
group=group,
render_lines=render_lines,
line_style=line_style,
line_colour=line_colour,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
backend=backend,
)
def round_image_shape(shape, round):
    """Round a floating point shape to an integer tuple.

    ``round`` selects the numpy rounding function to apply ('ceil', 'floor'
    or 'round'); any other value raises ``ValueError``.
    """
    if round not in ["ceil", "round", "floor"]:
        raise ValueError("round must be either ceil, round or floor")
    # Look up the matching numpy rounding ufunc and cast down to int.
    rounding_fn = getattr(np, round)
    return tuple(rounding_fn(shape).astype(int))
def _convert_patches_list_to_single_array(patches_list, n_center):
r"""
Converts patches from a `list` of :map:`Image` objects to a single `ndarray`
with shape ``(n_center, n_offset, self.n_channels, patch_shape)``.
Note that these two are the formats returned by the `extract_patches()`
and `extract_patches_around_landmarks()` methods of :map:`Image` class.
Parameters
----------
patches_list : `list` of `n_center * n_offset` :map:`Image` objects
A `list` that contains all the patches as :map:`Image` objects.
n_center : `int`
The number of centers from which the patches are extracted.
Returns
-------
patches_array : `ndarray` ``(n_center, n_offset, n_channels, patch_shape)``
The numpy array that contains all the patches.
"""
n_offsets = int(len(patches_list) / n_center)
n_channels = patches_list[0].n_channels
height = patches_list[0].height
width = patches_list[0].width
patches_array = np.empty(
(n_center, n_offsets, n_channels, height, width),
dtype=patches_list[0].pixels.dtype,
)
total_index = 0
for p in range(n_center):
for o in range(n_offsets):
patches_array[p, o, ...] = patches_list[total_index].pixels
total_index += 1
return patches_array
def _create_patches_image(
    patches, patch_centers, patches_indices=None, offset_index=None, background="black"
):
    r"""
    Creates an :map:`Image` object in which the patches are located on the
    correct regions based on the centers. Thus, the image is a block-sparse
    matrix. It has also attached a `patch_Centers` :map:`PointCloud`
    object with the centers that correspond to the patches that the user
    selected to set.
    The patches argument can have any of the two formats that are returned
    from the `extract_patches()` and `extract_patches_around_landmarks()`
    methods of the :map:`Image` class. Specifically it can be:
    1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
    2. `list` of ``n_center * n_offset`` :map:`Image` objects
    Parameters
    ----------
    patches : `ndarray` or `list`
        The values of the patches. It can have any of the two formats that are
        returned from the `extract_patches()` and
        `extract_patches_around_landmarks()` methods. Specifically, it can
        either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
        `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects.
    patch_centers : :map:`PointCloud`
        The centers to set the patches around.
    patches_indices : `int` or `list` of `int` or ``None``, optional
        Defines the patches that will be set (copied) to the image. If ``None``,
        then all the patches are copied.
    offset_index : `int` or ``None``, optional
        The offset index within the provided `patches` argument, thus the index
        of the second dimension from which to sample. If ``None``, then ``0`` is
        used.
    background : ``{'black', 'white'}``, optional
        If ``'black'``, then the background is set equal to the minimum value
        of `patches`. If ``'white'``, then the background is set equal to the
        maximum value of `patches`.
    Returns
    -------
    patches_image : :map:`Image`
        The output patches image object.
    Raises
    ------
    ValueError
        Background must be either ''black'' or ''white''.
    """
    # If patches is a list, convert it to array
    if isinstance(patches, list):
        patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points)
    # Parse inputs
    if offset_index is None:
        offset_index = 0
    if patches_indices is None:
        patches_indices = np.arange(patches.shape[0])
    elif not isinstance(patches_indices, Iterable):
        # A single scalar index is wrapped so fancy-indexing below works.
        patches_indices = [patches_indices]
    # Compute patches image's shape
    n_channels = patches.shape[2]
    patch_shape0 = patches.shape[3]
    patch_shape1 = patches.shape[4]
    # Bounding box of all centers, padded by one full patch extent on each
    # side so that no patch is clipped at the image border.
    top, left = np.min(patch_centers.points, 0)
    bottom, right = np.max(patch_centers.points, 0)
    min_0 = np.floor(top - patch_shape0)
    min_1 = np.floor(left - patch_shape1)
    max_0 = np.ceil(bottom + patch_shape0)
    max_1 = np.ceil(right + patch_shape1)
    # NOTE(review): height/width are floats here (np.floor/np.ceil return
    # floats) — presumably Image.init_blank accepts float shapes; confirm.
    height = max_0 - min_0 + 1
    width = max_1 - min_1 + 1
    # Translate the patch centers to fit in the new image
    new_patch_centers = patch_centers.copy()
    new_patch_centers.points = patch_centers.points - np.array([[min_0, min_1]])
    # Create new image with the correct background values
    if background == "black":
        # "black" = the minimum value among the selected patches.
        patches_image = Image.init_blank(
            (height, width),
            n_channels,
            fill=np.min(patches[patches_indices]),
            dtype=patches.dtype,
        )
    elif background == "white":
        # "white" = the maximum value among the selected patches.
        patches_image = Image.init_blank(
            (height, width),
            n_channels,
            fill=np.max(patches[patches_indices]),
            dtype=patches.dtype,
        )
    else:
        raise ValueError("Background must be either " "black" " or " "white" ".")
    # If there was no slicing on the patches, then attach the original patch
    # centers. Otherwise, attach the sliced ones.
    if set(patches_indices) == set(range(patches.shape[0])):
        patches_image.landmarks["patch_centers"] = new_patch_centers
    else:
        tmp_centers = PointCloud(new_patch_centers.points[patches_indices])
        patches_image.landmarks["patch_centers"] = tmp_centers
    # Set the patches
    return patches_image.set_patches_around_landmarks(
        patches[patches_indices], group="patch_centers", offset_index=offset_index
    )
| bsd-3-clause | e74d0061d0ffb4d8975c5f4fb37c4525 | 36.922633 | 92 | 0.561554 | 4.452866 | false | false | false | false |
diefenbach/django-lfs | lfs/manage/property_groups/views.py | 1 | 12530 | import json
from django.contrib.auth.decorators import permission_required
from django.core.paginator import EmptyPage
from django.core.paginator import Paginator
from django.urls import reverse
from django.db.models import Q
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
import lfs.core.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import Category
from lfs.catalog.models import GroupsPropertiesRelation
from lfs.catalog.models import Product
from lfs.catalog.models import Property
from lfs.catalog.models import PropertyGroup
from lfs.core.utils import LazyEncoder
from lfs.core.signals import product_removed_property_group
from lfs.manage.property_groups.forms import PropertyGroupForm
@permission_required("core.manage_shop")
def manage_property_groups(request):
    """Entry point of the property group management.

    Redirects to the first existing property group, or to the
    "no property groups" view when none exist yet.
    """
    group_qs = PropertyGroup.objects.all()
    try:
        url = reverse("lfs_manage_property_group", kwargs={"id": group_qs[0].id})
    except IndexError:
        url = reverse("lfs_manage_no_property_groups")
    return HttpResponseRedirect(url)
@permission_required("core.manage_shop")
def manage_property_group(request, id, template_name="manage/property_groups/property_group.html"):
    """Edits property group with given id.

    GET renders the edit form; a valid POST saves the group and redirects
    back to this view with a success message cookie. An invalid POST falls
    through and re-renders the page with the bound (error-carrying) form.
    """
    property_group = get_object_or_404(PropertyGroup, pk=id)
    if request.method == "POST":
        form = PropertyGroupForm(instance=property_group, data=request.POST)
        if form.is_valid():
            form.save()
            return lfs.core.utils.set_message_cookie(
                url=reverse("lfs_manage_property_group", kwargs={"id": property_group.id}),
                msg=_(u"Property group has been saved."),
            )
    else:
        form = PropertyGroupForm(instance=property_group)
    return render(request, template_name, {
        "property_group": property_group,
        "property_groups": PropertyGroup.objects.all(),
        # Pre-rendered tab fragments (strings) for the template.
        "properties": properties_inline(request, id),
        "products": products_tab(request, id),
        "form": form,
        "current_id": int(id),
    })
@permission_required("core.manage_shop")
def no_property_groups(request, template_name="manage/property_groups/no_property_groups.html"):
    """Renders the hint page shown when no property group exists yet."""
    context = {}
    return render(request, template_name, context)
@permission_required("core.manage_shop")
def properties_inline(request, id, template_name="manage/property_groups/properties_inline.html"):
    """Renders the properties portlet for the property group with given id.

    Returns the rendered template as a string. The context contains the
    relations already assigned to the group (``gps``) and the global
    properties that may still be assigned (``properties``).
    """
    property_group = get_object_or_404(PropertyGroup, pk=id)
    gps = GroupsPropertiesRelation.objects.filter(group=id).select_related('property')
    # Assignable are all non-local properties which are not yet assigned to
    # this group (local properties belong to a single product only).
    assignable_properties = Property.objects.exclude(local=True).exclude(groupspropertiesrelation__in=gps)
    assignable_properties = assignable_properties.order_by('name')
    return render_to_string(template_name, request=request, context={
        "property_group": property_group,
        "properties": assignable_properties,
        "gps": gps,
    })
@permission_required("core.manage_shop")
def add_property_group(request, template_name="manage/property_groups/add_property_group.html"):
    """Adds a new property group.

    GET renders an empty form; a valid POST creates the group and redirects
    to its edit view with a success message cookie. An invalid POST falls
    through and re-renders the page with the bound form.
    """
    if request.method == "POST":
        form = PropertyGroupForm(data=request.POST)
        if form.is_valid():
            property_group = form.save()
            return lfs.core.utils.set_message_cookie(
                url=reverse("lfs_manage_property_group", kwargs={"id": property_group.id}),
                msg=_(u"Property group has been added."),
            )
    else:
        form = PropertyGroupForm()
    return render(request, template_name, {
        "form": form,
        "property_groups": PropertyGroup.objects.all(),
        # "came_from" is taken from POST on submit, GET otherwise; defaults
        # to the property group overview.
        "came_from": (request.POST if request.method == 'POST' else request.GET).get("came_from",
                                                                                    reverse("lfs_manage_property_groups")),
    })
@permission_required("core.manage_shop")
@require_POST
def delete_property_group(request, id):
    """Removes the property group with the given id (POST only) and
    redirects to the overview with a success message cookie."""
    group = get_object_or_404(PropertyGroup, pk=id)
    group.delete()
    return lfs.core.utils.set_message_cookie(
        url=reverse("lfs_manage_property_groups"),
        msg=_(u"Property group has been deleted."),
    )
@permission_required("core.manage_shop")
def assign_properties(request, group_id):
    """Assigns the properties given in the request body to the group with
    the passed group id and returns the refreshed properties portlet as a
    JSON payload."""
    for prop_id in request.POST.getlist("property-id"):
        GroupsPropertiesRelation.objects.get_or_create(group_id=group_id, property_id=prop_id)
    _udpate_positions(group_id)
    payload = {
        "html": [["#properties", properties_inline(request, group_id)]],
        "message": _(u"Properties have been assigned.")
    }
    return HttpResponse(json.dumps(payload, cls=LazyEncoder), content_type='application/json')
@permission_required("core.manage_shop")
def update_properties(request, group_id):
    """Update or Removes given properties (via request body) from passed group id.

    When the POST parameter ``action`` is ``"remove"``, the listed
    properties are detached from the group; otherwise the positions of all
    of the group's properties are updated from ``position-<id>`` fields.
    Returns the refreshed properties portlet as a JSON payload.
    """
    if request.POST.get("action") == "remove":
        for property_id in request.POST.getlist("property-id"):
            try:
                gp = GroupsPropertiesRelation.objects.get(group=group_id, property=property_id)
            except GroupsPropertiesRelation.DoesNotExist:
                # Already removed (or never assigned) — silently skip.
                pass
            else:
                gp.delete()
        message = _(u"Properties have been removed.")
    else:
        # Default message survives only if the group has no properties;
        # the loop below overwrites it on the first iteration.
        message = _(u"There are no properties to update.")
        for gp in GroupsPropertiesRelation.objects.filter(group=group_id):
            position = request.POST.get("position-%s" % gp.property.id, 999)
            gp.position = int(position)
            gp.save()
            message = _(u"Properties have been updated.")
    # Re-number positions into a clean 10, 20, 30, ... sequence.
    _udpate_positions(group_id)
    html = [["#properties", properties_inline(request, group_id)]]
    result = json.dumps({
        "html": html,
        "message": message
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
# Product tab
@permission_required("core.manage_shop")
def products_tab(request, product_group_id, template_name="manage/property_groups/products.html"):
    """Renders the products tab of the property group management as a string."""
    group = PropertyGroup.objects.get(pk=product_group_id)
    context = {
        "property_group": group,
        "products_inline": products_inline(request, product_group_id, as_string=True),
    }
    return render_to_string(template_name, request=request, context=context)
@permission_required("core.manage_shop")
def products_inline(request, product_group_id, as_string=False,
                    template_name="manage/property_groups/products_inline.html"):
    """Renders the products tab of the property groups management views.

    Lists the products assigned to the group and a paginated, filterable
    list of assignable products. Returns the rendered template as a string
    when ``as_string`` is True, otherwise a JSON payload for AJAX updates.
    """
    property_group = PropertyGroup.objects.get(pk=product_group_id)
    group_products = property_group.products.all().select_related('parent')
    r = request.POST if request.method == 'POST' else request.GET
    s = request.session
    # If we get the parameter ``keep-filters`` or ``page`` we take the
    # filters out of the request resp. session. The request takes precedence.
    # The page parameter is given if the user clicks on the next/previous page
    # links. The ``keep-filters`` parameters is given is the users adds/removes
    # products. In this way we keeps the current filters when we needed to. If
    # the whole page is reloaded there is no ``keep-filters`` or ``page`` and
    # all filters are reset as they should.
    if r.get("keep-filters") or r.get("page"):
        page = r.get("page", s.get("property_group_page", 1))
        filter_ = r.get("filter", s.get("filter"))
        category_filter = r.get("products_category_filter", s.get("products_category_filter"))
    else:
        page = r.get("page", 1)
        filter_ = r.get("filter")
        category_filter = r.get("products_category_filter")
    # The current filters are saved in any case for later use.
    s["property_group_page"] = page
    s["filter"] = filter_
    s["products_category_filter"] = category_filter
    filters = Q()
    if filter_:
        filters &= Q(name__icontains=filter_)
    if category_filter:
        if category_filter == "None":
            # Products without any category.
            filters &= Q(categories=None)
        elif category_filter == "All":
            pass
        else:
            # First we collect all sub categories and using the `in` operator
            category = lfs_get_object_or_404(Category, pk=category_filter)
            categories = [category]
            categories.extend(category.get_all_children())
            filters &= Q(categories__in=categories)
    products = Product.objects.select_related('parent').filter(filters)
    # Only products not yet assigned to the group are offered, 25 per page.
    paginator = Paginator(products.exclude(pk__in=group_products), 25)
    try:
        page = paginator.page(page)
    except EmptyPage:
        # NOTE(review): out-of-range pages yield the integer 0, not a Page —
        # presumably the template treats a falsy ``page`` specially; confirm.
        page = 0
    result = render_to_string(template_name, request=request, context={
        "property_group": property_group,
        "group_products": group_products,
        "page": page,
        "paginator": paginator,
        "filter": filter_
    })
    if as_string:
        return result
    else:
        return HttpResponse(
            json.dumps({
                "html": [["#products-inline", result]],
            }), content_type='application/json')
@permission_required("core.manage_shop")
def assign_products(request, group_id):
    """Assigns the products checked in the request body (keys of the form
    ``product-<id>``) to the property group with the given id and returns
    the refreshed products portlet as a JSON payload."""
    property_group = lfs_get_object_or_404(PropertyGroup, pk=group_id)
    for key in request.POST.keys():
        if not key.startswith("product"):
            continue
        product_pk = key.split("-")[1]
        property_group.products.add(Product.objects.get(pk=product_pk))
    payload = {
        "html": [["#products-inline", products_inline(request, group_id, as_string=True)]],
        "message": _(u"Products have been assigned.")
    }
    return HttpResponse(json.dumps(payload, cls=LazyEncoder), content_type='application/json')
@permission_required("core.manage_shop")
def remove_products(request, group_id):
    """Remove products from given property group with given property_group_id.

    Product ids are taken from POST keys of the form ``product-<id>``.
    Emits the ``product_removed_property_group`` signal per removed product
    and returns the refreshed products portlet as a JSON payload.
    """
    property_group = lfs_get_object_or_404(PropertyGroup, pk=group_id)
    for temp_id in request.POST.keys():
        if temp_id.startswith("product"):
            temp_id = temp_id.split("-")[1]
            product = Product.objects.get(pk=temp_id)
            property_group.products.remove(product)
            # Notify removing
            product_removed_property_group.send(sender=property_group, product=product)
    html = [["#products-inline", products_inline(request, group_id, as_string=True)]]
    result = json.dumps({
        "html": html,
        "message": _(u"Products have been removed.")
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
def _udpate_positions(group_id):
    """Re-number the property positions of the given group in steps of 10
    (10, 20, 30, ...), preserving the current ordering.

    NOTE: the misspelled name ("udpate") is kept intentionally — callers
    in this module reference it.
    """
    relations = GroupsPropertiesRelation.objects.filter(group=group_id)
    for index, relation in enumerate(relations, start=1):
        relation.position = index * 10
        relation.save()
@permission_required("core.manage_shop")
def sort_property_groups(request):
    """Sort property groups.

    The POST parameter ``serialized`` carries the jQuery-UI sortable
    format ``name=<id>&name=<id>&...``; positions are assigned in steps
    of 10 in the received order. Returns a JSON success message.
    """
    serialized = request.POST.get("serialized", "")
    pos = 10
    # ``"".split('&')`` yields [''], and the previous ``assert``-based
    # validation was both stripped under -O and crashed on the unpack for
    # empty bodies — skip blank entries explicitly instead.
    for entry in serialized.split('&'):
        if not entry:
            continue
        elem, pg_id = entry.split('=')
        pg = PropertyGroup.objects.get(pk=pg_id)
        pg.position = pos
        pg.save()
        pos += 10
    result = json.dumps({
        "message": _(u"The Property groups have been sorted."),
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
| bsd-3-clause | f71456d407e3089b746352d9a68cfd89 | 35.213873 | 124 | 0.654509 | 3.940252 | false | false | false | false |
diefenbach/django-lfs | lfs/voucher/models.py | 1 | 6149 | import datetime
from django.contrib.auth.models import User
from django.db import models
from django.db.models import F
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from lfs.tax.models import Tax
from lfs.voucher.settings import KIND_OF_CHOICES
from lfs.voucher.settings import ABSOLUTE
from lfs.voucher.settings import PERCENTAGE
from lfs.voucher.settings import MESSAGES
class VoucherOptions(models.Model):
    """Stores misc voucher options

    These options control how voucher numbers are generated:
    a fixed prefix/suffix wrapped around a random part of
    ``number_length`` characters drawn from ``number_letters``.
    """
    number_prefix = models.CharField(max_length=20, blank=True, default="")
    number_suffix = models.CharField(max_length=20, blank=True, default="")
    number_length = models.IntegerField(blank=True, null=True, default=5)
    number_letters = models.CharField(max_length=100, blank=True, default="ABCDEFGHIJKLMNOPQRSTUVWXYZ")
class VoucherGroup(models.Model):
    """Groups vouchers together.

    Groups are ordered by ``position`` (ascending). The creator is kept
    for bookkeeping and nulled if the user account is deleted.
    """
    name = models.CharField(max_length=100)
    creator = models.ForeignKey(User, models.SET_NULL, blank=True, null=True)
    creation_date = models.DateTimeField(auto_now_add=True)
    position = models.PositiveSmallIntegerField(default=10)

    class Meta:
        ordering = ("position", )
class Voucher(models.Model):
    """A voucher.
    Parameters:
    - number
        The unique number of the voucher. This number has to be provided
        by the shop customer within the checkout in order to get the
        credit.
    - group
        The group the voucher belongs to.
    - creator
        The creator of the voucher
    - creation_date
        The date the voucher has been created
    - start_date
        The date the voucher is going be valid. Before that date the
        voucher can't be used. May be ``None`` (valid immediately).
    - end_date
        The date the voucher is going to expire. After that date the
        voucher can't be used. May be ``None`` (never expires).
    - effective_from
        The cart price the voucher is from that the voucher is valid.
    - kind_of
        The kind of the voucher. Absolute or percentage.
    - value
        The value of the the voucher, which is interpreted either as an
        absolute value in the current currency or a percentage quotation.
    - tax
        The tax of the voucher. This is only taken, when the voucher is
        ABSOLUTE. If the voucher is PERCENTAGE the total tax of the
        discount is taken from every single product.
    - active
        Only active vouchers can be redeemed.
    - sums_up
        Whether this voucher can be summed up with other discounts/vouchers
    - used_amount
        Counts how often the voucher has been redeemed.
    - last_used_date
        The date the voucher has been redeemed last.
    - limit
        The quantity of how often the voucher can be used. If empty or 0
        the voucher can be used unlimited.
    """
    number = models.CharField(max_length=100, unique=True)
    group = models.ForeignKey(VoucherGroup, models.SET_NULL, related_name="vouchers", blank=True, null=True)
    creator = models.ForeignKey(User, models.SET_NULL, blank=True, null=True)
    creation_date = models.DateTimeField(auto_now_add=True)
    start_date = models.DateField(blank=True, null=True)
    effective_from = models.FloatField(default=0.0)
    end_date = models.DateField(blank=True, null=True)
    kind_of = models.PositiveSmallIntegerField(choices=KIND_OF_CHOICES)
    value = models.FloatField(default=0.0)
    tax = models.ForeignKey(Tax, models.SET_NULL, verbose_name=_(u"Tax"), blank=True, null=True)
    active = models.BooleanField(default=True)
    used_amount = models.PositiveSmallIntegerField(default=0)
    last_used_date = models.DateTimeField(blank=True, null=True)
    limit = models.PositiveSmallIntegerField(blank=True, null=True, default=1)
    sums_up = models.BooleanField(_(u"Sums up"), default=True, help_text=_(u'Sums up with other discounts/vouchers'))

    class Meta:
        ordering = ("creation_date", "number")

    def __str__(self):
        return self.number

    def get_price_net(self, request, cart=None):
        """Returns the net price of the voucher.
        """
        if self.kind_of == ABSOLUTE:
            return self.value - self.get_tax(request)
        else:
            return cart.get_price_net(request) * (self.value / 100)

    def get_price_gross(self, request, cart=None):
        """Returns the gross price of the voucher.
        """
        if self.kind_of == ABSOLUTE:
            return self.value
        else:
            return cart.get_price_gross(request) * (self.value / 100)

    def get_tax(self, request, cart=None):
        """Returns the absolute tax of the voucher
        """
        if self.kind_of == ABSOLUTE:
            if self.tax:
                # Extract the tax portion included in the gross value.
                return (self.tax.rate / (100 + self.tax.rate)) * self.value
            else:
                return 0.0
        else:
            return cart.get_tax(request) * (self.value / 100)

    def mark_as_used(self):
        """Mark voucher as used.
        """
        # F() expression keeps the increment atomic on the database side.
        self.used_amount = F('used_amount') + 1
        self.last_used_date = timezone.now()
        self.save()
        self.refresh_from_db()

    def is_effective(self, request, cart):
        """Returns a tuple ``(is_effective, message)`` for this voucher.

        A voucher is not effective if it is inactive, its usage limit has
        been reached, today is outside its validity period, or the cart's
        gross price is below ``effective_from``.
        """
        if self.active is False:
            return (False, MESSAGES[1])
        # ``limit`` is nullable (None/0 == unlimited); guard against a
        # TypeError from comparing None with an int.
        if self.limit and (self.used_amount >= self.limit):
            return (False, MESSAGES[2])
        # start_date/end_date are nullable; comparing None with a date
        # raises TypeError in Python 3, so only check when set.
        if self.start_date and self.start_date > datetime.date.today():
            return (False, MESSAGES[3])
        if self.end_date and self.end_date < datetime.date.today():
            return (False, MESSAGES[4])
        if self.effective_from > cart.get_price_gross(request):
            return (False, MESSAGES[5])
        return (True, MESSAGES[0])

    def is_absolute(self):
        """Returns True if voucher is absolute.
        """
        return self.kind_of == ABSOLUTE

    def is_percentage(self):
        """Returns True if voucher is percentage.
        """
        return self.kind_of == PERCENTAGE
| bsd-3-clause | 1b1192eb6a08935dc83248bd94bf708d | 33.9375 | 117 | 0.63181 | 4.045395 | false | false | false | false |
diefenbach/django-lfs | lfs/cart/views.py | 1 | 18548 | # python imports
import json
# django imports
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
# lfs imports
import lfs.cart.utils
import lfs.catalog.utils
from lfs.payment.models import PaymentMethod
from lfs.shipping.models import ShippingMethod
import lfs.voucher.utils
import lfs.discounts.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.core.signals import cart_changed
from lfs.core import utils as core_utils
from lfs.catalog.models import Product, PropertyGroup
from lfs.catalog.models import Property
from lfs.cart import utils as cart_utils
from lfs.cart.models import CartItem
from lfs.core.models import Country
from lfs.core.utils import LazyEncoder
from lfs.shipping import utils as shipping_utils
from lfs.payment import utils as payment_utils
from lfs.customer import utils as customer_utils
def cart(request, template_name="lfs/cart/cart.html"):
    """Displays the cart page (delegates the cart content to cart_inline)."""
    context = {
        "voucher_number": lfs.voucher.utils.get_current_voucher_number(request),
        "cart_inline": cart_inline(request),
    }
    return render(request, template_name, context)
def cart_inline(request, template_name="lfs/cart/cart_inline.html"):
    """
    The actual content of the cart.
    This is factored out to be reused within 'normal' and ajax requests.

    Computes the cart total as product price + shipping + payment costs,
    then subtracts the best combination of discounts and/or voucher
    (either everything that "sums up" together, or the single largest
    of voucher vs. discounts), and renders the template to a string.
    """
    cart = cart_utils.get_cart(request)
    shopping_url = lfs.cart.utils.get_go_on_shopping_url(request)
    if cart is None:
        # No cart yet: render the empty-cart variant of the template.
        return render_to_string(template_name, request=request, context={
            "shopping_url": shopping_url,
        })
    shop = core_utils.get_default_shop(request)
    countries = shop.shipping_countries.all()
    selected_country = shipping_utils.get_selected_shipping_country(request)
    # Get default shipping method, so that we have a one in any case.
    selected_shipping_method = shipping_utils.get_selected_shipping_method(request)
    selected_payment_method = payment_utils.get_selected_payment_method(request)
    shipping_costs = shipping_utils.get_shipping_costs(request, selected_shipping_method)
    # Payment
    payment_costs = payment_utils.get_payment_costs(request, selected_payment_method)
    # Cart costs
    cart_price = cart.get_price_gross(request) + shipping_costs["price_gross"] + payment_costs["price"]
    cart_tax = cart.get_tax(request) + shipping_costs["tax"] + payment_costs["tax"]
    # get voucher data (if voucher exists)
    voucher_data = lfs.voucher.utils.get_voucher_data(request, cart)
    # get discounts data
    discounts_data = lfs.discounts.utils.get_discounts_data(request)
    # calculate total value of discounts and voucher that sum up
    summed_up_value = discounts_data['summed_up_value']
    if voucher_data['sums_up']:
        summed_up_value += voucher_data['voucher_value']
    # initialize discounts with summed up discounts
    use_voucher = voucher_data['voucher'] is not None
    discounts = discounts_data['summed_up_discounts']
    # The customer gets whichever is worth more: the combined "sums up"
    # reductions, or the single best non-summing voucher/discount.
    if voucher_data['voucher_value'] > summed_up_value or discounts_data['max_value'] > summed_up_value:
        # use not summed up value
        if voucher_data['voucher_value'] > discounts_data['max_value']:
            # use voucher only
            discounts = []
        else:
            # use discount only
            discounts = discounts_data['max_discounts']
            use_voucher = False
    for discount in discounts:
        cart_price -= discount["price_gross"]
        cart_tax -= discount["tax"]
    if use_voucher:
        cart_price -= voucher_data['voucher_value']
        cart_tax -= voucher_data['voucher_tax']
    # Reductions must never push the total below zero.
    cart_price = max(0, cart_price)
    cart_tax = max(0, cart_tax)
    # Calc delivery time for cart (which is the maximum of all cart items)
    max_delivery_time = cart.get_delivery_time(request)
    cart_items = []
    for cart_item in cart.get_items():
        product = cart_item.product
        quantity = product.get_clean_quantity(cart_item.amount)
        cart_items.append({
            "obj": cart_item,
            "quantity": quantity,
            "product": product,
            "product_price_net": cart_item.get_price_net(request),
            "product_price_gross": cart_item.get_price_gross(request),
            "product_tax": cart_item.get_tax(request),
        })
    return render_to_string(template_name, request=request, context={
        "cart": cart,
        "cart_items": cart_items,
        "cart_price": cart_price,
        "cart_tax": cart_tax,
        "shipping_methods": shipping_utils.get_valid_shipping_methods(request),
        "selected_shipping_method": selected_shipping_method,
        "shipping_costs": shipping_costs,
        "payment_methods": payment_utils.get_valid_payment_methods(request),
        "selected_payment_method": selected_payment_method,
        "payment_price": payment_costs["price"],
        "countries": countries,
        "selected_country": selected_country,
        "max_delivery_time": max_delivery_time,
        "shopping_url": shopping_url,
        "discounts": discounts,
        "display_voucher": use_voucher,
        "voucher_number": voucher_data['voucher_number'],
        "voucher_value": voucher_data['voucher_value'],
        "voucher_tax": voucher_data['voucher_tax'],
        "voucher_message": voucher_data['voucher_message'],
    })
def added_to_cart(request, template_name="lfs/cart/added_to_cart.html"):
    """
    Displays the product that has just been put into the cart together
    with the selected accessories.

    The items shown are the ones stored in the session by add_to_cart /
    add_accessory_to_cart.
    """
    session_items = request.session.get("cart_items", [])
    item_count = len(session_items)
    if session_items:
        first_product = session_items[0].product
        accessories = first_product.get_accessories()
    else:
        first_product = None
        accessories = []
    context = {
        "plural": item_count > 1,
        "cart_items_count": item_count,
        "shopping_url": request.META.get("HTTP_REFERER", "/"),
        "product_accessories": accessories,
        "product": first_product,
        "cart_items": added_to_cart_items(request),
    }
    return render(request, template_name, context)
def added_to_cart_items(request, template_name="lfs/cart/added_to_cart_items.html"):
    """
    Displays the added items for the added-to-cart view.

    Reads the cart items stored in the session by add_to_cart /
    add_accessory_to_cart and renders them (with a grand total) to a
    string.
    """
    total = 0
    cart_items = []
    for cart_item in request.session.get("cart_items", []):
        total += cart_item.get_price_gross(request)
        product = cart_item.product
        quantity = product.get_clean_quantity(cart_item.amount)
        cart_items.append({
            "product": product,
            "obj": cart_item,
            "quantity": quantity,
            "product_price_net": cart_item.get_price_net(request),
            "product_price_gross": cart_item.get_price_gross(request),
            "product_tax": cart_item.get_tax(request),
        })
    return render_to_string(template_name, request=request, context={
        "total": total,
        "cart_items": cart_items,
    })
# Actions
def add_accessory_to_cart(request, product_id):
    """
    Adds the product with passed product_id as an accessory to the cart and
    updates the added-to-cart view.

    Returns the re-rendered added-to-cart items fragment as the response.
    """
    product = lfs_get_object_or_404(Product, pk=product_id)
    # for product with variants add default variant
    if product.is_product_with_variants():
        variant = product.get_default_variant()
        if variant:
            product = variant
        else:
            # No default variant configured: nothing to add, just re-render.
            return HttpResponse(added_to_cart_items(request))
    quantity = product.get_clean_quantity_value(request.POST.get("quantity", 1))
    session_cart_items = request.session.get("cart_items", [])
    cart = cart_utils.get_cart(request)
    cart_item = cart.add(product=product, amount=quantity)
    # Update session
    if cart_item not in session_cart_items:
        session_cart_items.append(cart_item)
    else:
        # Item already displayed: bump the amount of the session copy so the
        # added-to-cart view reflects the new quantity.
        for session_cart_item in session_cart_items:
            if cart_item.product == session_cart_item.product:
                session_cart_item.amount += quantity
    request.session["cart_items"] = session_cart_items
    cart_changed.send(cart, request=request)
    return HttpResponse(added_to_cart_items(request))
def add_to_cart(request, product_id=None):
    """
    Adds the passed product with passed product_id to the cart after
    some validations have been taken place. The amount is taken from the query
    string.

    Validates configurable-product properties, caps the amount at the
    available stock, adds selected accessories and finally redirects to
    the "added to cart" view (or the URL configured in
    ``settings.LFS_AFTER_ADD_TO_CART``).
    """
    if product_id is None:
        product_id = (request.POST if request.method == 'POST' else request.GET).get("product_id")
    product = lfs_get_object_or_404(Product, pk=product_id)
    # Only active and deliverable products can be added to the cart.
    if not (product.is_active() and product.is_deliverable()):
        raise Http404()
    quantity = request.POST.get("quantity", "1.0")
    quantity = product.get_clean_quantity_value(quantity)
    # Validate properties (They are added below)
    properties_dict = {}
    if product.is_configurable_product():
        for key, value in request.POST.items():
            # Property fields are posted as "property-<group_id>-<property_id>".
            if key.startswith("property-"):
                try:
                    property_group_id, property_id = key.split("-")[1:]
                except IndexError:
                    continue
                try:
                    prop = Property.objects.get(pk=property_id)
                except Property.DoesNotExist:
                    continue
                # Group id '0' stands for a property without a group.
                if property_group_id != '0':
                    try:
                        PropertyGroup.objects.get(pk=property_group_id)
                    except PropertyGroup.DoesNotExist:
                        continue
                if prop.is_number_field:
                    try:
                        value = lfs.core.utils.atof(value)
                    except ValueError:
                        value = 0.0
                key = '{0}_{1}'.format(property_group_id, property_id)
                properties_dict[key] = {'value': value,
                                        'property_group_id': property_group_id,
                                        'property_id': property_id}
                # validate property's value
                if prop.is_number_field:
                    if (value < prop.unit_min) or (value > prop.unit_max):
                        msg = _(u"%(name)s must be between %(min)s and %(max)s %(unit)s.") % {"name": prop.title, "min": prop.unit_min, "max": prop.unit_max, "unit": prop.unit}
                        return lfs.core.utils.set_message_cookie(
                            product.get_absolute_url(), msg)
                    # calculate valid steps
                    steps = []
                    x = prop.unit_min
                    while x < prop.unit_max:
                        steps.append("%.2f" % x)
                        x += prop.unit_step
                    steps.append("%.2f" % prop.unit_max)
                    # Compare as formatted strings to avoid float fuzziness.
                    value = "%.2f" % value
                    if value not in steps:
                        msg = _(u"Your entered value for %(name)s (%(value)s) is not in valid step width, which is %(step)s.") % {"name": prop.title, "value": value, "step": prop.unit_step}
                        return lfs.core.utils.set_message_cookie(
                            product.get_absolute_url(), msg)
    if product.get_active_packing_unit():
        # Posted quantity is in packages; convert to item amount.
        quantity = product.get_amount_by_packages(quantity)
    cart = cart_utils.get_or_create_cart(request)
    cart_item = cart.add(product, properties_dict, quantity)
    cart_items = [cart_item]
    # Check stock amount
    message = ""
    if product.manage_stock_amount and cart_item.amount > product.stock_amount and not product.order_time:
        if product.stock_amount == 0:
            message = _(u"Sorry, but '%(product)s' is not available anymore.") % {"product": product.name}
        elif product.stock_amount == 1:
            message = _(u"Sorry, but '%(product)s' is only one time available.") % {"product": product.name}
        else:
            message = _(u"Sorry, but '%(product)s' is only %(amount)s times available.") % {"product": product.name, "amount": product.stock_amount}
        # Cap the cart item at the available stock.
        cart_item.amount = product.stock_amount
        cart_item.save()
    # Add selected accessories to cart
    for key, value in request.POST.items():
        if key.startswith("accessory"):
            accessory_id = key.split("-")[1]
            try:
                accessory = Product.objects.get(pk=accessory_id)
            except ObjectDoesNotExist:
                continue
            # for product with variants add default variant
            if accessory.is_product_with_variants():
                accessory_variant = accessory.get_default_variant()
                if accessory_variant:
                    accessory = accessory_variant
                else:
                    continue
            # Get quantity
            quantity = request.POST.get("quantity-%s" % accessory_id, 0)
            quantity = accessory.get_clean_quantity_value(quantity)
            cart_item = cart.add(product=accessory, amount=quantity)
            cart_items.append(cart_item)
    # Store cart items for retrieval within added_to_cart.
    request.session["cart_items"] = cart_items
    cart_changed.send(cart, request=request)
    # Update the customer's shipping method (if appropriate)
    customer = customer_utils.get_or_create_customer(request)
    shipping_utils.update_to_valid_shipping_method(request, customer, save=True)
    # Update the customer's payment method (if appropriate)
    payment_utils.update_to_valid_payment_method(request, customer, save=True)
    # Save the cart to update modification date
    cart.save()
    try:
        url_name = settings.LFS_AFTER_ADD_TO_CART
    except AttributeError:
        url_name = "lfs_added_to_cart"
    if message:
        return lfs.core.utils.set_message_cookie(reverse(url_name), message)
    else:
        return HttpResponseRedirect(reverse(url_name))
def delete_cart_item(request, cart_item_id):
    """
    Removes the cart item with the given id from the request's cart and
    returns the re-rendered cart inline.
    """
    cart = cart_utils.get_cart(request)
    if not cart:
        raise Http404
    cart_item = lfs_get_object_or_404(CartItem, pk=cart_item_id)
    # Never delete an item that belongs to somebody else's cart.
    if cart_item.cart.id != cart.id:
        raise Http404
    cart_item.delete()
    cart_changed.send(cart, request=request)
    return HttpResponse(cart_inline(request))
def refresh_cart(request):
    """
    Refreshes the cart after changes have taken place, e.g. the amount of a
    product or the shipping/payment method.

    Updates (in this order): the customer's country (and addresses), the
    amounts of all cart items (clamped to available stock), the shipping
    method and the payment method. Returns a JSON response with the
    re-rendered cart inline and an optional stock warning message.
    """
    cart = cart_utils.get_cart(request)
    if not cart:
        raise Http404
    customer = customer_utils.get_or_create_customer(request)
    # Update country
    country_iso = request.POST.get("country")
    if country_iso:
        selected_country = Country.objects.get(code=country_iso.lower())
        customer.selected_country_id = selected_country.id
        # BUGFIX: the addresses were saved twice each; once is enough.
        if customer.selected_shipping_address:
            customer.selected_shipping_address.country = selected_country
            customer.selected_shipping_address.save()
        if customer.selected_invoice_address:
            customer.selected_invoice_address.country = selected_country
            customer.selected_invoice_address.save()
        # NOTE: The customer has to be saved already here in order to calculate
        # a possible new valid shipping method below, which could be triggered
        # by the change of the shipping country.
        customer.save()
    # Update Amounts
    message = ""
    for item in cart.get_items():
        amount = request.POST.get("amount-cart-item_%s" % item.id, "0.0")
        amount = item.product.get_clean_quantity_value(amount, allow_zero=True)
        if item.product.manage_stock_amount and amount > item.product.stock_amount and not item.product.order_time:
            # Clamp the requested amount to the available stock and inform
            # the customer about it.
            amount = item.product.stock_amount
            if amount < 0:
                amount = 0
            # BUGFIX: interpolate *after* translation (``_(...) % ...``), so
            # the original msgid is looked up in the translation catalog.
            if amount == 0:
                message = _(u"Sorry, but '%(product)s' is not available anymore.") % {"product": item.product.name}
            elif amount == 1:
                message = _(u"Sorry, but '%(product)s' is only one time available.") % {"product": item.product.name}
            else:
                message = _(u"Sorry, but '%(product)s' is only %(amount)s times available.") % {"product": item.product.name, "amount": amount}
        if item.product.get_active_packing_unit():
            item.amount = item.product.get_amount_by_packages(float(amount))
        else:
            item.amount = amount
        if amount == 0:
            item.delete()
        else:
            item.save()
    # IMPORTANT: We have to send the signal already here, because the valid
    # shipping methods might be dependent on the price.
    cart_changed.send(cart, request=request)
    # Update shipping method
    shipping_method = get_object_or_404(ShippingMethod, pk=request.POST.get("shipping_method"))
    customer.selected_shipping_method = shipping_method
    valid_shipping_methods = shipping_utils.get_valid_shipping_methods(request)
    if customer.selected_shipping_method not in valid_shipping_methods:
        customer.selected_shipping_method = shipping_utils.get_default_shipping_method(request)
    # Update payment method
    payment_method = get_object_or_404(PaymentMethod, pk=request.POST.get("payment_method"))
    customer.selected_payment_method = payment_method
    # Last but not least we save the customer ...
    customer.save()
    result = json.dumps({
        "html": cart_inline(request),
        "message": message,
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
def check_voucher(request):
    """
    Re-renders the cart inline after the voucher number has been changed.
    """
    number = lfs.voucher.utils.get_current_voucher_number(request)
    lfs.voucher.utils.set_current_voucher_number(request, number)
    payload = {
        "html": (("#cart-inline", cart_inline(request)),)
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
| bsd-3-clause | 08a305616c669041fb374d95e26a4466 | 37.008197 | 189 | 0.630149 | 3.945544 | false | false | false | false |
diefenbach/django-lfs | lfs/manage/product/accessories.py | 1 | 8811 | import json
from django.contrib.auth.decorators import permission_required
from django.core.paginator import EmptyPage
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import Category
from lfs.catalog.models import Product
from lfs.catalog.models import ProductAccessories
from lfs.catalog.settings import VARIANT
from lfs.core.signals import product_changed
from lfs.core.utils import LazyEncoder
@permission_required("core.manage_shop")
def manage_accessories(request, product_id, template_name="manage/product/accessories.html"):
    """Renders the accessories tab for the product with the given id.
    """
    product = Product.objects.get(pk=product_id)
    accessories_inline = manage_accessories_inline(request, product_id, as_string=True)
    # Options for the "items per page" select box; the amount last chosen by
    # the user (kept in the session) is preselected.
    current_amount = request.session.get("accessories-amount")
    amount_options = [
        {"value": value, "selected": value == current_amount}
        for value in (10, 25, 50, 100)
    ]
    return render_to_string(template_name, request=request, context={
        "product": product,
        "accessories_inline": accessories_inline,
        "amount_options": amount_options,
    })
@permission_required("core.manage_shop")
def manage_accessories_inline(request, product_id, as_string=False, template_name="manage/product/accessories_inline.html"):
    """View which shows all accessories for the product with the passed id.

    Renders the inline part of the accessories tab: the currently assigned
    accessories plus a filterable, paginated list of candidate products.
    Returns the rendered HTML directly when ``as_string`` is True, otherwise
    a JSON response that maps the ``#accessories-inline`` element to the HTML.
    """
    product = Product.objects.get(pk=product_id)
    product_accessories = ProductAccessories.objects.filter(product=product_id)
    # Products that are already accessories are excluded from the candidates.
    accessory_ids = [p.accessory.id for p in product_accessories]
    r = request.POST if request.method == 'POST' else request.GET
    s = request.session
    # If we get the parameter ``keep-filters`` or ``page`` we take the
    # filters out of the request resp. session. The request takes precedence.
    # The page parameter is given if the user clicks on the next/previous page
    # links. The ``keep-filters`` parameters is given is the users adds/removes
    # products. In this way we keeps the current filters when we needed to. If
    # the whole page is reloaded there is no ``keep-filters`` or ``page`` and
    # all filters are reset as they should.
    if r.get("keep-filters") or r.get("page"):
        page = r.get("page", s.get("accessories_page", 1))
        filter_ = r.get("filter", s.get("filter"))
        category_filter = r.get("accessories_category_filter", s.get("accessories_category_filter"))
    else:
        page = r.get("page", 1)
        filter_ = r.get("filter")
        category_filter = r.get("accessories_category_filter")
    # The current filters are saved in any case for later use.
    s["accessories_page"] = page
    s["filter"] = filter_
    s["accessories_category_filter"] = category_filter
    try:
        # Page size: the request value wins, then the session value; a
        # missing value (None) raises TypeError and falls back to 25.
        s["accessories-amount"] = int(r.get("accessories-amount",
            s.get("accessories-amount")))
    except TypeError:
        s["accessories-amount"] = 25
    filters = Q()
    if filter_:
        # Match by name or SKU; variants without an own SKU/name are matched
        # against their parent's SKU/name instead.
        filters &= Q(name__icontains=filter_)
        filters |= Q(sku__icontains=filter_)
        filters |= (Q(sub_type=VARIANT) & Q(active_sku=False) & Q(parent__sku__icontains=filter_))
        filters |= (Q(sub_type=VARIANT) & Q(active_name=False) & Q(parent__name__icontains=filter_))
    if category_filter:
        if category_filter == "None":
            filters &= Q(categories=None)
        elif category_filter == "All":
            pass
        else:
            # First we collect all sub categories and using the `in` operator
            category = lfs_get_object_or_404(Category, pk=category_filter)
            categories = [category]
            categories.extend(category.get_all_children())
            filters &= Q(categories__in=categories)
    products = Product.objects.filter(filters).exclude(pk=product_id)
    paginator = Paginator(products.exclude(pk__in=accessory_ids), s["accessories-amount"])
    try:
        page = paginator.page(page)
    except EmptyPage:
        # NOTE(review): out-of-range page is passed to the template as the
        # falsy value 0 — presumably the template renders "no results" then;
        # confirm against the template.
        page = 0
    result = render_to_string(template_name, request=request, context={
        "product": product,
        "product_accessories": product_accessories,
        "page": page,
        "paginator": paginator,
        "filter": filter_,
    })
    if as_string:
        return result
    else:
        return HttpResponse(
            json.dumps({
                "html": [["#accessories-inline", result]],
            }), content_type='application/json')
# Actions
@permission_required("core.manage_shop")
def load_tab(request, product_id):
    """Returns the rendered accessories tab for the given product.
    """
    return HttpResponse(manage_accessories(request, product_id))
@permission_required("core.manage_shop")
def add_accessories(request, product_id):
    """Adds passed accessories to product with passed id.

    Every POST key of the form ``product-<id>`` marks a product that is
    attached as an accessory to the product with ``product_id``. Returns a
    JSON response with the re-rendered accessories inline and a message.
    """
    parent_product = Product.objects.get(pk=product_id)
    for key in request.POST.keys():
        # Idiomatic negation instead of ``startswith(...) is False``.
        if not key.startswith("product"):
            continue
        accessory_id = key.split("-")[1]
        accessory = Product.objects.get(pk=accessory_id)
        ProductAccessories.objects.create(product=parent_product, accessory=accessory)
    # Renumber positions so the new entries get a well-defined order.
    _update_positions(parent_product)
    product_changed.send(parent_product)
    html = [["#accessories-inline", manage_accessories_inline(request, product_id, as_string=True)]]
    result = json.dumps({
        "html": html,
        "message": _(u"Accessories have been added.")
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
# TODO: Rename to "update_accessories"
@permission_required("core.manage_shop")
def remove_accessories(request, product_id):
    """Removes passed accessories from product with passed id.

    Despite its name this view handles two actions, selected via the POST
    parameter ``action``:

    * ``remove``: deletes all accessories marked by ``accessory-<id>`` keys.
    * anything else: updates quantity and position of the accessories given
      by ``quantity-<id>`` / ``position-<id>`` keys.

    Returns a JSON response with the re-rendered accessories inline.
    """
    parent_product = Product.objects.get(pk=product_id)
    if request.POST.get("action") == "remove":
        for key in request.POST.keys():
            if not key.startswith("accessory"):
                continue
            accessory_id = key.split("-")[1]
            accessory = Product.objects.get(pk=accessory_id)
            # QuerySet delete: removes the (product, accessory) link(s).
            ProductAccessories.objects.filter(product=parent_product, accessory=accessory).delete()
        _update_positions(parent_product)
        product_changed.send(parent_product)
        message = _(u"Accessories have been removed.")
    else:
        for key in request.POST.keys():
            if not key.startswith("quantity"):
                continue
            accessory_id = key.split("-")[1]
            accessory = Product.objects.get(pk=accessory_id)
            product_accessory = ProductAccessories.objects.get(product=parent_product, accessory=accessory)
            # Update quantity and position from the matching POST fields.
            product_accessory.quantity = request.POST.get("quantity-%s" % accessory_id)
            product_accessory.position = request.POST.get("position-%s" % accessory_id)
            product_accessory.save()
            product_changed.send(product_accessory.product)
        _update_positions(parent_product)
        message = _(u"Accessories have been updated.")
    # Both branches answer with the same payload; build it only once.
    html = [["#accessories-inline", manage_accessories_inline(request, product_id, as_string=True)]]
    result = json.dumps({
        "html": html,
        "message": message
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def update_accessories(request, product_id):
    """Updates the accessories activity state for product variants.

    The checkbox value ``active_accessories`` is present (truthy) when the
    accessories are active; a missing/empty value deactivates them.
    """
    product = Product.objects.get(pk=product_id)
    # bool() of the POST value replaces the explicit if/else True/False.
    product.active_accessories = bool(request.POST.get("active_accessories"))
    product.save()
    html = [["#accessories-inline", manage_accessories_inline(request, product_id, as_string=True)]]
    result = json.dumps({
        "html": html,
        "message": _(u"Accessories have been updated.")
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
def _update_positions(product):
    """Renumbers the positions of the given product's accessories in steps of 10.
    """
    for index, product_accessory in enumerate(ProductAccessories.objects.filter(product=product), start=1):
        product_accessory.position = index * 10
        product_accessory.save()
| bsd-3-clause | cd7fc89b99f195e0fd01029cf311f011 | 33.964286 | 124 | 0.649302 | 3.938757 | false | false | false | false |
diefenbach/django-lfs | lfs/manage/product/categories.py | 1 | 3030 | import json
# django imports
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
# lfs imports
from lfs.caching.utils import lfs_get_object_or_404
from lfs.core.utils import LazyEncoder
from lfs.core.signals import category_changed
from lfs.catalog.models import Product
from lfs.catalog.models import Category
@permission_required("core.manage_shop")
def manage_categories(request, product_id, template_name="manage/product/categories.html"):
    """Displays the category assignment tree for the given product.
    """
    product = lfs_get_object_or_404(Product, pk=product_id)
    assigned_ids = [c.id for c in product.get_categories()]
    # Build the tree top-down starting from the root categories; the
    # children are rendered recursively by ``children_categories``.
    top_level = []
    for root in Category.objects.filter(parent=None):
        top_level.append({
            "id": root.id,
            "slug": root.slug,
            "name": root.name,
            "url": root.get_absolute_url(),
            "checked": root.id in assigned_ids,
            "children": children_categories(request, root, assigned_ids),
        })
    rendered = render_to_string(template_name, request=request, context={
        "product": product,
        "categories": top_level
    })
    return HttpResponse(rendered)
@permission_required("core.manage_shop")
def children_categories(request, category, product_category_ids,
    template_name="manage/product/categories_children.html"):
    """Renders the children categories of given category as HTML.

    Recurses through the whole subtree below ``category``; a category is
    marked ``checked`` when its id is in ``product_category_ids``.
    """
    categories = []
    # BUGFIX (readability): the loop variable used to shadow the ``category``
    # parameter; use a distinct name for the child.
    for child in category.category_set.all():
        children = children_categories(request, child, product_category_ids)
        categories.append({
            "id": child.id,
            "slug": child.slug,
            "name": child.name,
            "url": child.get_absolute_url(),
            "checked": child.id in product_category_ids,
            "children": children,
        })
    result = render_to_string(template_name, request=request, context={
        "categories": categories
    })
    return result
# Actions
@permission_required("core.manage_shop")
def change_categories(request, product_id):
    """Changes categories by passed request body.

    Replaces the product's category assignments with the ``categories``
    list from the POST data and notifies listeners about both the old and
    the new set of categories.
    """
    product = lfs_get_object_or_404(Product, pk=product_id)
    # Signal that the old categories of the product have been changed.
    for category in product.categories.all():
        category_changed.send(category)
    if request.method == "POST":
        product.categories = request.POST.getlist("categories")
        product.save()
    # Signal that the new categories of the product have been changed.
    for category in product.categories.all():
        category_changed.send(category)
    return HttpResponse(json.dumps({
        "message": _(u"Categories have been saved."),
    }, cls=LazyEncoder), content_type='application/json')
| bsd-3-clause | 3f385609e7981531a3198e5222bb6fa6 | 31.580645 | 91 | 0.672277 | 4.202497 | false | false | false | false |
diefenbach/django-lfs | lfs/marketing/tests.py | 1 | 10081 | from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from lfs.addresses.models import Address
from lfs.catalog.models import Category
from lfs.catalog.models import Product
import lfs.marketing.utils
from lfs.marketing.models import Topseller
from lfs.marketing.utils import calculate_product_sales
from lfs.order.models import Order
from lfs.order.models import OrderItem
from lfs.order.settings import CLOSED
class RatingMailTestCase(TestCase):
    """Tests which orders qualify for rating reminder mails.
    """
    fixtures = ['lfs_shop.xml', "lfs_user.xml"]

    def setUp(self):
        """Creates one product within a category and one order for it.
        """
        self.p1 = Product.objects.create(name="Product 1", slug="product-1", active=True)
        self.c1 = Category.objects.create(name="Category 1", slug="category-1")
        self.c1.save()
        self.c1.products.set([self.p1])
        self.c1.save()
        address = Address.objects.create()
        self.o = Order.objects.create(invoice_address=address, shipping_address=address)
        self.oi1 = OrderItem.objects.create(order=self.o, product_amount=1, product=self.p1)

    def test_get_orders(self):
        """Only closed orders older than the limit are returned.
        """
        # First there is no closed order
        orders = lfs.marketing.utils.get_orders()
        self.assertEqual(len(orders), 0)
        # Close order
        self.o.state = CLOSED
        self.o.save()
        # But order is closed within the limit, so there is still no order for
        # rating mails
        orders = lfs.marketing.utils.get_orders()
        self.assertEqual(len(orders), 0)
        # Set the state modified date before the limit
        self.o.state_modified = timezone.now() - timedelta(days=15)
        self.o.save()
        # Now there is a order for which there should a rating mail be sent
        orders = lfs.marketing.utils.get_orders()
        self.assertEqual(len(orders), 1)
class TopsellerTestCase(TestCase):
    """Tests the Topseller model
    """
    fixtures = ['lfs_shop.xml', "lfs_user.xml"]

    def setUp(self):
        """Creates one product and one Topseller entry for it.
        """
        self.p1 = Product.objects.create(name="Product 1", slug="product-1", active=True)
        self.t1 = Topseller.objects.create(product=self.p1)

    def test_defaults(self):
        """A freshly created Topseller starts at position 1.
        """
        self.assertEqual(self.t1.position, 1)
class TopsellerUtilsTestCase(TestCase):
    """Tests the utils of the lfs.marketing

    Covers calculated topsellers (derived from order item amounts) and
    explicitly assigned ``Topseller`` entries, both shop-wide and per
    category.
    """
    fixtures = ['lfs_shop.xml', "lfs_user.xml"]

    def setUp(self):
        """Creates four products (two per sub category) and one order with
        increasing amounts per product, so p4 > p3 > p2 > p1 by sales.
        """
        self.p1 = Product.objects.create(name="Product 1", slug="product-1", active=True)
        self.p2 = Product.objects.create(name="Product 2", slug="product-2", active=True)
        self.p3 = Product.objects.create(name="Product 3", slug="product-3", active=True)
        self.p4 = Product.objects.create(name="Product 4", slug="product-4", active=True)
        self.c1 = Category.objects.create(name="Category 1", slug="category-1")
        self.c1.save()
        self.c11 = Category.objects.create(name="Category 11", slug="category-11", parent=self.c1)
        self.c11.products.set([self.p1, self.p2])
        self.c11.save()
        self.c12 = Category.objects.create(name="Category 12", slug="category-12", parent=self.c1)
        self.c12.products.set([self.p3, self.p4])
        self.c12.save()
        address = Address.objects.create()
        self.o = Order.objects.create(invoice_address=address, shipping_address=address)
        self.oi1 = OrderItem.objects.create(order=self.o, product_amount=1, product=self.p1)
        self.oi2 = OrderItem.objects.create(order=self.o, product_amount=2, product=self.p2)
        self.oi3 = OrderItem.objects.create(order=self.o, product_amount=3, product=self.p3)
        self.oi4 = OrderItem.objects.create(order=self.o, product_amount=4, product=self.p4)
        calculate_product_sales()

    def test_calculate_product_sales(self):
        calculate_product_sales()
        # This should not break calculate_product_sales()
        self.oi1.product = None
        self.oi1.save()
        calculate_product_sales()

    def test_topseller_1(self):
        """Tests general topsellers.
        """
        ts = lfs.marketing.utils.get_topseller(2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)

    def test_topseller_2(self):
        """Tests general topseller with explicitly selected products.
        """
        # Explicit topseller
        self.p5 = Product.objects.create(name="Product 5", slug="product-5", active=True)
        t5 = Topseller.objects.create(product=self.p5, position=1)
        ts = lfs.marketing.utils.get_topseller(2)
        self.assertEqual(ts[0], self.p5)
        self.assertEqual(ts[1], self.p4)
        self.p6 = Product.objects.create(name="Product 6", slug="product-6", active=True)
        t6 = Topseller.objects.create(product=self.p6, position=2)
        ts = lfs.marketing.utils.get_topseller(2)
        self.assertEqual(ts[0], self.p5)
        self.assertEqual(ts[1], self.p6)
        # Exchange positions
        t5.position = 2
        t5.save()
        t6.position = 1
        t6.save()
        ts = lfs.marketing.utils.get_topseller(2)
        self.assertEqual(ts[0], self.p6)
        self.assertEqual(ts[1], self.p5)
        # Now the position is greater than the limit, so it shouldn't be
        # within topsellers at all
        t6.position = 3
        t6.save()
        ts = lfs.marketing.utils.get_topseller(2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p5)  # has to be on position 2

    def test_topseller_3(self):
        """Tests general topseller with explicitly assigned products which
        are also in calculated topsellers.
        """
        ts = lfs.marketing.utils.get_topseller(2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)
        # Explicit topseller P4, which is already a topseller
        Topseller.objects.create(product=self.p4, position=1)
        # P4 should only displayed once
        ts = lfs.marketing.utils.get_topseller(2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)

    def test_topseller_for_category_1(self):
        """Tests topseller for specific categories.
        """
        # Tests the top level category
        ts = lfs.marketing.utils.get_topseller_for_category(self.c1, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)
        # Tests the direct categories
        ts = lfs.marketing.utils.get_topseller_for_category(self.c11, limit=1)
        self.assertEqual(len(ts), 1)
        self.assertEqual(ts[0], self.p2)
        ts = lfs.marketing.utils.get_topseller_for_category(self.c12, limit=1)
        self.assertEqual(len(ts), 1)
        self.assertEqual(ts[0], self.p4)

    def test_topseller_for_category_2(self):
        """Tests the top seller for specific categories. With explicitly
        selected products
        """
        # Explicit topseller for c1
        self.p5 = Product.objects.create(name="Product 5", slug="product-5", active=True)
        t5 = Topseller.objects.create(product=self.p5, position=1)
        self.c11.products.set([self.p1, self.p2, self.p5])
        # Tests the top level category
        ts = lfs.marketing.utils.get_topseller_for_category(self.c1, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p5)
        self.assertEqual(ts[1], self.p4)
        # Tests the direct categories
        ts = lfs.marketing.utils.get_topseller_for_category(self.c11, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p5)
        self.assertEqual(ts[1], self.p2)
        # The explicit topseller with category 1 has no impact for topsellers of
        # c2
        ts = lfs.marketing.utils.get_topseller_for_category(self.c12, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)
        # Now we add Product 5 also to c12
        self.c12.products.set([self.p3, self.p4, self.p5])
        self.c12.save()
        # Now Product 5 is among the topseller
        ts = lfs.marketing.utils.get_topseller_for_category(self.c12, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p5)
        self.assertEqual(ts[1], self.p4)
        # Change to position of p5 to 2
        t5.position = 2
        t5.save()
        ts = lfs.marketing.utils.get_topseller_for_category(self.c12, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p5)
        # Change to position of p5 to 3. it isn't within topsellers anymore
        t5.position = 3
        t5.save()
        ts = lfs.marketing.utils.get_topseller_for_category(self.c12, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)

    def test_topseller_for_category_3(self):
        """Tests the top seller for specific categories. With explicitly
        selected products which are also calculated topsellers.
        """
        # Tests the top level category
        ts = lfs.marketing.utils.get_topseller_for_category(self.c1, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)
        # Explicit topseller P4 for c1, which is already a topseller
        Topseller.objects.create(product=self.p4, position=1)
        # Tests the top level category
        ts = lfs.marketing.utils.get_topseller_for_category(self.c1, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)
        # Tests the direct categories
        ts = lfs.marketing.utils.get_topseller_for_category(self.c12, limit=2)
        self.assertEqual(len(ts), 2)
        self.assertEqual(ts[0], self.p4)
        self.assertEqual(ts[1], self.p3)
| bsd-3-clause | 30ec518500c224d6601d18d6ad42dc25 | 33.642612 | 98 | 0.629203 | 3.340292 | false | true | false | false |
clips/pattern | examples/02-db/02-datasheet.py | 1 | 2054 | from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.db import Datasheet, INTEGER, STRING
from pattern.db import uid, pprint
# The main purpose of the pattern module is to facilitate automated processes
# for (text) data acquisition and (linguistical) data mining.
# Often, this involves a tangle of messy text files and custom formats to store the data.
# The Datasheet class offers a useful matrix (cfr. MS Excel) in Python code.
# It can be saved as a CSV text file that is both human/machine readable.
# See also: examples/01-web/03-twitter.py
# A Datasheet can have headers: a (name, type)-tuple for each column.
# In this case, imported columns will automatically map values to the defined type.
# Supported values that are imported and exported correctly:
# str, unicode, int, float, bool, Date, None
# For other data types, custom encoder and decoder functions can be used.
# Build a Datasheet with four rows; each header is a (name, type)-tuple, so
# imported values are coerced to the declared type.
ds = Datasheet(rows=[
    [uid(), "broccoli", "vegetable"],
    [uid(), "turnip", "vegetable"],
    [uid(), "asparagus", "vegetable"],
    [uid(), "banana", "fruit"],
], fields=[
    ("id", INTEGER),  # Define the column headers.
    ("name", STRING),
    ("type", STRING)
])
print(ds.rows[0])     # A list of rows.
print(ds.columns[1])  # A list of columns, where each column is a list of values.
print(ds.name)
print("")
# Columns can be manipulated directly like any other Python list.
# This can be slow for large tables. If you need a fast way to do matrix math,
# use numpy (http://numpy.scipy.org/) instead.
# The purpose of Datasheet is data storage.
ds.columns.append([
    "green",
    "purple",
    "white",
    "yellow"
], field=("color", STRING))
# Save as a comma-separated (unicode) text file.
ds.save("food.txt", headers=True)
# Load a table from file.
ds = Datasheet.load("food.txt", headers=True)
pprint(ds, truncate=50, padding=" ", fill=".")
print("")
print(ds.fields)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.