repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
django-extensions/django-extensions | tests/management/commands/test_print_settings.py | 1 | 1674 | # -*- coding: utf-8 -*-
import pytest
from django.core.management import CommandError, call_command
def test_without_args(capsys):
    """With no positional arguments every setting should be dumped."""
    call_command('print_settings')
    captured_out, _ = capsys.readouterr()
    assert 'DEBUG' in captured_out
    assert 'INSTALLED_APPS' in captured_out
def test_with_setting_args(capsys):
    """Naming a single setting restricts the output to just that setting."""
    call_command('print_settings', 'DEBUG')
    captured_out, _ = capsys.readouterr()
    assert 'DEBUG' in captured_out
    assert 'INSTALLED_APPS' not in captured_out
def test_with_setting_wildcard(capsys):
    """A wildcard pattern matches all settings with that suffix, nothing else."""
    call_command('print_settings', '*_DIRS')
    captured_out, _ = capsys.readouterr()
    for expected_name in ('FIXTURE_DIRS', 'STATICFILES_DIRS'):
        assert expected_name in captured_out
    assert 'INSTALLED_APPS' not in captured_out
def test_with_setting_fail(capsys):
    """An unknown setting name combined with -f (fail) raises CommandError."""
    expected_message = 'INSTALLED_APPZ not found in settings.'
    with pytest.raises(CommandError, match=expected_message):
        call_command('print_settings', '-f', 'INSTALLED_APPZ')
def test_with_multiple_setting_args(capsys):
    """Several setting names can be requested at once; others are omitted."""
    requested = ('SECRET_KEY', 'DATABASES', 'INSTALLED_APPS')
    call_command('print_settings', *requested)
    captured_out, _ = capsys.readouterr()
    assert 'DEBUG' not in captured_out
    for expected_name in requested:
        assert expected_name in captured_out
def test_format(capsys):
    """The text format prints NAME = value lines."""
    call_command('print_settings', 'DEBUG', '--format=text')
    captured_out, _ = capsys.readouterr()
    assert captured_out == 'DEBUG = False\n'
def test_format_json_without_indent(capsys):
    """JSON output with indent=0 still places each key on its own line."""
    call_command('print_settings', 'DEBUG', '--format=json', '--indent=0')
    captured_out, _ = capsys.readouterr()
    assert captured_out == '{\n"DEBUG": false\n}\n'
| mit | 97b03d7c53f85c2314e8cbdbee519bab | 21.621622 | 84 | 0.626045 | 3.502092 | false | true | false | false |
django-extensions/django-extensions | django_extensions/management/commands/export_emails.py | 1 | 5565 | # -*- coding: utf-8 -*-
import sys
import csv
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import signalcommand
# Supported output formats; each entry names a Command method below that
# renders the queryset (the first entry is the default format).
FORMATS = [
    'address',
    'emails',
    'google',
    'outlook',
    'linkedin',
    'vcard',
]
def full_name(**kwargs):
    """Return full name or username."""
    # Prefer "first last" built from whichever of the two parts is present.
    name_parts = [kwargs.get('first_name'), kwargs.get('last_name')]
    combined = " ".join(part for part in name_parts if part)
    if combined:
        return combined
    # Fall back to an explicit 'name', then the username, then empty string.
    for fallback_key in ('name', 'username'):
        fallback_value = kwargs.get(fallback_key)
        if fallback_value:
            return fallback_value
    return ""
class Command(BaseCommand):
    """Management command that exports user e-mail addresses in several formats."""
    help = "Export user email address list in one of a number of formats."
    args = "[output file]"
    label = 'filename to save to'
    can_import_settings = True
    encoding = 'utf-8'  # RED_FLAG: add as an option -DougN
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.UserModel = get_user_model()
    def add_arguments(self, parser):
        super().add_arguments(parser)
        parser.add_argument(
            '--group', '-g', action='store', dest='group', default=None,
            help='Limit to users which are part of the supplied group name',
        ),
        # NOTE(review): the trailing comma above turns the statement into a
        # throwaway 1-tuple; harmless at runtime but probably unintended.
        parser.add_argument(
            '--format', '-f', action='store', dest='format', default=FORMATS[0],
            help="output format. May be one of %s." % ", ".join(FORMATS),
        )
    def full_name(self, **kwargs):
        # Projects may override name building via EXPORT_EMAILS_FULL_NAME_FUNC;
        # the module-level full_name() is the default.
        return getattr(settings, 'EXPORT_EMAILS_FULL_NAME_FUNC', full_name)(**kwargs)
    @signalcommand
    def handle(self, *args, **options):
        """Validate options, build the user queryset and dispatch to a formatter."""
        if len(args) > 1:
            raise CommandError("extra arguments supplied")
        group = options['group']
        # Fail early with the list of valid group names if the group is unknown.
        if group and not Group.objects.filter(name=group).count() == 1:
            names = "', '".join(g['name'] for g in Group.objects.values('name'))
            if names:
                names = "'" + names + "'."
            raise CommandError("Unknown group '" + group + "'. Valid group names are: " + names)
        UserModel = get_user_model()
        # Ordering and exported fields can both be overridden in settings.
        order_by = getattr(settings, 'EXPORT_EMAILS_ORDER_BY', ['last_name', 'first_name', 'username', 'email'])
        fields = getattr(settings, 'EXPORT_EMAILS_FIELDS', ['last_name', 'first_name', 'username', 'email'])
        qs = UserModel.objects.all().order_by(*order_by)
        if group:
            qs = qs.filter(groups__name=group).distinct()
        qs = qs.values(*fields)
        # Dispatch to the method named after the chosen format (see FORMATS).
        getattr(self, options['format'])(qs)
    def address(self, qs):
        """
        Single entry per line in the format of:
            "full name" <my@address.com>;
        """
        self.stdout.write("\n".join('"%s" <%s>;' % (self.full_name(**ent), ent.get('email', '')) for ent in qs))
        self.stdout.write("\n")
    def emails(self, qs):
        """
        Single entry with email only in the format of:
            my@address.com,
        """
        self.stdout.write(",\n".join(ent['email'] for ent in qs if ent.get('email')))
        self.stdout.write("\n")
    def google(self, qs):
        """CSV format suitable for importing into google GMail"""
        # NOTE(review): the CSV formatters write to sys.stdout directly while
        # address()/emails() use self.stdout — confirm this difference is wanted.
        csvf = csv.writer(sys.stdout)
        csvf.writerow(['Name', 'Email'])
        for ent in qs:
            csvf.writerow([self.full_name(**ent), ent.get('email', '')])
    def linkedin(self, qs):
        """
        CSV format suitable for importing into linkedin Groups.
        perfect for pre-approving members of a linkedin group.
        """
        csvf = csv.writer(sys.stdout)
        csvf.writerow(['First Name', 'Last Name', 'Email'])
        for ent in qs:
            csvf.writerow([ent.get('first_name', ''), ent.get('last_name', ''), ent.get('email', '')])
    def outlook(self, qs):
        """CSV format suitable for importing into outlook"""
        csvf = csv.writer(sys.stdout)
        columns = ['Name', 'E-mail Address', 'Notes', 'E-mail 2 Address', 'E-mail 3 Address',
                   'Mobile Phone', 'Pager', 'Company', 'Job Title', 'Home Phone', 'Home Phone 2',
                   'Home Fax', 'Home Address', 'Business Phone', 'Business Phone 2',
                   'Business Fax', 'Business Address', 'Other Phone', 'Other Fax', 'Other Address']
        csvf.writerow(columns)
        # Only the first two columns carry data; pad the rest with empties.
        empty = [''] * (len(columns) - 2)
        for ent in qs:
            csvf.writerow([self.full_name(**ent), ent.get('email', '')] + empty)
    def vcard(self, qs):
        """VCARD format."""
        try:
            import vobject
        except ImportError:
            print(self.style.ERROR("Please install vobject to use the vcard export format."))
            sys.exit(1)
        out = sys.stdout
        for ent in qs:
            card = vobject.vCard()
            card.add('fn').value = self.full_name(**ent)
            if ent.get('last_name') and ent.get('first_name'):
                card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
            else:
                # fallback to fullname, if both first and lastname are not declared
                card.add('n').value = vobject.vcard.Name(self.full_name(**ent))
            if ent.get('email'):
                emailpart = card.add('email')
                emailpart.value = ent['email']
                emailpart.type_param = 'INTERNET'
            out.write(card.serialize())
| mit | d85c166783bd2b8637b92e41e9ba28c7 | 34.44586 | 112 | 0.563702 | 3.809035 | false | false | false | false |
django-extensions/django-extensions | django_extensions/admin/widgets.py | 1 | 3191 | # -*- coding: utf-8 -*-
import urllib
from django import forms
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.template.loader import render_to_string
from django.templatetags.static import static
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
class ForeignKeySearchInput(ForeignKeyRawIdWidget):
    """
    Widget for displaying ForeignKeys in an autocomplete search input
    instead in a <select> box.
    """
    # Set in subclass to render the widget with a different template
    widget_template = None
    # Set this to the path of the search view
    search_path = None
    def _media(self):
        # jQuery autocomplete assets shipped with django-extensions.
        js_files = [
            static('django_extensions/js/jquery.bgiframe.js'),
            static('django_extensions/js/jquery.ajaxQueue.js'),
            static('django_extensions/js/jquery.autocomplete.js'),
        ]
        return forms.Media(
            css={'all': (static('django_extensions/css/jquery.autocomplete.css'), )},
            js=js_files,
        )
    # Exposed as a property so static() is only resolved at access time.
    media = property(_media)
    def label_for_value(self, value):
        """Return a truncated textual label for the related object with PK ``value``."""
        key = self.rel.get_related_field().name
        # NOTE(review): raises DoesNotExist for a stale/invalid value — confirm
        # callers only pass primary keys of existing rows.
        obj = self.rel.model._default_manager.get(**{key: value})
        return Truncator(obj).words(14, truncate='...')
    def __init__(self, rel, search_fields, attrs=None):
        # search_fields: model field names searched by the autocomplete view.
        self.search_fields = search_fields
        super().__init__(rel, site, attrs)
    def render(self, name, value, attrs=None, renderer=None):
        """Render a raw-id text input plus the autocomplete template markup."""
        if attrs is None:
            attrs = {}
        opts = self.rel.model._meta
        app_label = opts.app_label
        model_name = opts.object_name.lower()
        related_url = reverse('admin:%s_%s_changelist' % (app_label, model_name))
        if not self.search_path:
            # Default: the foreignkey_autocomplete view next to the changelist.
            self.search_path = urllib.parse.urljoin(related_url, 'foreignkey_autocomplete/')
        params = self.url_parameters()
        if params:
            url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
        else:
            url = ''
        if 'class' not in attrs:
            attrs['class'] = 'vForeignKeyRawIdAdminField'
        # Call the TextInput render method directly to have more control
        output = [forms.TextInput.render(self, name, value, attrs)]
        if value:
            label = self.label_for_value(value)
        else:
            label = ''
        context = {
            'url': url,
            'related_url': related_url,
            'search_path': self.search_path,
            'search_fields': ','.join(self.search_fields),
            'app_label': app_label,
            'model_name': model_name,
            'label': label,
            'name': name,
        }
        # Template lookup: per-model, per-app, then the generic fallback.
        output.append(render_to_string(self.widget_template or (
            'django_extensions/widgets/%s/%s/foreignkey_searchinput.html' % (app_label, model_name),
            'django_extensions/widgets/%s/foreignkey_searchinput.html' % app_label,
            'django_extensions/widgets/foreignkey_searchinput.html',
        ), context))
        # Template markup is emitted before the input element.
        output.reverse()
        return mark_safe(''.join(output))
| mit | abd233c5d54e3cf6a4d3c47a33d43f39 | 34.455556 | 100 | 0.607333 | 3.978803 | false | false | false | false |
django-extensions/django-extensions | django_extensions/management/commands/shell_plus.py | 1 | 22891 | # -*- coding: utf-8 -*-
import inspect
import os
import sys
import traceback
import warnings
from django.db import connections
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils.datastructures import OrderedSet
from django_extensions.management.shells import import_objects
from django_extensions.management.utils import signalcommand
from django_extensions.management.debug_cursor import monkey_patch_cursordebugwrapper
def use_vi_mode():
    """Return True when $EDITOR looks like vi/vim, enabling Vi key bindings."""
    editor_path = os.environ.get('EDITOR')
    if not editor_path:
        return False
    editor_name = os.path.basename(editor_path)
    return editor_name.startswith('vi') or editor_name.endswith('vim')
def shell_runner(flags, name, help=None):
    """
    Decorates methods with information about the application they are starting

    :param flags: The flags used to start this runner via the ArgumentParser.
    :param name: The name of this runner for the help text for the ArgumentParser.
    :param help: The optional help for the ArgumentParser if the dynamically generated help is not sufficient.
    """
    def _attach_metadata(method):
        # The attributes below are what Command.__init__ scans for.
        method.runner_flags = flags
        method.runner_name = name
        method.runner_help = help
        return method
    return _attach_metadata
class Command(BaseCommand):
    help = "Like the 'shell' command but autoloads the models of all installed Django apps."
    # Arguments following a literal '--' on the command line, forwarded to the shell.
    extra_args = None
    # Set by the test-suite: capture the imported namespace instead of running a shell.
    tests_mode = False
    def __init__(self):
        super().__init__()
        # Collect every method decorated with @shell_runner so each one can be
        # exposed as a mutually exclusive command line flag in add_arguments().
        self.runners = [member for name, member in inspect.getmembers(self)
                        if hasattr(member, 'runner_flags')]
    def add_arguments(self, parser):
        """Register one flag per @shell_runner method plus the shared options."""
        super().add_arguments(parser)
        # Only one shell runner may be chosen per invocation.
        group = parser.add_mutually_exclusive_group()
        for runner in self.runners:
            if runner.runner_help:
                help = runner.runner_help
            else:
                help = 'Tells Django to use %s.' % runner.runner_name
            group.add_argument(
                *runner.runner_flags, action='store_const', dest='runner', const=runner, help=help)
        parser.add_argument(
            '--connection-file', action='store', dest='connection_file',
            help='Specifies the connection file to use if using the --kernel option'
        )
        parser.add_argument(
            '--no-startup', action='store_true', dest='no_startup',
            default=False,
            help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.'
        )
        parser.add_argument(
            '--use-pythonrc', action='store_true', dest='use_pythonrc',
            default=False,
            help='When using plain Python, load the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.'
        )
        parser.add_argument(
            '--print-sql', action='store_true',
            default=False,
            help="Print SQL queries as they're executed"
        )
        parser.add_argument(
            '--truncate-sql', action='store', type=int,
            help="Truncate SQL queries to a number of characters."
        )
        parser.add_argument(
            '--print-sql-location', action='store_true',
            default=False,
            help="Show location in code where SQL query generated from"
        )
        parser.add_argument(
            '--dont-load', action='append', dest='dont_load', default=[],
            help='Ignore autoloading of some apps/models. Can be used several times.'
        )
        parser.add_argument(
            '--quiet-load', action='store_true',
            default=False,
            dest='quiet_load', help='Do not display loaded models messages'
        )
        parser.add_argument(
            '--vi', action='store_true', default=use_vi_mode(), dest='vi_mode',
            help='Load Vi key bindings (for --ptpython and --ptipython)'
        )
        parser.add_argument(
            '--no-browser', action='store_true',
            default=False,
            dest='no_browser',
            help='Don\'t open the notebook in a browser after startup.'
        )
        parser.add_argument(
            '-c', '--command',
            help='Instead of opening an interactive shell, run a command as Django and exit.',
        )
def run_from_argv(self, argv):
if '--' in argv[2:]:
idx = argv.index('--')
self.extra_args = argv[idx + 1:]
argv = argv[:idx]
return super().run_from_argv(argv)
def get_ipython_arguments(self, options):
ipython_args = 'IPYTHON_ARGUMENTS'
arguments = getattr(settings, ipython_args, [])
if not arguments:
arguments = os.environ.get(ipython_args, '').split()
return arguments
def get_notebook_arguments(self, options):
notebook_args = 'NOTEBOOK_ARGUMENTS'
arguments = getattr(settings, notebook_args, [])
if not arguments:
arguments = os.environ.get(notebook_args, '').split()
return arguments
    def get_imported_objects(self, options):
        """Build the autoloaded shell namespace (models, helpers, etc.)."""
        imported_objects = import_objects(options, self.style)
        if self.tests_mode:
            # save imported objects so we can run tests against it later
            self.tests_imported_objects = imported_objects
        return imported_objects
    @shell_runner(flags=['--kernel'], name='IPython Kernel')
    def get_kernel(self, options):
        """Return a callable starting an IPython kernel, or a traceback string on failure."""
        try:
            from IPython import release
            if release.version_info[0] < 2:
                print(self.style.ERROR("--kernel requires at least IPython version 2.0"))
                return
            from IPython import start_kernel
        except ImportError:
            # Returning the traceback (a string) signals "runner unavailable".
            return traceback.format_exc()
        def run_kernel():
            imported_objects = self.get_imported_objects(options)
            kwargs = dict(
                argv=[],
                user_ns=imported_objects,
            )
            # Honour an explicit --connection-file if one was supplied.
            connection_file = options['connection_file']
            if connection_file:
                kwargs['connection_file'] = connection_file
            start_kernel(**kwargs)
        return run_kernel
    def load_base_kernel_spec(self, app):
        """Finds and returns the base Python kernelspec to extend from."""
        ksm = app.kernel_spec_manager
        # Candidate spec names can be overridden via settings.
        try_spec_names = getattr(settings, 'NOTEBOOK_KERNEL_SPEC_NAMES', [
            'python3',
            'python',
        ])
        if isinstance(try_spec_names, str):
            try_spec_names = [try_spec_names]
        ks = None
        for spec_name in try_spec_names:
            try:
                ks = ksm.get_kernel_spec(spec_name)
                break
            except Exception:
                # Try the next candidate; only fail when none resolve.
                continue
        if not ks:
            raise CommandError("No notebook (Python) kernel specs found. Tried %r" % try_spec_names)
        return ks
    def generate_kernel_specs(self, app, ipython_arguments):
        """Generate an IPython >= 3.0 kernelspec that loads django extensions"""
        ks = self.load_base_kernel_spec(app)
        ks.argv.extend(ipython_arguments)
        ks.display_name = getattr(settings, 'IPYTHON_KERNEL_DISPLAY_NAME', "Django Shell-Plus")
        manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0]))
        if manage_py == 'manage.py' and os.path.isdir(manage_py_dir):
            # Make the project importable from the kernel by putting the
            # manage.py directory on the kernel's PYTHONPATH.
            pythonpath = ks.env.get('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
            pythonpath = pythonpath.split(os.pathsep)
            if manage_py_dir not in pythonpath:
                pythonpath.append(manage_py_dir)
            ks.env['PYTHONPATH'] = os.pathsep.join(filter(None, pythonpath))
        return {'django_extensions': ks}
    def run_notebookapp(self, app_init, options, use_kernel_specs=True, history=True):
        """Initialize and start a notebook-style app (Notebook/JupyterLab)."""
        no_browser = options['no_browser']
        if self.extra_args:
            # if another '--' is found split the arguments notebook, ipython
            if '--' in self.extra_args:
                idx = self.extra_args.index('--')
                notebook_arguments = self.extra_args[:idx]
                ipython_arguments = self.extra_args[idx + 1:]
            # otherwise pass the arguments to the notebook
            else:
                notebook_arguments = self.extra_args
                ipython_arguments = []
        else:
            notebook_arguments = self.get_notebook_arguments(options)
            ipython_arguments = self.get_ipython_arguments(options)
        # Treat IPYTHON_ARGUMENTS from settings
        if 'django_extensions.management.notebook_extension' not in ipython_arguments:
            ipython_arguments.extend(['--ext', 'django_extensions.management.notebook_extension'])
        # Treat NOTEBOOK_ARGUMENTS from settings
        if no_browser and '--no-browser' not in notebook_arguments:
            notebook_arguments.append('--no-browser')
        if '--notebook-dir' not in notebook_arguments and not any(e.startswith('--notebook-dir=') for e in notebook_arguments):
            notebook_arguments.extend(['--notebook-dir', '.'])
        # IPython < 3 passes through kernel args from notebook CLI
        if not use_kernel_specs:
            notebook_arguments.extend(ipython_arguments)
        # disable history if not already configured in some other way
        if not history and not any(arg.startswith('--HistoryManager') for arg in ipython_arguments):
            ipython_arguments.append('--HistoryManager.enabled=False')
        if not callable(app_init):
            # Legacy call style: an already constructed app instance.
            app = app_init
            warnings.warn('Initialize should be a callable not an app instance', DeprecationWarning)
            app.initialize(notebook_arguments)
        else:
            app = app_init(notebook_arguments)
        # IPython >= 3 uses kernelspecs to specify kernel CLI args
        if use_kernel_specs:
            ksm = app.kernel_spec_manager
            for kid, ks in self.generate_kernel_specs(app, ipython_arguments).items():
                # Try the spec's own resource dir first, then the user kernel dir.
                roots = [os.path.dirname(ks.resource_dir), ksm.user_kernel_dir]
                for root in roots:
                    kernel_dir = os.path.join(root, kid)
                    try:
                        if not os.path.exists(kernel_dir):
                            os.makedirs(kernel_dir)
                        with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
                            f.write(ks.to_json())
                        break
                    except OSError:
                        continue
                else:
                    # for/else: no writable location was found for this kernel.
                    raise CommandError('Could not write kernel %r in directories %r' % (kid, roots))
        app.start()
    @shell_runner(flags=['--notebook'], name='IPython Notebook')
    def get_notebook(self, options):
        """Locate a NotebookApp class across IPython/notebook versions and return a starter."""
        try:
            from IPython import release
        except ImportError:
            return traceback.format_exc()
        try:
            from notebook.notebookapp import NotebookApp
        except ImportError:
            # The notebook ships as a separate package since IPython 7.
            if release.version_info[0] >= 7:
                return traceback.format_exc()
            try:
                from IPython.html.notebookapp import NotebookApp
            except ImportError:
                if release.version_info[0] >= 3:
                    return traceback.format_exc()
                # Very old layout (IPython < 3).
                try:
                    from IPython.frontend.html.notebook import notebookapp
                    NotebookApp = notebookapp.NotebookApp
                except ImportError:
                    return traceback.format_exc()
        use_kernel_specs = release.version_info[0] >= 3
        def app_init(*args, **kwargs):
            app = NotebookApp.instance()
            app.initialize(*args, **kwargs)
            return app
        def run_notebook():
            self.run_notebookapp(app_init, options, use_kernel_specs)
        return run_notebook
    @shell_runner(flags=['--lab'], name='JupyterLab Notebook')
    def get_jupyterlab(self, options):
        """Return a callable starting JupyterLab, or a traceback string if unavailable."""
        try:
            from jupyterlab.labapp import LabApp
        except ImportError:
            return traceback.format_exc()
        # check for JupyterLab 3.0
        try:
            from notebook.notebookapp import NotebookApp
        except ImportError:
            NotebookApp = None
        if not NotebookApp or not issubclass(LabApp, NotebookApp):
            # JupyterLab >= 3: LabApp no longer subclasses NotebookApp.
            app_init = LabApp.initialize_server
        else:
            def app_init(*args, **kwargs):
                app = LabApp.instance()
                app.initialize(*args, **kwargs)
                return app
        def run_jupyterlab():
            self.run_notebookapp(app_init, options, history=False)
        return run_jupyterlab
    @shell_runner(flags=['--plain'], name='plain Python')
    def get_plain(self, options):
        """Return a callable running the builtin ``code.interact`` shell."""
        # Using normal Python shell
        import code
        imported_objects = self.get_imported_objects(options)
        try:
            # Try activating rlcompleter, because it's handy.
            import readline
        except ImportError:
            pass
        else:
            # We don't have to wrap the following import in a 'try', because
            # we already know 'readline' was imported successfully.
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(imported_objects).complete)
            # Enable tab completion on systems using libedit (e.g. macOS).
            # These lines are copied from Lib/site.py on Python 3.4.
            readline_doc = getattr(readline, '__doc__', '')
            if readline_doc is not None and 'libedit' in readline_doc:
                readline.parse_and_bind("bind ^I rl_complete")
            else:
                readline.parse_and_bind("tab:complete")
        use_pythonrc = options['use_pythonrc']
        no_startup = options['no_startup']
        # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
        # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
        if use_pythonrc or not no_startup:
            for pythonrc in OrderedSet([os.environ.get("PYTHONSTARTUP"), os.path.expanduser('~/.pythonrc.py')]):
                if not pythonrc:
                    continue
                if not os.path.isfile(pythonrc):
                    continue
                with open(pythonrc) as handle:
                    pythonrc_code = handle.read()
                # Match the behavior of the cpython shell where an error in
                # PYTHONSTARTUP prints an exception and continues.
                try:
                    exec(compile(pythonrc_code, pythonrc, 'exec'), imported_objects)
                except Exception:
                    traceback.print_exc()
                    if self.tests_mode:
                        raise
        def run_plain():
            code.interact(local=imported_objects)
        return run_plain
@shell_runner(flags=['--bpython'], name='BPython')
def get_bpython(self, options):
try:
from bpython import embed
except ImportError:
return traceback.format_exc()
def run_bpython():
imported_objects = self.get_imported_objects(options)
kwargs = {}
if self.extra_args:
kwargs['args'] = self.extra_args
embed(imported_objects, **kwargs)
return run_bpython
    @shell_runner(flags=['--ipython'], name='IPython')
    def get_ipython(self, options):
        """Return a callable starting IPython (modern API, or legacy < 0.11 fallback)."""
        try:
            from IPython import start_ipython
            def run_ipython():
                imported_objects = self.get_imported_objects(options)
                # Extra args after '--' take precedence over configured args.
                ipython_arguments = self.extra_args or self.get_ipython_arguments(options)
                start_ipython(argv=ipython_arguments, user_ns=imported_objects)
            return run_ipython
        except ImportError:
            str_exc = traceback.format_exc()
            # IPython < 0.11
            # Explicitly pass an empty list as arguments, because otherwise
            # IPython would use sys.argv from this script.
            # Notebook not supported for IPython < 0.11.
            try:
                from IPython.Shell import IPShell
            except ImportError:
                # Report both failed import paths.
                return str_exc + "\n" + traceback.format_exc()
            def run_ipython():
                imported_objects = self.get_imported_objects(options)
                shell = IPShell(argv=[], user_ns=imported_objects)
                shell.mainloop()
            return run_ipython
    @shell_runner(flags=['--ptpython'], name='PTPython')
    def get_ptpython(self, options):
        """Return a callable starting ptpython (with an old prompt_toolkit fallback)."""
        try:
            from ptpython.repl import embed, run_config
        except ImportError:
            tb = traceback.format_exc()
            try:  # prompt_toolkit < v0.27
                from prompt_toolkit.contrib.repl import embed, run_config
            except ImportError:
                return tb
        def run_ptpython():
            imported_objects = self.get_imported_objects(options)
            history_filename = os.path.expanduser('~/.ptpython_history')
            embed(globals=imported_objects, history_filename=history_filename,
                  vi_mode=options['vi_mode'], configure=run_config)
        return run_ptpython
    @shell_runner(flags=['--ptipython'], name='PT-IPython')
    def get_ptipython(self, options):
        """Return a callable starting IPython inside ptpython's frontend."""
        try:
            from ptpython.repl import run_config
            from ptpython.ipython import embed
        except ImportError:
            tb = traceback.format_exc()
            try:  # prompt_toolkit < v0.27
                from prompt_toolkit.contrib.repl import run_config
                from prompt_toolkit.contrib.ipython import embed
            except ImportError:
                return tb
        def run_ptipython():
            imported_objects = self.get_imported_objects(options)
            history_filename = os.path.expanduser('~/.ptpython_history')
            embed(user_ns=imported_objects, history_filename=history_filename,
                  vi_mode=options['vi_mode'], configure=run_config)
        return run_ptipython
    @shell_runner(flags=['--idle'], name='Idle')
    def get_idle(self, options):
        """Return a callable starting the IDLE shell with models autoloaded."""
        from idlelib.pyshell import main
        def run_idle():
            # IDLE parses sys.argv itself; inject '-c' code that repeats the
            # shell_plus autoload inside the IDLE process.
            sys.argv = [
                sys.argv[0],
                '-c',
                """
from django_extensions.management import shells
from django.core.management.color import no_style
for k, m in shells.import_objects({}, no_style()).items():
    globals()[k] = m
""",
            ]
            main()
        return run_idle
    def set_application_name(self, options):
        """
        Set the application_name on PostgreSQL connection

        Use the fallback_application_name to let the user override
        it with PGAPPNAME env variable

        http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS  # noqa
        """
        supported_backends = (
            'django.db.backends.postgresql',
            'django.db.backends.postgresql_psycopg2',
        )
        opt_name = 'fallback_application_name'
        default_app_name = 'django_shell'
        # NOTE(review): default is a list but dbs[alias] below assumes a dict;
        # only safe because the loop body is empty when DATABASES is unset.
        dbs = getattr(settings, 'DATABASES', [])
        for connection in connections.all():
            alias = connection.alias
            # Walk the MRO so subclassed postgres backends are detected too.
            mro = inspect.getmro(connection.__class__)
            if any(klass.__module__.startswith(supported_backends) for klass in mro):
                if 'OPTIONS' not in dbs[alias] or opt_name not in dbs[alias]['OPTIONS']:
                    dbs[alias].setdefault('OPTIONS', {}).update({opt_name: default_app_name})
    @signalcommand
    def handle(self, *args, **options):
        """Pick a shell runner (CLI flag > settings.SHELL_PLUS > auto-detect) and run it."""
        verbosity = options["verbosity"]
        get_runner = options['runner']
        print_sql = getattr(settings, 'SHELL_PLUS_PRINT_SQL', False)
        runner = None
        runner_name = None
        # --truncate-sql 0 means "no truncation".
        truncate = None if options["truncate_sql"] == 0 else options["truncate_sql"]
        with monkey_patch_cursordebugwrapper(print_sql=options["print_sql"] or print_sql, truncate=truncate, print_sql_location=options["print_sql_location"], confprefix="SHELL_PLUS"):
            SETTINGS_SHELL_PLUS = getattr(settings, 'SHELL_PLUS', None)
            def get_runner_by_flag(flag):
                # Map a CLI flag (e.g. '--ipython') back to its runner method.
                for runner in self.runners:
                    if flag in runner.runner_flags:
                        return runner
                return None
            self.set_application_name(options)
            if not get_runner and SETTINGS_SHELL_PLUS:
                get_runner = get_runner_by_flag('--%s' % SETTINGS_SHELL_PLUS)
                if not get_runner:
                    # Remember the unknown configured name for the error below.
                    runner = None
                    runner_name = SETTINGS_SHELL_PLUS
            if get_runner:
                runner = get_runner(options)
                runner_name = get_runner.runner_name
            else:
                def try_runner(get_runner):
                    # A runner factory returns a callable on success, or a
                    # traceback string / None when its shell is unavailable.
                    runner_name = get_runner.runner_name
                    if verbosity > 2:
                        print(self.style.NOTICE("Trying: %s" % runner_name))
                    runner = get_runner(options)
                    if callable(runner):
                        if verbosity > 1:
                            print(self.style.NOTICE("Using: %s" % runner_name))
                        return runner
                    return None
                tried_runners = set()
                # try the runners that are least unexpected (normal shell runners)
                preferred_runners = ['ptipython', 'ptpython', 'bpython', 'ipython', 'plain']
                for flag_suffix in preferred_runners:
                    get_runner = get_runner_by_flag('--%s' % flag_suffix)
                    tried_runners.add(get_runner)
                    runner = try_runner(get_runner)
                    if runner:
                        runner_name = get_runner.runner_name
                        break
                # try any remaining runners if needed
                if not runner:
                    for get_runner in self.runners:
                        if get_runner not in tried_runners:
                            runner = try_runner(get_runner)
                            if runner:
                                runner_name = get_runner.runner_name
                                break
            if not callable(runner):
                if runner:
                    # A non-callable truthy runner is a traceback string.
                    print(runner)
                if not runner_name:
                    raise CommandError("No shell runner could be found.")
                raise CommandError("Could not load shell runner: '%s'." % runner_name)
            if self.tests_mode:
                # Sentinel exit code used by the test-suite.
                return 130
            if options['command']:
                # -c/--command: run the given code in the autoloaded namespace.
                imported_objects = self.get_imported_objects(options)
                exec(options['command'], imported_objects)
                return None
            runner()
| mit | 6eeedf4170fe19560f04ef4ec292579a | 38.331615 | 184 | 0.570748 | 4.502557 | false | false | false | false |
django-extensions/django-extensions | django_extensions/management/commands/create_jobs.py | 1 | 2457 | # -*- coding: utf-8 -*-
import os
import sys
import shutil
from typing import List
from django.core.management.base import AppCommand
from django.core.management.color import color_style
from django_extensions.management.utils import _make_writeable, signalcommand
class Command(AppCommand):
    """Create the jobs directory skeleton inside a given app."""
    help = "Creates a Django jobs command directory structure for the given app name in the current directory."
    requires_system_checks: List[str] = []
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = True
    @signalcommand
    def handle_app_config(self, app, **options):
        # Copy the bundled 'jobs_template' tree into the app's directory.
        copy_template('jobs_template', app.path, **options)
def copy_template(template_name, copy_to, **options):
    """
    Copy the specified template directory to the copy_to location.

    Skips hidden directories, ``.pyc`` and ``.DS_Store`` files, strips a
    ``.tmpl`` suffix from copied file names, and never overwrites existing
    files.  Respects the ``verbosity`` option for progress/error output.
    """
    import django_extensions
    style = color_style()
    ERROR = getattr(style, 'ERROR', lambda x: x)
    SUCCESS = getattr(style, 'SUCCESS', lambda x: x)
    template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
    verbosity = options["verbosity"]
    # walks the template structure and copies it
    for d, subdirs, files in os.walk(template_dir):
        relative_dir = d[len(template_dir) + 1:]
        if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
            os.mkdir(os.path.join(copy_to, relative_dir))
        # Prune hidden directories in place so os.walk() does not descend into
        # them.  BUG FIX: the previous ``for i, subdir in enumerate(subdirs):
        # del subdirs[i]`` mutated the list while iterating over it, which
        # skipped adjacent hidden entries (and deleted non-hidden ones).
        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
        for f in files:
            if f.endswith('.pyc') or f.startswith('.DS_Store'):
                continue
            path_old = os.path.join(d, f)
            # BUG FIX: ``rstrip(".tmpl")`` strips any trailing run of the
            # characters '.', 't', 'm', 'p', 'l' (e.g. "list.txt" -> "list.tx");
            # remove the ".tmpl" suffix explicitly instead.
            path_new = os.path.join(copy_to, relative_dir, f)
            if path_new.endswith('.tmpl'):
                path_new = path_new[:-len('.tmpl')]
            if os.path.exists(path_new):
                # Never overwrite files that already exist in the target app.
                if verbosity > 1:
                    print(ERROR("%s already exists" % path_new))
                continue
            if verbosity > 1:
                print(SUCCESS("%s" % path_new))
            with open(path_old, 'r') as fp_orig:
                with open(path_new, 'w') as fp_new:
                    fp_new.write(fp_orig.read())
            try:
                shutil.copymode(path_old, path_new)
                _make_writeable(path_new)
            except OSError:
                sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
| mit | ae5e5b35deb6364c64142c6097f815d5 | 36.8 | 156 | 0.614571 | 3.9312 | false | false | false | false |
django-extensions/django-extensions | django_extensions/management/email_notifications.py | 1 | 5360 | # -*- coding: utf-8 -*-
import sys
import traceback
from django.conf import settings
from django.core.mail import send_mail
from django.core.management import BaseCommand
class EmailNotificationCommand(BaseCommand):
    """
    A BaseCommand subclass which adds sending email functionality.
    Subclasses will have an extra command line option ``--email-notification``
    and will be able to send emails by calling ``send_email_notification()``
    if SMTP host and port are specified in settings. The handling of the
    command line option is left to the management command implementation.
    Configuration is done in settings.EMAIL_NOTIFICATIONS dict.
    Configuration example::
        EMAIL_NOTIFICATIONS = {
            'scripts.my_script': {
                'subject': 'my_script subject',
                'body': 'my_script body',
                'from_email': 'from_email@example.com',
                'recipients': ('recipient0@example.com',),
                'no_admins': False,
                'no_traceback': False,
                'notification_level': 0,
                'fail_silently': False
            },
            'scripts.another_script': {
                ...
            },
            ...
        }
    Configuration explained:
        subject: Email subject.
        body: Email body.
        from_email: Email from address.
        recipients: Sequence of email recipient addresses.
        no_admins: When True do not include ADMINS to recipients.
        no_traceback: When True do not include traceback to email body.
        notification_level: 0: send email on fail, 1: send email always.
        fail_silently: Parameter passed to django's send_mail().
    """
    def add_arguments(self, parser):
        parser.add_argument('--email-notifications',
                            action='store_true',
                            default=False,
                            dest='email_notifications',
                            help='Send email notifications for command.')
        parser.add_argument('--email-exception',
                            action='store_true',
                            default=False,
                            dest='email_exception',
                            help='Send email for command exceptions.')
    def run_from_argv(self, argv):
        """Overriden in order to access the command line arguments."""
        # Kept for subclasses that want the raw invocation in notifications.
        self.argv_string = ' '.join(argv)
        super().run_from_argv(argv)
    def execute(self, *args, **options):
        """
        Overriden in order to send emails on unhandled exception.
        If an unhandled exception in ``def handle(self, *args, **options)``
        occurs and `--email-exception` is set or `self.email_exception` is
        set to True send an email to ADMINS with the traceback and then
        reraise the exception.
        """
        try:
            super().execute(*args, **options)
        except Exception:
            if options['email_exception'] or getattr(self, 'email_exception', False):
                self.send_email_notification(include_traceback=True)
            raise
    def send_email_notification(self, notification_id=None, include_traceback=False, verbosity=1):
        """
        Send email notifications.
        Reads settings from settings.EMAIL_NOTIFICATIONS dict, if available,
        using ``notification_id`` as a key or else provides reasonable
        defaults.
        """
        # Load email notification settings if available
        if notification_id is not None:
            try:
                email_settings = settings.EMAIL_NOTIFICATIONS.get(notification_id, {})
            except AttributeError:
                email_settings = {}
        else:
            email_settings = {}
        # Exit if no traceback found and not in 'notify always' mode
        if not include_traceback and not email_settings.get('notification_level', 0):
            print(self.style.ERROR("Exiting, not in 'notify always' mode."))
            return
        # Set email fields.
        subject = email_settings.get('subject', "Django extensions email notification.")
        command_name = self.__module__.split('.')[-1]
        body = email_settings.get(
            'body',
            "Reporting execution of command: '%s'" % command_name
        )
        # Include traceback
        if include_traceback and not email_settings.get('no_traceback', False):
            try:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                trb = ''.join(traceback.format_tb(exc_traceback))
                body += "\n\nTraceback:\n\n%s\n" % trb
            finally:
                # Avoid keeping a reference cycle alive via the traceback frame.
                del exc_traceback
        # Set from address
        from_email = email_settings.get('from_email', settings.DEFAULT_FROM_EMAIL)
        # Calculate recipients
        recipients = list(email_settings.get('recipients', []))
        if not email_settings.get('no_admins', False):
            # NOTE(review): settings.ADMINS holds (name, email) tuples while the
            # configured recipients are bare addresses — confirm send_mail
            # tolerates the mixture before relying on admin delivery.
            recipients.extend(settings.ADMINS)
        if not recipients:
            if verbosity > 0:
                print(self.style.ERROR("No email recipients available."))
            return
        # Send email...
        send_mail(subject, body, from_email, recipients,
                  fail_silently=email_settings.get('fail_silently', True))
| mit | c9c91b32a2c69729469ea7fc528ec555 | 37.285714 | 98 | 0.573694 | 4.828829 | false | false | false | false |
django-extensions/django-extensions | tests/management/commands/test_set_default_site.py | 1 | 3980 | # -*- coding: utf-8 -*-
from io import StringIO
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from django.test.utils import override_settings
from unittest.mock import patch
class SetDefaultSiteTests(TestCase):
    """Tests for set_default_site command."""

    @override_settings(SITE_ID=321)
    def test_should_raise_CommandError_when_Site_object_does_not_exist(self):
        # SITE_ID points at a pk for which no Site row exists.
        with self.assertRaisesRegex(CommandError, "Default site with pk=321 does not exist"):
            call_command('set_default_site')

    @patch('django_extensions.management.commands.set_default_site.socket')
    def test_should_raise_CommandError_if_system_fqdn_return_None(self, m_socket):
        # socket.getfqdn() failing to produce a name must abort the command.
        m_socket.getfqdn.return_value = None
        with self.assertRaisesRegex(CommandError, "Cannot find systems FQDN"):
            call_command('set_default_site', '--system-fqdn')

    def test_should_raise_CommandError_if_both_domain_and_set_as_system_fqdn_are_present(self):
        # --domain and --system-fqdn are mutually exclusive.
        with self.assertRaisesRegex(CommandError, "The set_as_system_fqdn cannot be used with domain option."):
            call_command('set_default_site', '--domain=foo', '--system-fqdn')

    @override_settings(INSTALLED_APPS=[
        app for app in settings.INSTALLED_APPS
        if app != 'django.contrib.sites'])
    def test_should_raise_CommandError_Sites_framework_not_installed(self):
        with self.assertRaisesRegex(CommandError, "The sites framework is not installed."):
            call_command('set_default_site', '--domain=foo', '--system-fqdn')

    @patch('sys.stdout', new_callable=StringIO)
    def test_should_print_Nothing_to_update(self, m_stdout):
        # No options at all -> command explains what it needs.
        call_command('set_default_site')
        self.assertIn("Nothing to update (need --name, --domain and/or --system-fqdn)\n", m_stdout.getvalue())

    @patch('django_extensions.management.commands.set_default_site.socket')
    def test_should_use_domain_as_name_if_system_fqdn_return_domain_and_name_is_not_provided(self, m_socket):
        m_socket.getfqdn.return_value = 'test.com'
        call_command('set_default_site', '--system-fqdn')
        result = Site.objects.get(pk=settings.SITE_ID)
        self.assertEqual(result.name, 'test.com')
        self.assertEqual(result.domain, 'test.com')

    @patch('django_extensions.management.commands.set_default_site.socket')
    def test_should_set_custom_nameif_system_fqdn_return_domain_and_name_is_provided(self, m_socket):
        # Explicit --name wins over the FQDN-derived name.
        m_socket.getfqdn.return_value = 'test.com'
        call_command('set_default_site', '--system-fqdn', '--name=foo')
        result = Site.objects.get(pk=settings.SITE_ID)
        self.assertEqual(result.name, 'foo')
        self.assertEqual(result.domain, 'test.com')

    def test_should_set_name_and_domain_if_provided(self):
        call_command('set_default_site', '--name=foo', '--domain=bar')
        result = Site.objects.get(pk=settings.SITE_ID)
        self.assertEqual(result.name, 'foo')
        self.assertEqual(result.domain, 'bar')

    def test_should_set_name_only(self):
        # Unspecified domain keeps the default 'example.com'.
        call_command('set_default_site', '--name=foo')
        result = Site.objects.get(pk=settings.SITE_ID)
        self.assertEqual(result.name, 'foo')
        self.assertEqual(result.domain, 'example.com')

    def test_should_set_domain_only(self):
        call_command('set_default_site', '--domain=bar')
        result = Site.objects.get(pk=settings.SITE_ID)
        self.assertEqual(result.name, 'example.com')
        self.assertEqual(result.domain, 'bar')

    def test_should_not_raise_if_sites_installed_through_appconfig(self):
        # The app may be listed via its AppConfig path instead of the
        # bare app label; the command must still detect it.
        with self.modify_settings(INSTALLED_APPS={
            'append': 'django.contrib.sites.apps.SitesConfig',
            'remove': 'django.contrib.sites',
        }):
            call_command('set_default_site', '--name=foo', '--domain=foo.bar')
| mit | 3b97f8c31d1fdbc6e74014c42124c24a | 42.736264 | 111 | 0.682663 | 3.644689 | false | true | false | false |
beancount/fava | tests/test_core_watcher.py | 2 | 1123 | # pylint: disable=missing-docstring
from __future__ import annotations
import time
from pathlib import Path
from fava.core.watcher import Watcher
def test_watcher_file(tmp_path: Path) -> None:
    """A watcher notices a modification to one of its tracked files."""
    tracked = [tmp_path / name for name in ("file1", "file2")]
    for path in tracked:
        path.write_text("test")

    watcher = Watcher()
    watcher.update([str(path) for path in tracked], [])
    assert not watcher.check()

    # time.time is too precise for filesystem mtime granularity
    time.sleep(1)

    tracked[0].write_text("test2")
    assert watcher.check()
def test_watcher_deleted_file(tmp_path: Path) -> None:
    """Deleting a tracked file is reported as a change."""
    target = tmp_path / "file1"
    target.write_text("test")

    watcher = Watcher()
    watcher.update([str(target)], [])
    assert not watcher.check()

    target.unlink()
    assert watcher.check()
def test_watcher_folder(tmp_path: Path) -> None:
    """Creating a new directory inside a watched folder is detected."""
    watched = tmp_path / "folder"
    watched.mkdir()
    (watched / "bar").mkdir()

    watcher = Watcher()
    watcher.update([], [str(watched)])
    assert not watcher.check()

    # time.time is too precise for filesystem mtime granularity
    time.sleep(1)

    (watched / "bar2").mkdir()
    assert watcher.check()
| mit | e7154944bf3773c2ac59395024a8a258 | 19.796296 | 54 | 0.634016 | 3.245665 | false | true | false | false |
beancount/fava | src/fava/core/query_shell.py | 2 | 7023 | """For using the Beancount shell from Fava."""
from __future__ import annotations
import contextlib
import io
import textwrap
from typing import TYPE_CHECKING
from beancount.core.data import Entries
from beancount.core.data import Query
from beancount.parser.options import OPTIONS_DEFAULTS
from beancount.query import query_compile
from beancount.query.query import run_query
from beancount.query.query_compile import CompilationError
from beancount.query.query_execute import execute_query
from beancount.query.query_parser import ParseError
from beancount.query.query_parser import RunCustom
from beancount.query.shell import BQLShell # type: ignore
from beancount.utils import pager # type: ignore
from fava.core.module_base import FavaModule
from fava.helpers import BeancountError
from fava.helpers import FavaAPIException
from fava.util.excel import HAVE_EXCEL
from fava.util.excel import to_csv
from fava.util.excel import to_excel
# mypy: ignore-errors
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
# This is to limit the size of the history file. Fava is not using readline at
# all, but Beancount somehow still is...
# This is to limit the size of the history file. Fava is not using readline at
# all, but Beancount somehow still is...
try:
    import readline
except ImportError:
    pass
else:
    readline.set_history_length(1000)
class QueryShell(BQLShell, FavaModule):
    """A light wrapper around Beancount's shell."""

    # pylint: disable=too-many-instance-attributes

    def __init__(self, ledger: FavaLedger):
        # All shell output is captured in this buffer so that it can be
        # returned to the caller instead of being printed to stdout.
        self.buffer = io.StringIO()
        # The two base classes do not cooperate via super(), so both are
        # initialised explicitly.
        BQLShell.__init__(self, True, None, self.buffer)
        FavaModule.__init__(self, ledger)
        self.result = None
        self.stdout = self.buffer
        self.entries: Entries = []
        self.errors: list[BeancountError] = []
        self.options_map = OPTIONS_DEFAULTS
        self.queries: list[Query] = []

    def load_file(self) -> None:
        # Pick up the `query` directives from the ledger.
        self.queries = self.ledger.all_entries_by_type.Query

    def add_help(self) -> None:
        """Attach help functions for each of the parsed token handlers."""
        for attrname, func in BQLShell.__dict__.items():
            if attrname[:3] != "on_":
                continue
            command_name = attrname[3:]
            # Generate a help_<command> method that prints the handler's
            # docstring; `fun=func` binds the current loop value early.
            setattr(
                self.__class__,
                f"help_{command_name.lower()}",
                lambda _, fun=func: print(
                    textwrap.dedent(fun.__doc__).strip(), file=self.outfile
                ),
            )

    def _loadfun(self) -> None:
        # Refresh entries/errors/options from the ledger before running.
        self.entries = self.ledger.all_entries
        self.errors = self.ledger.errors
        self.options_map = self.ledger.options

    def get_pager(self):
        """No real pager, just a wrapper that doesn't close self.buffer."""
        return pager.flush_only(self.buffer)

    def noop(self, _) -> None:
        """Doesn't do anything in Fava's query shell."""
        print(self.noop.__doc__, file=self.outfile)

    # Interactive-shell commands that make no sense in Fava's context are
    # mapped to the no-op above.
    on_Reload = noop
    do_exit = noop
    do_quit = noop
    do_EOF = noop

    def on_Select(self, statement):
        # pylint: disable=invalid-name
        try:
            c_query = query_compile.compile(
                statement,
                self.env_targets,
                self.env_postings,
                self.env_entries,
            )
        except CompilationError as exc:
            print(f"ERROR: {str(exc).rstrip('.')}.", file=self.outfile)
            return
        rtypes, rrows = execute_query(c_query, self.entries, self.options_map)
        if not rrows:
            print("(empty)", file=self.outfile)
        # Stash the table so execute_query() (the method below) can
        # return it after dispatch.
        self.result = rtypes, rrows

    def execute_query(self, entries: Entries, query: str):
        """Run a query.
        Arguments:
            entries: The entries to run the query on.
            query: A query string.
        Returns:
            A tuple (contents, types, rows) where either the first or the last
            two entries are None. If the query result is a table, it will be
            contained in ``types`` and ``rows``, otherwise the result will be
            contained in ``contents`` (as a string).
        """
        self._loadfun()
        self.entries = entries
        with contextlib.redirect_stdout(self.buffer):
            self.onecmd(query)
        contents = self.buffer.getvalue()
        self.buffer.truncate(0)
        if self.result is None:
            # Not a SELECT - return the captured textual output.
            return (contents.strip().strip("\x00"), None, None)
        types, rows = self.result
        self.result = None
        return (None, types, rows)

    def on_RunCustom(self, run_stmt):
        """Run a custom query."""
        name = run_stmt.query_name
        if name is None:
            # List the available queries.
            for query in self.queries:
                print(query.name)
        else:
            try:
                query = next(
                    query for query in self.queries if query.name == name
                )
            except StopIteration:
                print(f"ERROR: Query '{name}' not found")
            else:
                statement = self.parser.parse(query.query_string)
                self.dispatch(statement)

    def query_to_file(
        self, entries: Entries, query_string: str, result_format: str
    ):
        """Get query result as file.
        Arguments:
            entries: The entries to run the query on.
            query_string: A string, the query to run.
            result_format: The file format to save to.
        Returns:
            A tuple (name, data), where name is either 'query_result' or the
            name of a custom query if the query string is 'run name_of_query'.
            ``data`` contains the file contents.
        Raises:
            FavaAPIException: If the result format is not supported or the
                query failed.
        """
        name = "query_result"
        try:
            statement = self.parser.parse(query_string)
        except ParseError as exception:
            raise FavaAPIException(str(exception)) from exception
        if isinstance(statement, RunCustom):
            # 'run <name>' - resolve the stored query by its name.
            name = statement.query_name
            try:
                query = next(
                    query for query in self.queries if query.name == name
                )
            except StopIteration as exc:
                raise FavaAPIException(f'Query "{name}" not found.') from exc
            query_string = query.query_string
        try:
            types, rows = run_query(
                entries,
                self.ledger.options,
                query_string,
                numberify=True,
            )
        except (CompilationError, ParseError) as exception:
            raise FavaAPIException(str(exception)) from exception
        if result_format == "csv":
            data = to_csv(types, rows)
        else:
            if not HAVE_EXCEL:
                raise FavaAPIException("Result format not supported.")
            data = to_excel(types, rows, result_format, query_string)
        return name, data


# Reuse the upstream docstring so `help select` keeps working.
QueryShell.on_Select.__doc__ = BQLShell.on_Select.__doc__
| mit | 01d54c7f6da8b6bff228168e2f3c6818 | 32.127358 | 78 | 0.593194 | 4.190334 | false | false | false | false |
jswhit/pygrib | test/test_ndfd_conus.py | 1 | 1827 | import pygrib, sys
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# Read the first GRIB message from the NDFD sample file and pull out the
# coordinate arrays and data values.
grbs = pygrib.open('../sampledata/ds.maxt.bin')
grb = grbs.message(1)
lats, lons = grb.latlons()
data = grb.values
# Build a cartopy Lambert conformal CRS from the GRIB projection
# parameters (spherical earth with the radii stored in the message).
globe = ccrs.Globe(ellipse='sphere', semimajor_axis=grb.projparams['a'], semiminor_axis=grb.projparams['b'])
pj = ccrs.LambertConformal(globe=globe,central_longitude=grb.projparams['lon_0'],
                           central_latitude=grb.projparams['lat_0'],
                           standard_parallels =(grb.projparams['lat_1'],grb.projparams['lat_2']))
@pytest.mark.mpl_image_compare(tolerance=20,remove_text=True)
def test_ndfd_conus():
    """Contour-plot the NDFD CONUS field on its Lambert conformal grid."""
    fig = plt.figure()
    ax = plt.axes([0.1,0.1,0.75,0.75],projection=pj)
    # Transform the two grid corner points into projection coordinates in
    # order to set the map extent.
    coords = pj.transform_points(
        ccrs.PlateCarree(), np.asarray([lons[0,0],lons[-1,-1]]), np.asarray([lats[0,0],lats[-1,-1]]))
    ax.set_extent([coords[0, 0], coords[1, 0], coords[0, 1], coords[1, 1]], crs=pj)
    if matplotlib.get_backend().lower() != 'agg':
        # don't plot borders for image comparison
        ax.coastlines()
        ax.add_feature(cfeature.BORDERS, linestyle='-');
        ax.add_feature(cfeature.STATES, linestyle='-');
    coords = pj.transform_points(ccrs.PlateCarree(), lons, lats)
    cs = ax.contourf(coords[:,:,0],coords[:,:,1],data,20,cmap=plt.cm.jet)
    # new axis for colorbar.
    cax = plt.axes([0.875, 0.15, 0.03, 0.65])
    plt.colorbar(cs, cax, format='%g') # draw colorbar
    plt.axes(ax) # make the original axes current again
    plt.title('NDFD Temp CONUS %d-h forecast'% grb.forecastTime,fontsize=12)
    return fig

# if running with GUI backend, show plot.
if matplotlib.get_backend().lower() != 'agg':
    test_ndfd_conus()
    plt.show()
| mit | 133aa22f57afc8621d350bf81c4b7631 | 41.488372 | 108 | 0.645868 | 2.942029 | false | true | false | false |
beancount/fava | src/fava/core/number.py | 2 | 3087 | """Formatting numbers."""
from __future__ import annotations
import copy
from typing import Callable
from typing import TYPE_CHECKING
from babel.core import Locale # type: ignore
from beancount.core.display_context import Precision
from beancount.core.number import Decimal
from fava.core.module_base import FavaModule
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
Formatter = Callable[[Decimal], str]
def get_locale_format(locale: Locale | None, precision: int) -> Formatter:
    """Obtain formatting pattern for the given locale and precision.

    Arguments:
        locale: An optional locale.
        precision: The precision.

    Returns:
        A function that renders Decimals to strings as desired.
    """
    # Cap the precision at 14, half the default precision of Decimal.
    precision = min(precision, 14)

    if locale is None:
        plain_format = f"{{:.{precision}f}}"

        def render_plain(num: Decimal) -> str:
            return plain_format.format(num)

        return render_plain

    # Copy the locale's decimal pattern before forcing a fixed number of
    # fractional digits, so the cached pattern stays untouched.
    pattern = copy.copy(locale.decimal_formats.get(None))
    pattern.frac_prec = (precision, precision)

    def render_locale(num: Decimal) -> str:
        return pattern.apply(num, locale)  # type: ignore

    return render_locale
class DecimalFormatModule(FavaModule):
    """Formatting numbers."""

    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        self._locale = None
        # Per-currency formatters, (re)built in load_file().
        self._formatters: dict[str, Formatter] = {}
        # Fallback formatter used when no currency is given or known.
        self._default_pattern = get_locale_format(None, 2)
        # Number of fractional digits per currency.
        self.precisions: dict[str, int] = {}

    def load_file(self) -> None:
        locale = None
        locale_option = self.ledger.fava_options.locale
        # Beancount's "render_commas" option implies at least the "en"
        # locale if none was set explicitly.
        if self.ledger.options["render_commas"] and not locale_option:
            locale_option = "en"
            self.ledger.fava_options.locale = locale_option
        if locale_option:
            locale = Locale.parse(locale_option)
        dcontext = self.ledger.options["dcontext"]
        precisions: dict[str, int] = {}
        for currency, ccontext in dcontext.ccontexts.items():
            # Use the most common fractional precision seen in the file.
            prec = ccontext.get_fractional(Precision.MOST_COMMON)
            if prec is not None:
                precisions[currency] = prec
        # Explicit commodity declarations override inferred precisions.
        precisions.update(self.ledger.commodities.precisions)
        self._locale = locale
        self._default_pattern = get_locale_format(locale, 2)
        self._formatters = {
            currency: get_locale_format(locale, prec)
            for currency, prec in precisions.items()
        }
        self.precisions = precisions

    def __call__(self, value: Decimal, currency: str | None = None) -> str:
        """Format a decimal to the right number of decimal digits with locale.
        Arguments:
            value: A decimal number.
            currency: A currency string or None.
        Returns:
            A string, the formatted decimal.
        """
        if currency is None:
            return self._default_pattern(value)
        return self._formatters.get(currency, self._default_pattern)(value)
| mit | f342b89e581fa276c650449b254c9142 | 30.5 | 78 | 0.635569 | 4.051181 | false | false | false | false |
beancount/fava | src/fava/core/budgets.py | 2 | 5871 | """Parsing and computing budgets."""
from __future__ import annotations
import datetime
from collections import Counter
from collections import defaultdict
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import TYPE_CHECKING
from beancount.core.data import Custom
from beancount.core.number import Decimal
from fava.core.module_base import FavaModule
from fava.helpers import BeancountError
from fava.util.date import days_in_daterange
from fava.util.date import Interval
from fava.util.date import number_of_days_in_period
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
class Budget(NamedTuple):
    """A budget entry.

    One parsed ``custom "budget"`` directive: ``number`` ``currency`` is
    budgeted for ``account`` per ``period``, starting at ``date_start``.
    """

    account: str
    date_start: datetime.date
    period: Interval
    number: Decimal
    currency: str


# Mapping of account name to its budgets. NOTE(review): the lookup in
# _matching_budgets stops at the first later start date, so the lists are
# presumably sorted by date_start - confirm against the entry source.
BudgetDict = Dict[str, List[Budget]]


class BudgetError(BeancountError):
    """Error with a budget."""
class BudgetModule(FavaModule):
    """Parses budget entries."""

    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        # Mapping of account name to its list of Budget entries.
        self.budget_entries: BudgetDict = {}

    def load_file(self) -> None:
        # (Re-)parse all `custom "budget"` entries from the ledger and
        # surface any parse errors on the ledger itself.
        self.budget_entries, errors = parse_budgets(
            self.ledger.all_entries_by_type.Custom
        )
        self.ledger.errors.extend(errors)

    def calculate(
        self,
        account: str,
        begin_date: datetime.date,
        end_date: datetime.date,
    ) -> dict[str, Decimal]:
        """Calculate the budget for an account in an interval."""
        return calculate_budget(
            self.budget_entries, account, begin_date, end_date
        )

    def calculate_children(
        self,
        account: str,
        begin_date: datetime.date,
        end_date: datetime.date,
    ) -> dict[str, Decimal]:
        """Calculate the budget for an account including its children."""
        return calculate_budget_children(
            self.budget_entries, account, begin_date, end_date
        )

    def __bool__(self) -> bool:
        # Truthy iff any budget entries were parsed.
        return bool(self.budget_entries)
def parse_budgets(
    custom_entries: list[Custom],
) -> tuple[BudgetDict, list[BudgetError]]:
    """Parse budget directives from custom entries.

    Args:
        custom_entries: the Custom entries to parse budgets from.

    Returns:
        A dict of accounts to lists of budgets.

    Example:
        2015-04-09 custom "budget" Expenses:Books "monthly" 20.00 EUR
    """
    budgets: BudgetDict = defaultdict(list)
    errors = []

    interval_map = {
        "daily": Interval.DAY,
        "weekly": Interval.WEEK,
        "monthly": Interval.MONTH,
        "quarterly": Interval.QUARTER,
        "yearly": Interval.YEAR,
    }

    budget_entries = (e for e in custom_entries if e.type == "budget")
    for entry in budget_entries:
        try:
            interval = interval_map.get(str(entry.values[1].value))
            if not interval:
                errors.append(
                    BudgetError(
                        entry.meta,
                        "Invalid interval for budget entry",
                        entry,
                    )
                )
                continue
            amount = entry.values[2].value
            budget = Budget(
                entry.values[0].value,
                entry.date,
                interval,
                amount.number,
                amount.currency,
            )
            budgets[budget.account].append(budget)
        except (IndexError, TypeError):
            # Missing or wrongly-typed directive arguments.
            errors.append(
                BudgetError(entry.meta, "Failed to parse budget entry", entry)
            )

    return budgets, errors
def _matching_budgets(
budgets: BudgetDict, accounts: str, date_active: datetime.date
) -> dict[str, Budget]:
"""Find matching budgets.
Returns:
The budget that is active on the specified date for the
specified account.
"""
last_seen_budgets = {}
for budget in budgets[accounts]:
if budget.date_start <= date_active:
last_seen_budgets[budget.currency] = budget
else:
break
return last_seen_budgets
def calculate_budget(
    budgets: BudgetDict,
    account: str,
    date_from: datetime.date,
    date_to: datetime.date,
) -> dict[str, Decimal]:
    """Calculate budget for an account.

    Args:
        budgets: A list of :class:`Budget` entries.
        account: An account name.
        date_from: Starting date.
        date_to: End date (exclusive).

    Returns:
        A dictionary of currency to Decimal with the budget for the
        specified account and period.
    """
    if account not in budgets:
        return {}

    totals: dict[str, Decimal] = defaultdict(Decimal)
    # Sum up, day by day, the daily share of each budget that is active
    # on that day.
    for day in days_in_daterange(date_from, date_to):
        for budget in _matching_budgets(budgets, account, day).values():
            period_days = number_of_days_in_period(budget.period, day)
            totals[budget.currency] += budget.number / period_days
    return totals
def calculate_budget_children(
    budgets: BudgetDict,
    account: str,
    date_from: datetime.date,
    date_to: datetime.date,
) -> dict[str, Decimal]:
    """Calculate budget for an account including budgets of its children.

    Args:
        budgets: A list of :class:`Budget` entries.
        account: An account name.
        date_from: Starting date.
        date_to: End date (exclusive).

    Returns:
        A dictionary of currency to Decimal with the budget for the
        specified account and period.
    """
    currency_dict: dict[str, Decimal] = Counter()  # type: ignore
    # Only the account itself and true sub-accounts (separated by ":")
    # count as children. A plain prefix match would wrongly include
    # sibling accounts such as "Expenses:Foodstuff" for "Expenses:Food".
    child_prefix = account + ":"
    for child in budgets:
        if child == account or child.startswith(child_prefix):
            currency_dict.update(
                calculate_budget(budgets, child, date_from, date_to)
            )
    return currency_dict
| mit | 4eb9256e56a9c062c904e0961636c3a0 | 26.693396 | 79 | 0.607733 | 3.966892 | false | false | false | false |
qxf2/qxf2-page-object-model | endpoints/API_Player.py | 1 | 8763 | """
API_Player class does the following:
a) serves as an interface between the test and API_Interface
b) contains several useful wrappers around commonly used combination of actions
c) maintains the test context/state
"""
from base64 import b64encode
from .API_Interface import API_Interface
from utils.results import Results
import urllib.parse
import logging
from conf import api_example_conf as conf
from utils import interactive_mode
import pytest
from _pytest import python
from _pytest import config
class API_Player(Results):
    "The class that maintains the test context/state"

    def __init__(self, url, log_file_path=None, session_flag=False):
        "Constructor"
        super(API_Player, self).__init__(
            level=logging.DEBUG, log_file_path=log_file_path)
        # All HTTP calls are delegated to this interface object.
        self.api_obj = API_Interface(url=url, session_flag=session_flag)

    def set_url(self,url):
        # Store and return the base url.
        self.url=url
        return self.url

    def set_auth_details(self, username, password):
        "encode auth details"
        user = username
        password = password
        # HTTP Basic auth: base64 of "user:password", returned as str.
        b64login = b64encode(bytes('%s:%s' %(user, password),"utf-8"))
        return b64login.decode('utf-8')

    def set_header_details(self, auth_details=None):
        "make header details"
        # With credentials -> Basic auth header; without -> JSON header.
        if auth_details != '' and auth_details is not None:
            headers = {'Authorization': "Basic %s"%(auth_details)}
        else:
            headers = {'content-type': 'application/json'}
        return headers

    def get_cars(self, auth_details=None):
        "get available cars "
        # Returns the raw response dict (not just a flag) so callers can
        # inspect the cars list.
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.get_cars(headers=headers)
        json_response = json_response['response']
        result_flag = True if json_response['successful'] == True else False
        self.write(msg="Fetched cars list:\n %s"%str(json_response))
        self.conditional_write(result_flag,
                               positive="Successfully fetched cars",
                               negative="Could not fetch cars")
        return json_response

    def get_car(self, car_name, brand, auth_details=None):
        "gets a given car details"
        url_params = {'car_name': car_name, 'brand': brand}
        url_params_encoded = urllib.parse.urlencode(url_params)
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.get_car(url_params=url_params_encoded,
                                             headers=headers)
        response = json_response['response']
        result_flag = True if response['successful'] == True else False
        self.write(msg='Fetched car details of :%s %s' % (car_name, response))
        return result_flag

    def add_car(self, car_details, auth_details=None):
        "adds a new car"
        data = car_details
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.add_car(data=data,
                                             headers=headers)
        result_flag = True if json_response['response']['successful'] == True else False
        return result_flag

    def register_car(self, car_name, brand, auth_details=None):
        "register car"
        url_params = {'car_name': car_name, 'brand': brand}
        url_params_encoded = urllib.parse.urlencode(url_params)
        # Customer payload comes from the example conf module.
        customer_details = conf.customer_details
        data = customer_details
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.register_car(url_params=url_params_encoded,
                                                  json=data,
                                                  headers=headers)
        response = (json_response['response'])
        result_flag = True if response['registered_car']['successful'] == True else False
        return result_flag

    def update_car(self, car_details, car_name='figo', auth_details=None):
        "updates a car"
        data = {'name': car_details['name'],
                'brand': car_details['brand'],
                'price_range': car_details['price_range'],
                'car_type': car_details['car_type']}
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.update_car(car_name,
                                                json=data,
                                                headers=headers)
        json_response = json_response['response']
        # NOTE(review): this indexes ['response'] twice (once above and
        # once below) - verify against the actual API payload shape.
        result_flag = True if json_response['response']['successful'] == True else False
        return result_flag

    def remove_car(self, car_name, auth_details=None):
        "deletes a car entry"
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.remove_car(car_name,
                                                headers=headers)
        result_flag = True if json_response['response']['successful'] == True else False
        return result_flag

    def get_registered_cars(self, auth_details=None):
        "gets registered cars"
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.get_registered_cars(headers=headers)
        response = json_response['response']
        result_flag = True if response['successful'] == True else False
        self.write(msg="Fetched registered cars list:\n %s"%str(json_response))
        self.conditional_write(result_flag,
                               positive='Successfully fetched registered cars list',
                               negative='Could not fetch registered cars list')
        return response

    def delete_registered_car(self, auth_details=None):
        "deletes registered car"
        headers = self.set_header_details(auth_details)
        json_response = self.api_obj.delete_registered_car(headers=headers)
        result_flag = True if json_response['response']['successful'] == True else False
        self.conditional_write(result_flag,
                               positive='Successfully deleted registered cars',
                               negative='Could not delete registered car')

    def get_car_count(self,auth_details=None):
        "Verify car count at the start"
        self.write('\n*****Verifying car count******')
        car_count = self.get_cars(auth_details)
        car_count = len(car_count['cars_list'])
        return car_count

    def get_regi_car_count(self,auth_details=None):
        "Verify registered car count"
        car_count_registered = self.get_registered_cars(auth_details)
        car_count_registered = len(car_count_registered['registered'])
        return car_count_registered

    def verify_car_count(self, expected_count, auth_details=None):
        "Verify car count"
        self.write('\n*****Verifying car count******')
        car_count = self.get_cars(auth_details)
        car_count = len(car_count['cars_list'])
        result_flag = True if car_count == expected_count else False
        return result_flag

    def verify_registration_count(self, expected_count, auth_details=None):
        "Verify registered car count"
        self.write('\n******Verifying registered car count********')
        car_count = self.get_registered_cars(auth_details)
        car_count = len(car_count['registered'])
        result_flag = True if car_count == expected_count else False
        return result_flag

    def get_user_list(self, auth_details=None):
        "get user list"
        headers = self.set_header_details(auth_details)
        result = self.api_obj.get_user_list(headers=headers)
        self.write("Request & Response:\n%s\n" % str(result))
        try:
            # NOTE(review): user_list/error are assigned but unused; the
            # try only probes that the expected keys are present.
            response = result
            if response is not None:
                user_list = result['user_list']
                error = result['response']
        except (TypeError, AttributeError) as e:
            raise e
        return {'user_list': result['user_list'], 'response_code': result['response']}

    def check_validation_error(self, auth_details=None):
        "verify validatin error 403"
        result = self.get_user_list(auth_details)
        response_code = result['response_code']
        result_flag = False
        msg = ''
        # Map the HTTP status code to a human-readable verdict; only 200
        # counts as success.
        if response_code == 403:
            msg = "403 FORBIDDEN: Authentication successful but no access for non admin users"
        elif response_code == 200:
            result_flag = True
            msg = "successful authentication and access permission"
        elif response_code == 401:
            msg = "401 UNAUTHORIZED: Authenticate with proper credentials OR Require Basic Authentication"
        elif response_code == 404:
            msg = "404 NOT FOUND: URL not found"
        else:
            msg = "unknown reason"
        return {'result_flag': result_flag, 'msg': msg}
| mit | 975c3b483351973d1cf521020be61a97 | 36.131356 | 106 | 0.606299 | 4.180821 | false | false | false | false |
qxf2/qxf2-page-object-model | utils/setup_testrail.py | 1 | 6905 | """
One off utility script to setup TestRail for an automated run
This script can:
a) Add a milestone if it does not exist
b) Add a test run (even without a milestone if needed)
c) Add select test cases to the test run using the setup_testrail.conf file
d) Write out the latest run id to a 'latest_test_run.txt' file
This script will NOT:
a) Add a project if it does not exist
"""
import os,ConfigParser,time
from .Test_Rail import Test_Rail
from optparse import OptionParser
def check_file_exists(file_path):
    "Check that the config file exists and is a regular file"
    if not os.path.exists(file_path):
        print('\n****')
        print('Unable to locate the provided config file: ')
        print(file_path)
        print('****')
        return False
    if not os.path.isfile(file_path):
        print('\n****')
        print('Config file provided is not a file: ')
        print(file_path)
        print('****')
        return False
    return True
def check_options(options):
    "Check if the command line options are valid"
    # Only the test-cases conf file (when given) needs validating.
    if options.test_cases_conf is None:
        return True
    conf_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'conf', options.test_cases_conf))
    return check_file_exists(conf_path)
def save_new_test_run_details(filename, test_run_name, test_run_id):
    """Write out the latest test run name and id to the given file.

    Uses a `with` block so the file handle is closed even if a write
    fails (the original open/close pair leaked the handle on error).
    """
    with open(filename, 'w') as fp:
        fp.write('TEST_RUN_NAME=%s\n' % test_run_name)
        fp.write('TEST_RUN_ID=%s\n' % str(test_run_id))
def setup_testrail(project_name='POM DEMO',milestone_name=None,test_run_name=None,test_cases_conf=None,description=None,name_override_flag='N',case_ids_list=None):
    "Setup TestRail for an automated run"
    #1. Get project id
    #2. if milestone_name is not None
    #   create the milestone if it does not already exist
    #3. if test_run_name is not None
    #   create the test run if it does not already exist
    #   TO DO: if test_cases_conf is not None -> pass ids as parameters
    #4. write out test runid to latest_test_run.txt
    conf_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','conf'))
    config = ConfigParser.ConfigParser()
    tr_obj = Test_Rail()
    #1. Get project id
    project_id = tr_obj.get_project_id(project_name)
    if project_id is not None: #i.e., the project exists
        #2. if milestone_name is not None:
        #   create the milestone if it does not already exist
        if milestone_name is not None:
            tr_obj.create_milestone(project_name,milestone_name)
        #3. if test_run_name is not None
        #   create the test run if it does not already exist
        #   if test_cases_conf is not None -> pass ids as parameters
        if test_run_name is not None:
            case_ids = []
            #Set the case ids
            if case_ids_list is not None:
                #Getting case ids from command line
                case_ids = case_ids_list.split(',')
            else:
                #Getting case ids based on given description(test name)
                if description is not None:
                    if check_file_exists(os.path.join(conf_dir,test_cases_conf)):
                        config.read(os.path.join(conf_dir,test_cases_conf))
                        case_ids = config.get(description,'case_ids')
                        case_ids = case_ids.split(',')
            #Set test_run_name
            if name_override_flag.lower() == 'y':
                # Append a timestamp plus the description so repeated runs
                # get unique names in TestRail.
                test_run_name = test_run_name + "-" + time.strftime("%d/%m/%Y/%H:%M:%S") + "_for_"
                #Use description as test_run_name
                if description is None:
                    test_run_name = test_run_name + "All"
                else:
                    test_run_name = test_run_name + str(description)
            tr_obj.create_test_run(project_name,test_run_name,milestone_name=milestone_name,case_ids=case_ids,description=description)
            run_id = tr_obj.get_run_id(project_name,test_run_name)
            #4. Persist the run details for later scripts to pick up.
            save_new_test_run_details(os.path.join(conf_dir,'latest_test_run.txt'),test_run_name,run_id)
    else:
        print('Project does not exist: ',project_name)
        print('Stopping the script without doing anything.')
#---START OF SCRIPT
if __name__=='__main__':
#This script takes an optional command line argument for the TestRail run id
usage = '\n----\n%prog -p <OPTIONAL: Project name> -m <OPTIONAL: milestone_name> -r <OPTIONAL: Test run name> -t <OPTIONAL: test cases conf file> -d <OPTIONAL: Test run description>\n----\nE.g.: %prog -p "Secure Code Warrior - Test" -m "Pilot NetCetera" -r commit_id -t setup_testrail.conf -d Registration\n---'
parser = OptionParser(usage=usage)
parser.add_option("-p","--project",
dest="project_name",
default="POM DEMO",
help="Project name")
parser.add_option("-m","--milestone",
dest="milestone_name",
default=None,
help="Milestone name")
parser.add_option("-r","--test_run_name",
dest="test_run_name",
default=None,
help="Test run name")
parser.add_option("-t","--test_cases_conf",
dest="test_cases_conf",
default="setup_testrail.conf",
help="Test cases conf listing test names and ids you want added")
parser.add_option("-d","--test_run_description",
dest="test_run_description",
default=None,
help="The name of the test Registration_Tests/Intro_Run_Tests/Sales_Demo_Tests")
parser.add_option("-n","--name_override_flag",
dest="name_override_flag",
default="Y",
help="Y or N. 'N' if you don't want to override the default test_run_name")
parser.add_option("-c","--case_ids_list",
dest="case_ids_list",
default=None,
help="Pass all case ids with comma separated you want to add in test run")
(options,args) = parser.parse_args()
#Run the script only if the options are valid
if check_options(options):
setup_testrail(project_name=options.project_name,
milestone_name=options.milestone_name,
test_run_name=options.test_run_name,
test_cases_conf=options.test_cases_conf,
description=options.test_run_description,
name_override_flag=options.name_override_flag,
case_ids_list=options.case_ids_list)
else:
print('ERROR: Received incorrect input arguments')
print(parser.print_usage())
| mit | 3ca9ebc40ad27c6e73906ebf9ce51fff | 42.702532 | 315 | 0.584794 | 3.80022 | false | true | false | false |
qxf2/qxf2-page-object-model | utils/Image_Compare.py | 1 | 4039 | """
Qxf2 Services: Utility script to compare images
* Compare two images(actual and expected) smartly and generate a resultant image
* Get the sum of colors in an image
"""
from PIL import Image, ImageChops
import math, os
def rmsdiff(im1,im2):
"Calculate the root-mean-square difference between two images"
h = ImageChops.difference(im1, im2).histogram()
# calculate rms
return math.sqrt(sum(h*(i**2) for i, h in enumerate(h)) / (float(im1.size[0]) * im1.size[1]))
def is_equal(img_actual,img_expected,result):
"Returns true if the images are identical(all pixels in the difference image are zero)"
result_flag = False
if not os.path.exists(img_actual):
print('Could not locate the generated image: %s'%img_actual)
if not os.path.exists(img_expected):
print('Could not locate the baseline image: %s'%img_expected)
if os.path.exists(img_actual) and os.path.exists(img_expected):
actual = Image.open(img_actual)
expected = Image.open(img_expected)
result_image = ImageChops.difference(actual,expected)
color_matrix = ([0] + ([255] * 255))
result_image = result_image.convert('L')
result_image = result_image.point(color_matrix)
result_image.save(result)#Save the result image
if (ImageChops.difference(actual,expected).getbbox() is None):
result_flag = True
else:
#Let's do some interesting processing now
result_flag = analyze_difference_smartly(result)
if result_flag is False:
print("Since there is a difference in pixel value of both images, we are checking the threshold value to pass the images with minor difference")
#Now with threshhold!
result_flag = True if rmsdiff(actual,expected) < 958 else False
#For temporary debug purposes
print('RMS diff score: ',rmsdiff(actual,expected))
return result_flag
def analyze_difference_smartly(img):
"Make an evaluation of a difference image"
result_flag = False
if not os.path.exists(img):
print('Could not locate the image to analyze the difference smartly: %s'%img)
else:
my_image = Image.open(img)
#Not an ideal line, but we dont have any enormous images
pixels = list(my_image.getdata())
pixels = [1 for x in pixels if x!=0]
num_different_pixels = sum(pixels)
print('Number of different pixels in the result image: %d'%num_different_pixels)
#Rule 1: If the number of different pixels is <10, then pass the image
#This is relatively safe since all changes to objects will be more than 10 different pixels
if num_different_pixels < 10:
result_flag = True
return result_flag
def get_color_sum(img):
"Get the sum of colors in an image"
sum_color_pixels = -1
if not os.path.exists(img):
print('Could not locate the image to sum the colors: %s'%actual)
else:
my_image = Image.open(img)
color_matrix = ([0] + ([255] * 255))
my_image = my_image.convert('L')
my_image = my_image.point(color_matrix)
#Not an ideal line, but we don't have any enormous images
pixels = list(my_image.getdata())
sum_color_pixels = sum(pixels)
print('Sum of colors in the image %s is %d'%(img,sum_color_pixels))
return sum_color_pixels
#--START OF SCRIPT
if __name__=='__main__':
# Please update below img1, img2, result_img values before running this script
img1 = r'Add path of first image'
img2 = r'Add path of second image'
result_img= r'Add path of result image' #please add path along with resultant image name which you want
# Compare images and generate a resultant difference image
result_flag = is_equal(img1,img2,result_img)
if (result_flag == True):
print("Both images are matching")
else:
print("Images are not matching")
# Get the sum of colors in an image
get_color_sum(img1)
| mit | ba932bdf9817d919f09847cf1555a52d | 37.836538 | 160 | 0.653132 | 3.813975 | false | false | false | false |
cebrusfs/217gdb | pwndbg/abi.py | 3 | 6452 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import re
import gdb
import pwndbg.arch
import pwndbg.color.message as M
class ABI(object):
"""
Encapsulates information about a calling convention.
"""
#: List or registers which should be filled with arguments before
#: spilling onto the stack.
register_arguments = []
#: Minimum alignment of the stack.
#: The value used is min(context.bytes, stack_alignment)
#: This is necessary as Windows x64 frames must be 32-byte aligned.
#: "Alignment" is considered with respect to the last argument on the stack.
arg_alignment = 1
#: Minimum number of stack slots used by a function call
#: This is necessary as Windows x64 requires using 4 slots on the stack
stack_minimum = 0
#: Indicates that this ABI returns to the next address on the slot
returns = True
def __init__(self, regs, align, minimum):
self.register_arguments = regs
self.arg_alignment = align
self.stack_minimum = minimum
@staticmethod
def default():
return {
(32, 'i386', 'linux'): linux_i386,
(64, 'x86-64', 'linux'): linux_amd64,
(64, 'aarch64', 'linux'): linux_aarch64,
(32, 'arm', 'linux'): linux_arm,
(32, 'thumb', 'linux'): linux_arm,
(32, 'mips', 'linux'): linux_mips,
(32, 'powerpc', 'linux'): linux_ppc,
(64, 'powerpc', 'linux'): linux_ppc64,
}[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]
@staticmethod
def syscall():
return {
(32, 'i386', 'linux'): linux_i386_syscall,
(64, 'x86-64', 'linux'): linux_amd64_syscall,
(64, 'aarch64', 'linux'): linux_aarch64_syscall,
(32, 'arm', 'linux'): linux_arm_syscall,
(32, 'thumb', 'linux'): linux_arm_syscall,
(32, 'mips', 'linux'): linux_mips_syscall,
(32, 'powerpc', 'linux'): linux_ppc_syscall,
(64, 'powerpc', 'linux'): linux_ppc64_syscall,
}[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]
@staticmethod
def sigreturn():
return {
(32, 'i386', 'linux'): linux_i386_sigreturn,
(64, 'x86-64', 'linux'): linux_amd64_sigreturn,
(32, 'arm', 'linux'): linux_arm_sigreturn,
(32, 'thumb', 'linux'): linux_arm_sigreturn,
}[(8*pwndbg.arch.ptrsize, pwndbg.arch.current, 'linux')]
class SyscallABI(ABI):
"""
The syscall ABI treats the syscall number as the zeroth argument,
which must be loaded into the specified register.
"""
def __init__(self, register_arguments, *a, **kw):
self.syscall_register = register_arguments.pop(0)
super(SyscallABI, self).__init__(register_arguments, *a, **kw)
class SigreturnABI(SyscallABI):
"""
The sigreturn ABI is similar to the syscall ABI, except that
both PC and SP are loaded from the stack. Because of this, there
is no 'return' slot necessary on the stack.
"""
returns = False
linux_i386 = ABI([], 4, 0)
linux_amd64 = ABI(['rdi','rsi','rdx','rcx','r8','r9'], 8, 0)
linux_arm = ABI(['r0', 'r1', 'r2', 'r3'], 8, 0)
linux_aarch64 = ABI(['x0', 'x1', 'x2', 'x3'], 16, 0)
linux_mips = ABI(['$a0','$a1','$a2','$a3'], 4, 0)
linux_ppc = ABI(['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'], 4, 0)
linux_ppc64 = ABI(['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'], 8, 0)
linux_i386_syscall = SyscallABI(['eax', 'ebx', 'ecx', 'edx', 'esi', 'edi', 'ebp'], 4, 0)
linux_amd64_syscall = SyscallABI(['rax','rdi', 'rsi', 'rdx', 'r10', 'r8', 'r9'], 8, 0)
linux_arm_syscall = SyscallABI(['r7', 'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6'], 4, 0)
linux_aarch64_syscall = SyscallABI(['x8', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6'], 16, 0)
linux_mips_syscall = SyscallABI(['$v0', '$a0','$a1','$a2','$a3'], 4, 0)
linux_ppc_syscall = ABI(['r0', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9'], 4, 0)
linux_ppc64_syscall = ABI(['r0', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9'], 8, 0)
linux_i386_sigreturn = SigreturnABI(['eax'], 4, 0)
linux_amd64_sigreturn = SigreturnABI(['rax'], 4, 0)
linux_arm_sigreturn = SigreturnABI(['r7'], 4, 0)
# Fake ABIs used by SROP
linux_i386_srop = ABI(['eax'], 4, 0)
linux_amd64_srop = ABI(['rax'], 4, 0)
linux_arm_srop = ABI(['r7'], 4, 0)
@pwndbg.events.start
def update():
global abi
global linux
# Detect current ABI of client side by 'show osabi'
#
# Examples of strings returned by `show osabi`:
# 'The current OS ABI is "auto" (currently "GNU/Linux").\nThe default OS ABI is "GNU/Linux".\n'
# 'The current OS ABI is "GNU/Linux".\nThe default OS ABI is "GNU/Linux".\n'
# 'El actual SO ABI es «auto» (actualmente «GNU/Linux»).\nEl SO ABI predeterminado es «GNU/Linux».\n'
# 'The current OS ABI is "auto" (currently "none")'
#
# As you can see, there might be GDBs with different language versions
# and so we have to support it there too.
# Lets assume and hope that `current osabi` is returned in first line in all languages...
abi = gdb.execute('show osabi', to_string=True).split('\n')[0]
# Currently we support those osabis:
# 'GNU/Linux': linux
# 'none': bare metal
linux = 'GNU/Linux' in abi
if not linux:
msg = M.warn(
"The bare metal debugging is enabled since the gdb's osabi is '%s' which is not 'GNU/Linux'.\n"
"Ex. the page resolving and memory de-referencing ONLY works on known pages.\n"
"This option is based ib gdb client compile arguments (by default) and will be corrected if you load an ELF which has the '.note.ABI-tag' section.\n"
"If you are debuging a program that runs on Linux ABI, please select the correct gdb client."
% abi
)
print(msg)
def LinuxOnly(default=None):
"""Create a decorator that the function will be called when ABI is Linux.
Otherwise, return `default`.
"""
def decorator(func):
@functools.wraps(func)
def caller(*args, **kwargs):
if linux:
return func(*args, **kwargs)
else:
return default
return caller
return decorator
# Update when starting the gdb to show warning message for non-Linux ABI user.
update()
| mit | e72b60bb9ee973f911cf59d786ef141e | 36.045977 | 161 | 0.596649 | 3.07979 | false | false | false | false |
cebrusfs/217gdb | pwndbg/color/__init__.py | 3 | 2813 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import pwndbg.memoize
from . import theme as theme
NORMAL = "\x1b[0m"
BLACK = "\x1b[30m"
RED = "\x1b[31m"
GREEN = "\x1b[32m"
YELLOW = "\x1b[33m"
BLUE = "\x1b[34m"
PURPLE = "\x1b[35m"
CYAN = "\x1b[36m"
LIGHT_GREY = LIGHT_GRAY = "\x1b[37m"
FOREGROUND = "\x1b[39m"
GREY = GRAY = "\x1b[90m"
LIGHT_RED = "\x1b[91m"
LIGHT_GREEN = "\x1b[92m"
LIGHT_YELLOW = "\x1b[93m"
LIGHT_BLUE = "\x1b[94m"
LIGHT_PURPLE = "\x1b[95m"
LIGHT_CYAN = "\x1b[96m"
WHITE = "\x1b[97m"
BOLD = "\x1b[1m"
UNDERLINE = "\x1b[4m"
def none(x): return str(x)
def normal(x): return colorize(x, NORMAL)
def black(x): return colorize(x, BLACK)
def red(x): return colorize(x, RED)
def green(x): return colorize(x, GREEN)
def yellow(x): return colorize(x, YELLOW)
def blue(x): return colorize(x, BLUE)
def purple(x): return colorize(x, PURPLE)
def cyan(x): return colorize(x, CYAN)
def light_gray(x): return colorize(x, LIGHT_GRAY)
def foreground(x): return colorize(x, FOREGROUND)
def gray(x): return colorize(x, GRAY)
def light_red(x): return colorize(x, LIGHT_RED)
def light_green(x): return colorize(x, LIGHT_GREEN)
def light_yellow(x): return colorize(x, LIGHT_YELLOW)
def light_blue(x): return colorize(x, LIGHT_BLUE)
def light_purple(x): return colorize(x, LIGHT_PURPLE)
def light_cyan(x): return colorize(x, LIGHT_CYAN)
def white(x): return colorize(x, WHITE)
def bold(x): return colorize(x, BOLD)
def underline(x): return colorize(x, UNDERLINE)
def colorize(x, color): return color + terminateWith(str(x), color) + NORMAL
disable_colors = theme.Parameter('disable-colors', bool(os.environ.get('PWNDBG_DISABLE_COLORS')), 'whether to color the output or not')
@pwndbg.memoize.reset_on_stop
def generateColorFunctionInner(old, new):
def wrapper(text):
return new(old(text))
return wrapper
def generateColorFunction(config):
function = lambda x: x
if disable_colors:
return function
for color in config.split(','):
function = generateColorFunctionInner(function, globals()[color.lower().replace('-', '_')])
return function
def strip(x):
return re.sub('\x1b\\[\d+m', '', x)
def terminateWith(x, color):
return re.sub('\x1b\\[0m', NORMAL + color, x)
def ljust_colored(x, length, char=' '):
remaining = length - len(strip(x))
return x + ((remaining // len(char) + 1) * char)[:remaining]
def rjust_colored(x, length, char=' '):
remaining = length - len(strip(x))
return ((remaining // len(char) + 1) * char)[:remaining] + x
| mit | 34e277f69afc5ffa9235ad8777ac0284 | 29.576087 | 135 | 0.648774 | 2.850051 | false | false | false | false |
cebrusfs/217gdb | pwndbg/net.py | 5 | 7301 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Re-implements some psutil functionality to be able to get information from
remote debugging sessions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import binascii
import socket
import pwndbg.arch
import pwndbg.file
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
TCP_STATUSES = {
"01": "established",
"02": "syn_sent",
"03": "syn_recv",
"04": "fin_wait1",
"05": "fin_wait2",
"06": "time_wait",
"07": "close",
"08": "close_wait",
"09": "last_ack",
"0A": "listen",
"0B": "closing",
}
class inode(object):
inode = None
class Connection(inode):
rhost = None
lhost = None
rport = None
lport = None
inode = None
status = None
family = None
def __str__(self):
return "%s %s:%s => %s:%s (%s)" % (self.family,
self.lhost,
self.lport,
self.rhost,
self.rport,
self.status)
def __repr__(self):
return "Connection(\"%s\")" % self
class UnixSocket(inode):
path = "(anonymous)"
def __str__(self):
return "unix %r" % self.path
def __repr__(self):
return "UnixSocket(%s)" % self
def tcp():
# For reference, see:
# https://www.kernel.org/doc/Documentation/networking/proc_net_tcp.txt
"""
It will first list all listening TCP sockets, and next list all established
TCP connections. A typical entry of /proc/net/tcp would look like this (split
up into 3 parts because of the length of the line):
"""
data = pwndbg.file.get("/proc/net/tcp").decode()
if not data:
return []
result = []
for line in data.splitlines()[1:]:
fields = line.split()
"""
46: 010310AC:9C4C 030310AC:1770 01
| | | | | |--> connection state
| | | | |------> remote TCP port number
| | | |-------------> remote IPv4 address
| | |--------------------> local TCP port number
| |---------------------------> local IPv4 address
|----------------------------------> number of entry
"""
local = fields[1]
remote = fields[2]
status = fields[3]
"""
00000150:00000000 01:00000019 00000000
| | | | |--> number of unrecovered RTO timeouts
| | | |----------> number of jiffies until timer expires
| | |----------------> timer_active (see below)
| |----------------------> receive-queue
|-------------------------------> transmit-queue
"""
"""
1000 0 54165785 4 cd1e6040 25 4 27 3 -1
| | | | | | | | | |--> slow start size threshold,
| | | | | | | | | or -1 if the threshold
| | | | | | | | | is >= 0xFFFF
| | | | | | | | |----> sending congestion window
| | | | | | | |-------> (ack.quick<<1)|ack.pingpong
| | | | | | |---------> Predicted tick of soft clock
| | | | | | (delayed ACK control data)
| | | | | |------------> retransmit timeout
| | | | |------------------> location of socket in memory
| | | |-----------------------> socket reference count
| | |-----------------------------> inode
| |----------------------------------> unanswered 0-window probes
|---------------------------------------------> uid
"""
inode = fields[9]
# Actually extract the useful data
def split_hist_port(hostport):
host, port = hostport.split(':')
host = binascii.unhexlify(host)
if pwndbg.arch.endian == 'little':
host = host[::-1]
host = socket.inet_ntop(socket.AF_INET, host)
port = int(port, 16)
return host, port
c = Connection()
c.rhost, c.rport = split_hist_port(remote)
c.lhost, c.lport = split_hist_port(local)
c.inode = int(inode)
c.status = TCP_STATUSES.get(status, 'unknown')
c.family = "tcp"
result.append(c)
return result
def unix():
data = pwndbg.file.get("/proc/net/unix").decode()
if not data:
return []
result = []
for line in data.splitlines()[1:]:
"""
Num RefCount Protocol Flags Type St Inode Path
0000000000000000: 00000002 00000000 00010000 0005 01 1536 /dev/socket/msm_irqbalance
"""
fields = line.split(None, 7)
u = UnixSocket()
if len(fields) >= 8:
u.path = fields[7]
u.inode = int(fields[6])
result.append(u)
return result
NETLINK_TYPES = {
0 : "NETLINK_ROUTE", # /* Routing/device hook */
1 : "NETLINK_UNUSED", # /* Unused number */
2 : "NETLINK_USERSOCK", # /* Reserved for user mode socket protocols */
3 : "NETLINK_FIREWALL", # /* Unused number", formerly ip_queue */
4 : "NETLINK_SOCK_DIAG", # /* socket monitoring */
5 : "NETLINK_NFLOG", # /* netfilter/iptables ULOG */
6 : "NETLINK_XFRM", # /* ipsec */
7 : "NETLINK_SELINUX", # /* SELinux event notifications */
8 : "NETLINK_ISCSI", # /* Open-iSCSI */
9 : "NETLINK_AUDIT", # /* auditing */
10: "NETLINK_FIB_LOOKUP", #
11: "NETLINK_CONNECTOR", #
12: "NETLINK_NETFILTER", # /* netfilter subsystem */
13: "NETLINK_IP6_FW", #
14: "NETLINK_DNRTMSG", # /* DECnet routing messages */
15: "NETLINK_KOBJECT_UEVENT", # /* Kernel messages to userspace */
16: "NETLINK_GENERIC", #
18: "NETLINK_SCSITRANSPORT", # /* SCSI Transports */
19: "NETLINK_ECRYPTFS", #
20: "NETLINK_RDMA", #
21: "NETLINK_CRYPTO", # /* Crypto layer */
}
class Netlink(inode):
eth = 0
def __str__(self):
return NETLINK_TYPES.get(self.eth, "(unknown netlink)")
def __repr__(self):
return "Netlink(%s)" % self
def netlink():
data = pwndbg.file.get("/proc/net/netlink").decode()
if not data:
return []
result = []
for line in data.splitlines()[1:]:
# sk Eth Pid Groups Rmem Wmem Dump Locks Drops Inode [10/8747]
fields = line.split()
n = Netlink()
n.eth = int(fields[1])
n.pid = int(fields[2])
n.inode = int(fields[9])
result.append(n)
return result
| mit | a2290c456c5e53d33e8d834e863b91ed | 32.95814 | 112 | 0.444597 | 3.780943 | false | false | false | false |
cebrusfs/217gdb | pwndbg/commands/dumpargs.py | 1 | 1781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pwndbg.arguments
import pwndbg.chain
import pwndbg.commands
import pwndbg.commands.telescope
import pwndbg.disasm
parser = argparse.ArgumentParser(
description='Prints determined arguments for call instruction.'
)
parser.add_argument('-f', '--force', action='store_true', help='Force displaying of all arguments.')
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def dumpargs(force=False):
args = (not force and call_args()) or all_args()
if args:
print('\n'.join(args))
else:
print("Couldn't resolve call arguments from registers.")
print("Detected ABI: {} ({} bit) either doesn't pass arguments through registers or is not implemented. Maybe they are passed on the stack?".format(pwndbg.arch.current, pwndbg.arch.ptrsize*8))
def call_args():
"""
Returns list of resolved call argument strings for display.
Attempts to resolve the target and determine the number of arguments.
Should be used only when being on a call instruction.
"""
results = []
for arg, value in pwndbg.arguments.get(pwndbg.disasm.one()):
code = False if arg.type == 'char' else True
pretty = pwndbg.chain.format(value, code=code)
results.append(' %-10s %s' % (arg.name+':', pretty))
return results
def all_args():
"""
Returns list of all argument strings for display.
"""
results = []
for name, value in pwndbg.arguments.arguments():
results.append('%4s = %s' % (name, pwndbg.chain.format(value)))
return results
| mit | e87f32f7c3569300b930f40ce1861989 | 29.186441 | 200 | 0.685008 | 3.765328 | false | false | false | false |
cebrusfs/217gdb | pwndbg/android.py | 4 | 2776 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gdb
import pwndbg.color.message as message
import pwndbg.events
import pwndbg.file
import pwndbg.memoize
import pwndbg.remote
@pwndbg.memoize.reset_on_start
@pwndbg.memoize.reset_on_exit
def is_android():
try:
if pwndbg.file.get('/system/etc/hosts'):
return True
except OSError:
pass
return False
@pwndbg.events.start
def sysroot():
cmd = 'set sysroot remote:/'
if is_android():
if gdb.parameter('sysroot') == 'target:':
gdb.execute(cmd)
else:
print(message.notice("sysroot is already set, skipping %r" % cmd))
KNOWN_AIDS = {
0: "AID_ROOT",
1000: "AID_SYSTEM",
1001: "AID_RADIO",
1002: "AID_BLUETOOTH",
1003: "AID_GRAPHICS",
1004: "AID_INPUT",
1005: "AID_AUDIO",
1006: "AID_CAMERA",
1007: "AID_LOG",
1008: "AID_COMPASS",
1009: "AID_MOUNT",
1010: "AID_WIFI",
1011: "AID_ADB",
1012: "AID_INSTALL",
1013: "AID_MEDIA",
1014: "AID_DHCP",
1015: "AID_SDCARD_RW",
1016: "AID_VPN",
1017: "AID_KEYSTORE",
1018: "AID_USB",
1019: "AID_DRM",
1020: "AID_MDNSR",
1021: "AID_GPS",
1022: "AID_UNUSED1",
1023: "AID_MEDIA_RW",
1024: "AID_MTP",
1025: "AID_UNUSED2",
1026: "AID_DRMRPC",
1027: "AID_NFC",
1028: "AID_SDCARD_R",
1029: "AID_CLAT",
1030: "AID_LOOP_RADIO",
1031: "AID_MEDIA_DRM",
1032: "AID_PACKAGE_INFO",
1033: "AID_SDCARD_PICS",
1034: "AID_SDCARD_AV",
1035: "AID_SDCARD_ALL",
1036: "AID_LOGD",
1037: "AID_SHARED_RELRO",
1038: "AID_DBUS",
1039: "AID_TLSDATE",
1040: "AID_MEDIA_EX",
1041: "AID_AUDIOSERVER",
1042: "AID_METRICS_COLL",
1043: "AID_METRICSD",
1044: "AID_WEBSERV",
1045: "AID_DEBUGGERD",
1046: "AID_MEDIA_CODEC",
1047: "AID_CAMERASERVER",
1048: "AID_FIREWALL",
1049: "AID_TRUNKS",
1050: "AID_NVRAM",
2001: "AID_CACHE",
2002: "AID_DIAG",
2900: "AID_OEM_RESERVED_START",
2999: "AID_OEM_RESERVED_END",
3001: "AID_NET_BT_ADMIN",
3002: "AID_NET_BT",
3003: "AID_INET",
3004: "AID_NET_RAW",
3005: "AID_NET_ADMIN",
3006: "AID_NET_BW_STATS",
3007: "AID_NET_BW_ACCT",
3008: "AID_NET_BT_STACK",
3009: "AID_READPROC",
3010: "AID_WAKELOCK",
5000: "AID_OEM_RESERVED_2_START",
5999: "AID_OEM_RESERVED_2_END",
9997: "AID_EVERYBODY",
9998: "AID_MISC",
9999: "AID_NOBODY",
10000: "AID_APP",
50000: "AID_SHARED_GID_START",
59999: "AID_SHARED_GID_END",
99000: "AID_ISOLATED_START",
99999: "AID_ISOLATED_END",
100000: "AID_USER",
}
def aid_name(uid):
if uid in KNOWN_AIDS:
return KNOWN_AIDS[uid]
for closest in sorted(KNOWN_AIDS, reverse=True):
if uid > closest:
break
else:
return str(uid)
return "%s+%s" % (KNOWN_AIDS[closest], uid-closest)
| mit | d278de10429e3260ee39dca282a3bd06 | 20.858268 | 78 | 0.648415 | 2.236906 | false | false | false | false |
python-attrs/cattrs | tests/test_factory_hooks.py | 1 | 1560 | """Tests for the factory hooks documentation."""
from attr import define, fields, has
from cattrs.gen import make_dict_structure_fn, make_dict_unstructure_fn, override
def to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
def test_snake_to_camel(converter_cls):
@define
class Inner:
a_snake_case_int: int
a_snake_case_float: float
a_snake_case_str: str
@define
class Outer:
a_snake_case_inner: Inner
converter = converter_cls()
def unstructure_adapt_to_camel_case(type):
return make_dict_unstructure_fn(
type,
converter,
**{a.name: override(rename=to_camel_case(a.name)) for a in fields(type)}
)
converter.register_unstructure_hook_factory(has, unstructure_adapt_to_camel_case)
original = Outer(Inner(0, 0.0, "str"))
unstructured = converter.unstructure(original)
assert unstructured == {
"aSnakeCaseInner": {
"aSnakeCaseInt": 0,
"aSnakeCaseFloat": 0.0,
"aSnakeCaseStr": "str",
}
}
def structure_adapt_to_camel_case(type):
overrides = {
a.name: override(rename=to_camel_case(a.name)) for a in fields(type)
}
return make_dict_structure_fn(type, converter, **overrides)
converter.register_structure_hook_factory(has, structure_adapt_to_camel_case)
structured = converter.structure(unstructured, Outer)
assert structured == original
| mit | c417999ea709853456448a7d95a6e44a | 27.888889 | 85 | 0.633333 | 3.577982 | false | false | false | false |
python-attrs/cattrs | src/cattrs/converters.py | 1 | 37598 | from collections import Counter
from collections.abc import MutableSet as AbcMutableSet
from dataclasses import Field
from enum import Enum
from functools import lru_cache
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
NoReturn,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from attr import Attribute
from attr import has as attrs_has
from attr import resolve_types
from cattrs.errors import IterableValidationError, StructureHandlerNotFoundError
from ._compat import (
FrozenSetSubscriptable,
Mapping,
MutableMapping,
MutableSequence,
OriginAbstractSet,
OriginMutableSet,
Sequence,
Set,
fields,
get_newtype_base,
get_origin,
has,
has_with_generic,
is_annotated,
is_bare,
is_counter,
is_frozenset,
is_generic,
is_generic_attrs,
is_hetero_tuple,
is_literal,
is_mapping,
is_mutable_set,
is_protocol,
is_sequence,
is_tuple,
is_union_type,
)
from .disambiguators import create_uniq_field_dis_func
from .dispatch import MultiStrategyDispatch
from .gen import (
AttributeOverride,
DictStructureFn,
HeteroTupleUnstructureFn,
IterableUnstructureFn,
MappingStructureFn,
MappingUnstructureFn,
make_dict_structure_fn,
make_dict_unstructure_fn,
make_hetero_tuple_unstructure_fn,
make_iterable_unstructure_fn,
make_mapping_structure_fn,
make_mapping_unstructure_fn,
)
# Alias for the type of ``None``; used by the Optional/Union predicates below.
NoneType = type(None)
# Generic type variables used in hook signatures throughout this module.
T = TypeVar("T")
V = TypeVar("V")
class UnstructureStrategy(Enum):
    """`attrs` classes unstructuring strategies."""

    # Unstructure attrs instances into mappings keyed by attribute name.
    AS_DICT = "asdict"
    # Unstructure attrs instances into tuples of attribute values.
    AS_TUPLE = "astuple"
def _subclass(typ: Type) -> Callable[[Type], bool]:
"""a shortcut"""
return lambda cls: issubclass(cls, typ)
def is_attrs_union(typ: Type) -> bool:
    """Whether *typ* is a Union whose members are all attrs classes."""
    if not is_union_type(typ):
        return False
    for member in typ.__args__:
        # Generic aliases are reduced to their origin class before checking.
        if not has(get_origin(member) or member):
            return False
    return True
def is_attrs_union_or_none(typ: Type) -> bool:
    """Whether *typ* is a Union of attrs classes, optionally including None."""
    if not is_union_type(typ):
        return False
    for member in typ.__args__:
        # None is tolerated; every other member must be an attrs class
        # (generic aliases reduced to their origin first).
        if member is not NoneType and not has(get_origin(member) or member):
            return False
    return True
def is_optional(typ: Type) -> bool:
    """Whether *typ* is ``Optional[X]``, i.e. exactly ``Union[X, None]``."""
    if not is_union_type(typ):
        return False
    members = typ.__args__
    return len(members) == 2 and NoneType in members
def is_literal_containing_enums(typ: Type) -> bool:
    """Whether *typ* is a ``Literal`` with at least one Enum member."""
    if not is_literal(typ):
        return False
    return any(isinstance(member, Enum) for member in typ.__args__)
class BaseConverter:
    """Converts between structured and unstructured data."""

    # All per-instance state lives in these slots; they are assigned in
    # ``__init__``.
    __slots__ = (
        "_dis_func_cache",  # lru_cache-wrapped union disambiguation factory
        "_unstructure_func",  # dispatcher for unstructure hooks
        "_unstructure_attrs",  # bound method: attrs -> dict or tuple
        "_structure_attrs",  # bound method: dict or tuple -> attrs
        "_dict_factory",  # mapping factory used when unstructuring as dicts
        "_union_struct_registry",  # per-union structure hooks (unions aren't classes)
        "_structure_func",  # dispatcher for structure hooks
        "_prefer_attrib_converters",  # prefer attrs ``converter``s over hooks
        "detailed_validation",  # wrap structuring errors with path details
    )
    def __init__(
        self,
        dict_factory: Callable[[], Any] = dict,
        unstruct_strat: UnstructureStrategy = UnstructureStrategy.AS_DICT,
        prefer_attrib_converters: bool = False,
        detailed_validation: bool = True,
    ) -> None:
        """Initialize the converter and register the default hooks.

        :param dict_factory: factory producing the mapping used when
            unstructuring attrs classes as dicts.
        :param unstruct_strat: whether attrs classes unstructure to dicts
            or to tuples; accepts the enum or its string value.
        :param prefer_attrib_converters: if True, attrs attribute
            ``converter``s take precedence over registered structure hooks.
        :param detailed_validation: whether structuring errors are wrapped
            into exceptions carrying the path to the offending value.
        """
        # Normalize so string values ("asdict"/"astuple") are accepted too.
        unstruct_strat = UnstructureStrategy(unstruct_strat)
        self._prefer_attrib_converters = prefer_attrib_converters
        self.detailed_validation = detailed_validation
        # Bind the attrs (un)structuring strategy once, up front.
        # Create a per-instance cache.
        if unstruct_strat is UnstructureStrategy.AS_DICT:
            self._unstructure_attrs = self.unstructure_attrs_asdict
            self._structure_attrs = self.structure_attrs_fromdict
        else:
            self._unstructure_attrs = self.unstructure_attrs_astuple
            self._structure_attrs = self.structure_attrs_fromtuple
        # Union disambiguation functions are cached per instance.
        self._dis_func_cache = lru_cache()(self._get_dis_func)
        # Default unstructure behavior is identity (pass the value through).
        self._unstructure_func = MultiStrategyDispatch(self._unstructure_identity)
        self._unstructure_func.register_cls_list(
            [(bytes, self._unstructure_identity), (str, self._unstructure_identity)]
        )
        # NOTE(review): registration order of these predicates appears
        # significant (e.g. `has` before `is_union_type`) — presumably the
        # first matching predicate wins in MultiStrategyDispatch; confirm
        # there before reordering.
        self._unstructure_func.register_func_list(
            [
                (
                    is_protocol,
                    # Protocols unstructure as their runtime class.
                    lambda o: self.unstructure(o, unstructure_as=o.__class__),
                ),
                (is_mapping, self._unstructure_mapping),
                (is_sequence, self._unstructure_seq),
                (is_mutable_set, self._unstructure_seq),
                (is_frozenset, self._unstructure_seq),
                (_subclass(Enum), self._unstructure_enum),
                (has, self._unstructure_attrs),
                (is_union_type, self._unstructure_union),
            ]
        )
        # Per-instance register of to-attrs converters.
        # Singledispatch dispatches based on the first argument, so we
        # store the function and switch the arguments in self.loads.
        self._structure_func = MultiStrategyDispatch(BaseConverter._structure_error)
        # Entries flagged with a trailing ``True`` register hook *factories*
        # (generated per concrete type) rather than plain hooks.
        self._structure_func.register_func_list(
            [
                (lambda cl: cl is Any or cl is Optional or cl is None, lambda v, _: v),
                (is_generic_attrs, self._gen_structure_generic, True),
                (lambda t: get_newtype_base(t) is not None, self._structure_newtype),
                (is_literal, self._structure_simple_literal),
                (is_literal_containing_enums, self._structure_enum_literal),
                (is_sequence, self._structure_list),
                (is_mutable_set, self._structure_set),
                (is_frozenset, self._structure_frozenset),
                (is_tuple, self._structure_tuple),
                (is_mapping, self._structure_dict),
                (is_attrs_union_or_none, self._gen_attrs_union_structure, True),
                (
                    lambda t: is_union_type(t) and t in self._union_struct_registry,
                    self._structure_union,
                ),
                (is_optional, self._structure_optional),
                (has, self._structure_attrs),
            ]
        )
        # Strings are sequences.
        # Register these as classes so they take priority over the sequence
        # predicate above; structuring calls the type directly.
        self._structure_func.register_cls_list(
            [
                (str, self._structure_call),
                (bytes, self._structure_call),
                (int, self._structure_call),
                (float, self._structure_call),
                (Enum, self._structure_call),
            ]
        )
        self._dict_factory = dict_factory
        # Unions are instances now, not classes. We use different registries.
        self._union_struct_registry: Dict[Any, Callable[[Any, Type[T]], T]] = {}
def unstructure(self, obj: Any, unstructure_as: Any = None) -> Any:
return self._unstructure_func.dispatch(
obj.__class__ if unstructure_as is None else unstructure_as
)(obj)
@property
def unstruct_strat(self) -> UnstructureStrategy:
"""The default way of unstructuring ``attrs`` classes."""
return (
UnstructureStrategy.AS_DICT
if self._unstructure_attrs == self.unstructure_attrs_asdict
else UnstructureStrategy.AS_TUPLE
)
def register_unstructure_hook(self, cls: Any, func: Callable[[Any], Any]) -> None:
    """Register a class-to-primitive converter function for a class.

    The converter function should take an instance of the class and return
    its Python equivalent.
    """
    if attrs_has(cls):
        resolve_types(cls)
    if is_union_type(cls):
        # Unions aren't classes, so they go through the predicate registry.
        self._unstructure_func.register_func_list([(lambda t: t == cls, func)])
        return
    if get_newtype_base(cls) is not None:
        # NewTypes aren't classes either; match them by identity.
        self._unstructure_func.register_func_list([(lambda t: t is cls, func)])
        return
    self._unstructure_func.register_cls_list([(cls, func)])
def register_unstructure_hook_func(
    self, check_func: Callable[[Any], bool], func: Callable[[Any], Any]
) -> None:
    """Register a class-to-primitive converter, matched by a predicate.

    ``check_func`` receives a type and returns whether ``func`` should
    handle it.
    """
    self._unstructure_func.register_func_list([(check_func, func)])
def register_unstructure_hook_factory(
    self,
    predicate: Callable[[Any], bool],
    factory: Callable[[Any], Callable[[Any], Any]],
) -> None:
    """Register an unstructure-hook factory for a given predicate.

    A predicate is a function that, given a type, returns whether the
    factory can produce a hook for that type.  The factory, given a type,
    produces an unstructuring hook for it; produced hooks are cached.
    """
    self._unstructure_func.register_func_list([(predicate, factory, True)])
def register_structure_hook(
    self, cl: Any, func: Callable[[Any, Type[T]], T]
) -> None:
    """Register a primitive-to-class converter function for a type.

    The converter function should take two arguments:
      * a Python object to be converted,
      * the type to convert to
    and return the instance of the class. The type may seem redundant, but
    is sometimes needed (for example, when dealing with generic classes).
    """
    if attrs_has(cl):
        resolve_types(cl)
    if is_union_type(cl):
        # Unions live in their own registry; invalidate cached dispatches.
        self._union_struct_registry[cl] = func
        self._structure_func.clear_cache()
        return
    if get_newtype_base(cl) is not None:
        # NewTypes are matched by identity rather than by class.
        self._structure_func.register_func_list([(lambda t: t is cl, func)])
        return
    self._structure_func.register_cls_list([(cl, func)])
def register_structure_hook_func(
    self, check_func: Callable[[Type[T]], bool], func: Callable[[Any, Type[T]], T]
) -> None:
    """Register a primitive-to-class converter, matched by a predicate.

    ``check_func`` receives a type and returns whether ``func`` should
    handle it.
    """
    self._structure_func.register_func_list([(check_func, func)])
def register_structure_hook_factory(
    self,
    predicate: Callable[[Any], bool],
    factory: Callable[[Any], Callable[[Any, Any], Any]],
) -> None:
    """Register a structure-hook factory for a given predicate.

    A predicate is a function that, given a type, returns whether the
    factory can produce a hook for that type.  The factory, given a type,
    produces a structuring hook for it; produced hooks are cached.
    """
    self._structure_func.register_func_list([(predicate, factory, True)])
def structure(self, obj: Any, cl: Type[T]) -> T:
    """Convert unstructured Python data structures to structured data."""
    handler = self._structure_func.dispatch(cl)
    return handler(obj, cl)
# Classes to Python primitives.
def unstructure_attrs_asdict(self, obj: Any) -> Dict[str, Any]:
    """Our version of `attrs.asdict`, so we can call back to us."""
    dispatch = self._unstructure_func.dispatch
    rv = self._dict_factory()
    for a in fields(obj.__class__):
        value = getattr(obj, a.name)
        # Dispatch on the declared attribute type when present, falling
        # back to the value's runtime class.
        rv[a.name] = dispatch(a.type or value.__class__)(value)
    return rv
def unstructure_attrs_astuple(self, obj: Any) -> Tuple[Any, ...]:
    """Our version of `attrs.astuple`, so we can call back to us."""
    dispatch = self._unstructure_func.dispatch
    # Dispatch on the declared attribute type when present, falling back
    # to each value's runtime class.
    pairs = ((a.type, getattr(obj, a.name)) for a in fields(obj.__class__))
    return tuple(dispatch(t or v.__class__)(v) for t, v in pairs)
def _unstructure_enum(self, obj: Enum) -> Any:
"""Convert an enum to its value."""
return obj.value
@staticmethod
def _unstructure_identity(obj: T) -> T:
"""Just pass it through."""
return obj
def _unstructure_seq(self, seq: Sequence[T]) -> Sequence[T]:
    """Convert a sequence to primitive equivalents."""
    dispatch = self._unstructure_func.dispatch
    # Instantiating seq.__class__ keeps tuples as tuples, lists as lists.
    return seq.__class__(dispatch(item.__class__)(item) for item in seq)
def _unstructure_mapping(self, mapping: Mapping[T, V]) -> Mapping[T, V]:
    """Convert a mapping of attr classes to primitive equivalents."""
    dispatch = self._unstructure_func.dispatch
    # Instantiating mapping.__class__ keeps dicts as dicts and
    # OrderedDicts as OrderedDicts.
    items = (
        (dispatch(key.__class__)(key), dispatch(val.__class__)(val))
        for key, val in mapping.items()
    )
    return mapping.__class__(items)
# note: Use UnionType when 3.11 is released as
# the behaviour of @final is changed. This would
# affect how we can support UnionType in ._compat.py
def _unstructure_union(self, obj: Any) -> Any:
    """Unstructure an object as a union.

    By default, just unstructures the instance by its runtime class.
    """
    handler = self._unstructure_func.dispatch(obj.__class__)
    return handler(obj)
# Python primitives to classes.
@staticmethod
def _structure_error(_, cl: Type) -> NoReturn:
    """At the bottom of the condition stack, we explode if we can't handle it."""
    msg = f"Unsupported type: {cl!r}. Register a structure hook for it."
    raise StructureHandlerNotFoundError(msg, type_=cl)
def _gen_structure_generic(self, cl: Type[T]) -> DictStructureFn[T]:
    """Create and return a hook for structuring generics."""
    return make_dict_structure_fn(
        cl, self, _cattrs_prefer_attrib_converters=self._prefer_attrib_converters
    )
def _gen_attrs_union_structure(
    self, cl: Any
) -> Callable[[Any, Type[T]], Optional[Type[T]]]:
    """Generate a structuring function for a union of attrs classes (and maybe None)."""
    dis_fn = self._get_dis_func(cl)
    none_allowed = NoneType in cl.__args__

    def structure_attrs_union(obj, _):
        # Short-circuit Optional members; otherwise disambiguate and
        # structure as the detected class.
        if none_allowed and obj is None:
            return None
        return self.structure(obj, dis_fn(obj))

    return structure_attrs_union
@staticmethod
def _structure_call(obj: Any, cl: Type[T]) -> Any:
"""Just call ``cl`` with the given ``obj``.
This is just an optimization on the ``_structure_default`` case, when
we know we can skip the ``if`` s. Use for ``str``, ``bytes``, ``enum``,
etc.
"""
return cl(obj)
@staticmethod
def _structure_simple_literal(val, type):
if val not in type.__args__:
raise Exception(f"{val} not in literal {type}")
return val
@staticmethod
def _structure_enum_literal(val, type):
vals = {(x.value if isinstance(x, Enum) else x): x for x in type.__args__}
try:
return vals[val]
except KeyError:
raise Exception(f"{val} not in literal {type}") from None
def _structure_newtype(self, val, type):
    """Structure a NewType value as its supertype."""
    base = get_newtype_base(type)
    handler = self._structure_func.dispatch(base)
    return handler(val, base)
# Attrs classes.
def structure_attrs_fromtuple(self, obj: Tuple[Any, ...], cl: Type[T]) -> T:
    """Load an attrs class from a sequence (tuple).

    Each positional value is structured according to the metadata of the
    corresponding attrs attribute.
    """
    conv_args = (
        self._structure_attribute(a, value)
        for a, value in zip(fields(cl), obj)
    )
    return cl(*conv_args)
def _structure_attribute(self, a: Union[Attribute, Field], value: Any) -> Any:
    """Handle an individual attrs attribute."""
    type_ = a.type
    attrib_converter = getattr(a, "converter", None)
    # When configured to prefer attrib converters and one is defined on
    # this attribute, pass through the raw value so attrs will feed it
    # into the converter.
    if attrib_converter and self._prefer_attrib_converters:
        return value
    # Without type metadata there is nothing to dispatch on.
    if type_ is None:
        return value
    try:
        return self._structure_func.dispatch(type_)(value, type_)
    except StructureHandlerNotFoundError:
        if not attrib_converter:
            raise
        # No hook found; fall back to the attrib converter with the raw value.
        return value
def structure_attrs_fromdict(self, obj: Mapping[str, Any], cl: Type[T]) -> T:
    """Instantiate an attrs class from a mapping (dict).

    For public use.  Keys in ``obj`` that don't correspond to attributes
    of ``cl`` are silently ignored.
    """
    kwargs = {}  # Fresh dict, so extra keys are ignored.
    for a in fields(cl):
        name = a.name
        if name not in obj:
            continue
        val = obj[name]
        # Private attributes are passed to __init__ without the underscore.
        if name[0] == "_":
            name = name[1:]
        kwargs[name] = self._structure_attribute(a, val)
    return cl(**kwargs)
def _structure_list(self, obj: Iterable[T], cl: Any) -> List[T]:
    """Convert an iterable to a potentially generic list.

    When detailed validation is enabled, per-element errors are collected
    (with an index note attached) and re-raised together as an
    ``IterableValidationError``.
    """
    if is_bare(cl) or cl.__args__[0] is Any:
        return [e for e in obj]
    elem_type = cl.__args__[0]
    handler = self._structure_func.dispatch(elem_type)
    if not self.detailed_validation:
        return [handler(e, elem_type) for e in obj]
    errors = []
    res = []
    ix = 0  # Avoid `enumerate` for performance.
    for e in obj:
        try:
            res.append(handler(e, elem_type))
        except Exception as exc:
            # Fix: the exception variable used to be named `e`, shadowing
            # the element being structured — rename to keep both distinct.
            msg = f"Structuring {cl} @ index {ix}"
            exc.__notes__ = getattr(exc, "__notes__", []) + [msg]
            errors.append(exc)
        finally:
            ix += 1
    if errors:
        raise IterableValidationError(f"While structuring {cl!r}", errors, cl)
    return res
def _structure_set(
    self, obj: Iterable[T], cl: Any, structure_to: type = set
) -> Set[T]:
    """Convert an iterable into a potentially generic set.

    ``structure_to`` selects the concrete container (``set`` by default;
    ``frozenset`` is used by ``_structure_frozenset``).  When detailed
    validation is enabled, per-element errors are collected and re-raised
    together as an ``IterableValidationError``.
    """
    if is_bare(cl) or cl.__args__[0] is Any:
        return structure_to(obj)
    elem_type = cl.__args__[0]
    handler = self._structure_func.dispatch(elem_type)
    if self.detailed_validation:
        errors = []
        res = set()
        for e in obj:
            try:
                res.add(handler(e, elem_type))
            except Exception as exc:
                msg = f"Structuring {structure_to.__name__} @ element {e!r}"
                # Bug fix: read existing notes from the exception (`exc`),
                # not from the element being structured (`e`).
                exc.__notes__ = getattr(exc, "__notes__", []) + [msg]
                errors.append(exc)
        if errors:
            raise IterableValidationError(f"While structuring {cl!r}", errors, cl)
        return res if structure_to is set else structure_to(res)
    elif structure_to is set:
        return {handler(e, elem_type) for e in obj}
    else:
        return structure_to([handler(e, elem_type) for e in obj])
def _structure_frozenset(
    self, obj: Iterable[T], cl: Any
) -> FrozenSetSubscriptable[T]:
    """Convert an iterable into a potentially generic frozenset."""
    # Reuse the set logic, just materializing into a frozenset.
    return self._structure_set(obj, cl, structure_to=frozenset)
def _structure_dict(self, obj: Mapping[T, V], cl: Any) -> Dict[T, V]:
    """Convert a mapping into a potentially generic dict."""
    if is_bare(cl) or cl.__args__ == (Any, Any):
        return dict(obj)
    key_type, val_type = cl.__args__
    # Skip dispatching for any side that is typed as Any.
    if key_type is Any:
        structure_val = self._structure_func.dispatch(val_type)
        return {k: structure_val(v, val_type) for k, v in obj.items()}
    if val_type is Any:
        structure_key = self._structure_func.dispatch(key_type)
        return {structure_key(k, key_type): v for k, v in obj.items()}
    structure_key = self._structure_func.dispatch(key_type)
    structure_val = self._structure_func.dispatch(val_type)
    return {
        structure_key(k, key_type): structure_val(v, val_type)
        for k, v in obj.items()
    }
def _structure_optional(self, obj, union):
    """Structure an ``Optional[X]``: pass None through, structure as X otherwise."""
    if obj is None:
        return None
    first, second = union.__args__
    other = first if second is NoneType else second
    # We can't actually have a Union of a Union, so this is safe.
    return self._structure_func.dispatch(other)(obj, other)
def _structure_union(self, obj, union):
    """Deal with structuring a union via its registered hook."""
    return self._union_struct_registry[union](obj, union)
def _structure_tuple(self, obj: Any, tup: Type[T]) -> T:
    """Deal with structuring into a tuple.

    Handles bare tuples, homogeneous tuples (``Tuple[int, ...]``) and
    heterogeneous tuples (``Tuple[int, str]``).  With detailed validation
    enabled, per-element errors are collected and raised together as an
    ``IterableValidationError``.
    """
    if tup in (Tuple, tuple):
        tup_params = None
    else:
        tup_params = tup.__args__
    has_ellipsis = tup_params and tup_params[-1] is Ellipsis
    if tup_params is None or (has_ellipsis and tup_params[0] is Any):
        # Just a Tuple. (No generic information.)
        return tuple(obj)
    if has_ellipsis:
        # We're dealing with a homogenous tuple, Tuple[int, ...]
        tup_type = tup_params[0]
        conv = self._structure_func.dispatch(tup_type)
        if not self.detailed_validation:
            return tuple(conv(e, tup_type) for e in obj)
        errors = []
        res = []
        for ix, e in enumerate(obj):
            try:
                res.append(conv(e, tup_type))
            except Exception as exc:
                msg = f"Structuring {tup} @ index {ix}"
                # Bug fix: attach notes to the exception (`exc`), not to
                # whatever `e` (the element) happened to carry.
                exc.__notes__ = getattr(exc, "__notes__", []) + [msg]
                errors.append(exc)
        if errors:
            raise IterableValidationError(f"While structuring {tup!r}", errors, tup)
        return tuple(res)
    # We're dealing with a heterogenous tuple.
    exp_len = len(tup_params)
    try:
        len_obj = len(obj)
    except TypeError:
        pass  # most likely an unsized iterator, eg generator
    else:
        if len_obj > exp_len:
            exp_len = len_obj
    if self.detailed_validation:
        errors = []
        res = []
        for ix, (t, e) in enumerate(zip(tup_params, obj)):
            try:
                conv = self._structure_func.dispatch(t)
                res.append(conv(e, t))
            except Exception as exc:
                msg = f"Structuring {tup} @ index {ix}"
                # Bug fix: read notes off `exc`, not the element `e`.
                exc.__notes__ = getattr(exc, "__notes__", []) + [msg]
                errors.append(exc)
        if len(res) < exp_len:
            problem = "Not enough" if len(res) < len(tup_params) else "Too many"
            exc = ValueError(f"{problem} values in {obj!r} to structure as {tup!r}")
            # Bug fix: the original read `e.__notes__` here, which attached
            # the last *element's* notes and raised NameError for empty
            # input; the fresh ValueError has no notes yet.
            exc.__notes__ = [f"Structuring {tup}"]
            errors.append(exc)
        if errors:
            raise IterableValidationError(f"While structuring {tup!r}", errors, tup)
        return tuple(res)
    res = tuple(
        self._structure_func.dispatch(t)(e, t) for t, e in zip(tup_params, obj)
    )
    if len(res) < exp_len:
        problem = "Not enough" if len(res) < len(tup_params) else "Too many"
        raise ValueError(f"{problem} values in {obj!r} to structure as {tup!r}")
    return res
@staticmethod
def _get_dis_func(union: Any) -> Callable[[Any], Type]:
    """Fetch or try creating a disambiguation function for a union."""
    union_types = union.__args__
    if NoneType in union_types:  # type: ignore
        # Unions of attrs classes and NoneType are supported higher in
        # the logic; strip NoneType here.
        union_types = tuple(
            e for e in union_types if e is not NoneType  # type: ignore
        )
    if not all(has(get_origin(e) or e) for e in union_types):
        raise StructureHandlerNotFoundError(
            "Only unions of attrs classes supported "
            "currently. Register a loads hook manually.",
            type_=union,
        )
    return create_uniq_field_dis_func(*union_types)
def __deepcopy__(self, _) -> "BaseConverter":
    # Deep copies delegate to copy(), which builds a fresh converter and
    # copies the registered hooks over to it.
    return self.copy()
def copy(
    self,
    dict_factory: Optional[Callable[[], Any]] = None,
    unstruct_strat: Optional[UnstructureStrategy] = None,
    prefer_attrib_converters: Optional[bool] = None,
    detailed_validation: Optional[bool] = None,
) -> "BaseConverter":
    """Create a copy of this converter, optionally overriding settings.

    Any hooks registered on this converter are copied to the new instance.
    """
    # Resolve each override to the current setting when not provided.
    if dict_factory is None:
        dict_factory = self._dict_factory
    if unstruct_strat is None:
        unstruct_strat = (
            UnstructureStrategy.AS_DICT
            if self._unstructure_attrs == self.unstructure_attrs_asdict
            else UnstructureStrategy.AS_TUPLE
        )
    if prefer_attrib_converters is None:
        prefer_attrib_converters = self._prefer_attrib_converters
    if detailed_validation is None:
        detailed_validation = self.detailed_validation
    res = self.__class__(
        dict_factory,
        unstruct_strat,
        prefer_attrib_converters,
        detailed_validation,
    )
    self._unstructure_func.copy_to(res._unstructure_func)
    self._structure_func.copy_to(res._structure_func)
    return res
class Converter(BaseConverter):
    """A converter which generates specialized un/structuring functions."""

    __slots__ = (
        "omit_if_default",
        "forbid_extra_keys",
        "type_overrides",
        "_unstruct_collection_overrides",
        "_struct_copy_skip",
        "_unstruct_copy_skip",
    )

    def __init__(
        self,
        dict_factory: Callable[[], Any] = dict,
        unstruct_strat: UnstructureStrategy = UnstructureStrategy.AS_DICT,
        omit_if_default: bool = False,
        forbid_extra_keys: bool = False,
        type_overrides: Mapping[Type, AttributeOverride] = {},
        unstruct_collection_overrides: Mapping[Type, Callable] = {},
        prefer_attrib_converters: bool = False,
        detailed_validation: bool = True,
    ):
        # NOTE(review): the mutable default arguments ({}) are only ever
        # read here (both are copied into fresh dicts below), so they are
        # not subject to the shared-mutable-default pitfall.
        super().__init__(
            dict_factory=dict_factory,
            unstruct_strat=unstruct_strat,
            prefer_attrib_converters=prefer_attrib_converters,
            detailed_validation=detailed_validation,
        )
        self.omit_if_default = omit_if_default
        self.forbid_extra_keys = forbid_extra_keys
        self.type_overrides = dict(type_overrides)
        # Normalize override keys to their generic origins (e.g. list
        # instead of List[int]).
        unstruct_collection_overrides = {
            get_origin(k) or k: v for k, v in unstruct_collection_overrides.items()
        }
        self._unstruct_collection_overrides = unstruct_collection_overrides
        # Do a little post-processing magic to make things easier for users:
        # overrides given for an abstract collection type cascade down to
        # its concrete subtypes, unless those already have overrides.
        co = unstruct_collection_overrides
        # abc.Set overrides, if defined, apply to abc.MutableSets and sets
        if OriginAbstractSet in co:
            if OriginMutableSet not in co:
                co[OriginMutableSet] = co[OriginAbstractSet]
                co[AbcMutableSet] = co[OriginAbstractSet]  # For 3.7/3.8 compatibility.
            if FrozenSetSubscriptable not in co:
                co[FrozenSetSubscriptable] = co[OriginAbstractSet]
        # abc.MutableSet overrides, if defined, apply to sets
        if OriginMutableSet in co:
            if set not in co:
                co[set] = co[OriginMutableSet]
        if FrozenSetSubscriptable in co:
            co[frozenset] = co[FrozenSetSubscriptable]  # For 3.7/3.8 compatibility.
        # abc.Sequence overrides, if defined, can apply to MutableSequences, lists and tuples
        if Sequence in co:
            if MutableSequence not in co:
                co[MutableSequence] = co[Sequence]
            if tuple not in co:
                co[tuple] = co[Sequence]
        # abc.MutableSequence overrides, if defined, can apply to lists
        if MutableSequence in co:
            if list not in co:
                co[list] = co[MutableSequence]
        # abc.Mapping overrides, if defined, can apply to MutableMappings
        if Mapping in co:
            if MutableMapping not in co:
                co[MutableMapping] = co[Mapping]
        # abc.MutableMapping overrides, if defined, can apply to dicts
        if MutableMapping in co:
            if dict not in co:
                co[dict] = co[MutableMapping]
        # builtins.dict overrides, if defined, can apply to counters
        if dict in co:
            if Counter not in co:
                co[Counter] = co[dict]
        if unstruct_strat is UnstructureStrategy.AS_DICT:
            # Override the attrs handler.
            self.register_unstructure_hook_factory(
                has_with_generic, self.gen_unstructure_attrs_fromdict
            )
            self.register_structure_hook_factory(
                has_with_generic, self.gen_structure_attrs_fromdict
            )
        self.register_unstructure_hook_factory(
            is_annotated, self.gen_unstructure_annotated
        )
        self.register_unstructure_hook_factory(
            is_hetero_tuple, self.gen_unstructure_hetero_tuple
        )
        self.register_unstructure_hook_factory(
            is_sequence, self.gen_unstructure_iterable
        )
        self.register_unstructure_hook_factory(is_mapping, self.gen_unstructure_mapping)
        self.register_unstructure_hook_factory(
            is_mutable_set,
            lambda cl: self.gen_unstructure_iterable(cl, unstructure_to=set),
        )
        self.register_unstructure_hook_factory(
            is_frozenset,
            lambda cl: self.gen_unstructure_iterable(cl, unstructure_to=frozenset),
        )
        self.register_unstructure_hook_factory(
            lambda t: get_newtype_base(t) is not None,
            lambda t: self._unstructure_func.dispatch(get_newtype_base(t)),
        )
        self.register_structure_hook_factory(is_annotated, self.gen_structure_annotated)
        self.register_structure_hook_factory(is_mapping, self.gen_structure_mapping)
        self.register_structure_hook_factory(is_counter, self.gen_structure_counter)
        self.register_structure_hook_factory(
            lambda t: get_newtype_base(t) is not None, self.get_structure_newtype
        )
        # We keep these so we can more correctly copy the hooks: copy()
        # skips this many leading hooks (the ones installed above), since
        # the new instance regenerates them itself.
        self._struct_copy_skip = self._structure_func.get_num_fns()
        self._unstruct_copy_skip = self._unstructure_func.get_num_fns()

    def get_structure_newtype(self, type: Type[T]) -> Callable[[Any, Any], T]:
        # Structure NewTypes by dispatching on their supertype.
        base = get_newtype_base(type)
        handler = self._structure_func.dispatch(base)
        return lambda v, _: handler(v, base)

    def gen_unstructure_annotated(self, type):
        # Annotated[T, ...] unstructures exactly like T.
        origin = type.__origin__
        h = self._unstructure_func.dispatch(origin)
        return h

    def gen_structure_annotated(self, type):
        # Annotated[T, ...] structures exactly like T.
        origin = type.__origin__
        h = self._structure_func.dispatch(origin)
        return h

    def gen_unstructure_attrs_fromdict(
        self, cl: Type[T]
    ) -> Callable[[T], Dict[str, Any]]:
        """Generate a specialized dict-unstructure function for an attrs class."""
        origin = get_origin(cl)
        attribs = fields(origin or cl)
        if attrs_has(cl) and any(isinstance(a.type, str) for a in attribs):
            # PEP 563 annotations - need to be resolved.
            resolve_types(cl)
        # Apply any per-type attribute overrides configured on the converter.
        attrib_overrides = {
            a.name: self.type_overrides[a.type]
            for a in attribs
            if a.type in self.type_overrides
        }
        h = make_dict_unstructure_fn(
            cl, self, _cattrs_omit_if_default=self.omit_if_default, **attrib_overrides
        )
        return h

    def gen_structure_attrs_fromdict(
        self, cl: Type[T]
    ) -> Callable[[Mapping[str, Any], Any], T]:
        """Generate a specialized dict-structure function for an attrs class."""
        attribs = fields(get_origin(cl) if is_generic(cl) else cl)
        if attrs_has(cl) and any(isinstance(a.type, str) for a in attribs):
            # PEP 563 annotations - need to be resolved.
            resolve_types(cl)
        # Apply any per-type attribute overrides configured on the converter.
        attrib_overrides = {
            a.name: self.type_overrides[a.type]
            for a in attribs
            if a.type in self.type_overrides
        }
        h = make_dict_structure_fn(
            cl,
            self,
            _cattrs_forbid_extra_keys=self.forbid_extra_keys,
            _cattrs_prefer_attrib_converters=self._prefer_attrib_converters,
            _cattrs_detailed_validation=self.detailed_validation,
            **attrib_overrides,
        )
        # only direct dispatch so that subclasses get separately generated
        return h

    def gen_unstructure_iterable(
        self, cl: Any, unstructure_to: Any = None
    ) -> IterableUnstructureFn:
        """Generate, register and return an iterable-unstructure function."""
        unstructure_to = self._unstruct_collection_overrides.get(
            get_origin(cl) or cl, unstructure_to or list
        )
        h = make_iterable_unstructure_fn(cl, self, unstructure_to=unstructure_to)
        self._unstructure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_unstructure_hetero_tuple(
        self, cl: Any, unstructure_to: Any = None
    ) -> HeteroTupleUnstructureFn:
        """Generate, register and return a heterogeneous-tuple unstructure function."""
        unstructure_to = self._unstruct_collection_overrides.get(
            get_origin(cl) or cl, unstructure_to or list
        )
        h = make_hetero_tuple_unstructure_fn(cl, self, unstructure_to=unstructure_to)
        self._unstructure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_unstructure_mapping(
        self,
        cl: Any,
        unstructure_to: Any = None,
        key_handler: Optional[Callable[[Any, Optional[Any]], Any]] = None,
    ) -> MappingUnstructureFn:
        """Generate, register and return a mapping-unstructure function."""
        unstructure_to = self._unstruct_collection_overrides.get(
            get_origin(cl) or cl, unstructure_to or dict
        )
        h = make_mapping_unstructure_fn(
            cl, self, unstructure_to=unstructure_to, key_handler=key_handler
        )
        self._unstructure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_structure_counter(self, cl: Any) -> MappingStructureFn[T]:
        """Generate, register and return a Counter-structure function."""
        h = make_mapping_structure_fn(
            cl,
            self,
            structure_to=Counter,
            val_type=int,
            detailed_validation=self.detailed_validation,
        )
        self._structure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_structure_mapping(self, cl: Any) -> MappingStructureFn[T]:
        """Generate, register and return a mapping-structure function."""
        h = make_mapping_structure_fn(
            cl, self, detailed_validation=self.detailed_validation
        )
        self._structure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def copy(
        self,
        dict_factory: Optional[Callable[[], Any]] = None,
        unstruct_strat: Optional[UnstructureStrategy] = None,
        omit_if_default: Optional[bool] = None,
        forbid_extra_keys: Optional[bool] = None,
        type_overrides: Optional[Mapping[Type, AttributeOverride]] = None,
        unstruct_collection_overrides: Optional[Mapping[Type, Callable]] = None,
        prefer_attrib_converters: Optional[bool] = None,
        detailed_validation: Optional[bool] = None,
    ) -> "Converter":
        """Create a copy of this converter, optionally overriding settings.

        Hooks registered after construction are copied over; the ones
        installed by ``__init__`` are skipped (see ``_struct_copy_skip``).
        """
        res = self.__class__(
            dict_factory if dict_factory is not None else self._dict_factory,
            unstruct_strat
            if unstruct_strat is not None
            else (
                UnstructureStrategy.AS_DICT
                if self._unstructure_attrs == self.unstructure_attrs_asdict
                else UnstructureStrategy.AS_TUPLE
            ),
            omit_if_default if omit_if_default is not None else self.omit_if_default,
            forbid_extra_keys
            if forbid_extra_keys is not None
            else self.forbid_extra_keys,
            type_overrides if type_overrides is not None else self.type_overrides,
            unstruct_collection_overrides
            if unstruct_collection_overrides is not None
            else self._unstruct_collection_overrides,
            prefer_attrib_converters
            if prefer_attrib_converters is not None
            else self._prefer_attrib_converters,
            detailed_validation
            if detailed_validation is not None
            else self.detailed_validation,
        )
        self._unstructure_func.copy_to(
            res._unstructure_func, skip=self._unstruct_copy_skip
        )
        self._structure_func.copy_to(res._structure_func, skip=self._struct_copy_skip)
        return res
GenConverter = Converter
| mit | 4986ea1628dec184ee54a49989cd4d6b | 36.90121 | 98 | 0.572557 | 4.103241 | false | false | false | false |
gae-init/gae-init-babel | main/control/user.py | 1 | 12104 | # coding: utf-8
import copy
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from google.appengine.ext import ndb
from webargs import fields as wf
from webargs.flaskparser import parser
import flask
import flask_login
import flask_wtf
import wtforms
import auth
import cache
import config
import i18n
import model
import task
import util
from main import app
###############################################################################
# User List
###############################################################################
@app.route('/admin/user/')
@auth.admin_required
def user_list():
    """Render the admin user list, with optional email/permission filters."""
    request_args = parser.parse({
        'email': wf.Str(missing=None),
        'permissions': wf.DelimitedList(wf.Str(), delimiter=',', missing=[]),
    })
    user_dbs, cursors = model.User.get_dbs(
        email=request_args['email'], prev_cursor=True,
    )
    # Offer the statically registered permissions plus any filtered-for ones.
    permission_choices = list(UserUpdateForm._permission_choices)
    permission_choices.extend(request_args['permissions'])
    return flask.render_template(
        'user/user_list.html',
        html_class='user-list',
        title=_('User List'),
        user_dbs=user_dbs,
        next_url=util.generate_next_url(cursors['next']),
        prev_url=util.generate_next_url(cursors['prev']),
        api_url=flask.url_for('api.admin.user.list'),
        permissions=sorted(set(permission_choices)),
    )
###############################################################################
# User Update
###############################################################################
class UserUpdateForm(i18n.Form):
    """Admin form for creating or updating a user account."""
    username = wtforms.StringField(
        model.User.username._verbose_name,
        [wtforms.validators.required(), wtforms.validators.length(min=2)],
        filters=[util.email_filter],
    )
    name = wtforms.StringField(
        model.User.name._verbose_name,
        [wtforms.validators.required()], filters=[util.strip_filter],
    )
    email = wtforms.StringField(
        model.User.email._verbose_name,
        [wtforms.validators.optional(), wtforms.validators.email()],
        filters=[util.email_filter],
    )
    locale = wtforms.SelectField(
        model.User.locale._verbose_name,
        choices=config.LOCALE_SORTED, filters=[util.strip_filter],
    )
    admin = wtforms.BooleanField(model.User.admin._verbose_name)
    active = wtforms.BooleanField(model.User.active._verbose_name)
    verified = wtforms.BooleanField(model.User.verified._verbose_name)
    permissions = wtforms.SelectMultipleField(
        model.User.permissions._verbose_name,
        filters=[util.sort_filter],
    )
    # Class-level pool of every permission registered in the application,
    # filled via the auth.permission_registered signal.
    _permission_choices = set()

    def __init__(self, *args, **kwds):
        super(UserUpdateForm, self).__init__(*args, **kwds)
        # Choices are (re)computed per instance because the class-level
        # pool can grow at runtime as permissions are registered.
        self.permissions.choices = [
            (p, p) for p in sorted(UserUpdateForm._permission_choices)
        ]
@auth.permission_registered.connect
def _permission_registered_callback(sender, permission):
    # Collect every permission registered anywhere in the app so the
    # user-update form can offer it as a choice.
    UserUpdateForm._permission_choices.add(permission)
@app.route('/admin/user/create/', methods=['GET', 'POST'])
@app.route('/admin/user/<int:user_id>/update/', methods=['GET', 'POST'])
@auth.admin_required
def user_update(user_id=0):
    """Create a new user (user_id == 0) or update an existing one."""
    if user_id:
        user_db = model.User.get_by_id(user_id)
    else:
        user_db = model.User(name='', username='')
    if not user_db:
        flask.abort(404)
    form = UserUpdateForm(obj=user_db)
    # Keep the user's already-granted permissions selectable even if they
    # are not in the registered pool.
    for permission in user_db.permissions:
        form.permissions.choices.append((permission, permission))
    form.permissions.choices = sorted(set(form.permissions.choices))
    if form.validate_on_submit():
        if not util.is_valid_username(form.username.data):
            form.username.errors.append(_('This username is invalid.'))
        elif not model.User.is_username_available(form.username.data, user_db.key):
            form.username.errors.append(_('This username is already taken.'))
        else:
            form.populate_obj(user_db)
            # Admins cannot demote or deactivate their own account.
            if auth.current_user_key() == user_db.key:
                user_db.admin = True
                user_db.active = True
            user_db.put()
            return flask.redirect(flask.url_for(
                'user_list', order='-modified', active=user_db.active,
            ))
    return flask.render_template(
        'user/user_update.html',
        title=user_db.name or _('New User'),
        html_class='user-update',
        form=form,
        user_db=user_db,
        # A freshly constructed (unsaved) user has no key yet.
        api_url=flask.url_for('api.admin.user', user_key=user_db.key.urlsafe()) if user_db.key else ''
    )
###############################################################################
# User Verify
###############################################################################
@app.route('/user/verify/<token>/')
@auth.login_required
def user_verify(token):
    """Mark the signed-in user's email as verified if the token matches."""
    user_db = auth.current_user_db()
    if user_db.token != token:
        flask.flash(__('That link is either invalid or expired.'), category='danger')
    else:
        user_db.verified = True
        # Rotate the token so the verification link cannot be reused.
        user_db.token = util.uuid()
        user_db.put()
        flask.flash(__('Hooray! Your email is now verified.'), category='success')
    return flask.redirect(flask.url_for('profile'))
###############################################################################
# User Forgot
###############################################################################
class UserForgotForm(i18n.Form):
    """Form asking for the email address to send a password-reset link to."""
    email = wtforms.StringField(
        'Email',
        [wtforms.validators.required(), wtforms.validators.email()],
        filters=[util.email_filter],
    )
    # Guards the reset flow against automated abuse.
    recaptcha = flask_wtf.RecaptchaField()
@app.route('/user/forgot/', methods=['GET', 'POST'])
def user_forgot(token=None):
    """Start the password-reset flow by emailing a reset link.

    Aborts with 418 when email authentication is disabled.  Exactly one
    active account must match the address; two matches signal an account
    conflict that is escalated to support.
    """
    if not config.CONFIG_DB.has_email_authentication:
        flask.abort(418)
    form = auth.form_with_recaptcha(UserForgotForm(obj=auth.current_user_db()))
    if form.validate_on_submit():
        cache.bump_auth_attempt()
        email = form.email.data
        user_dbs, cursors = util.get_dbs(
            model.User.query(), email=email, active=True, limit=2,
        )
        count = len(user_dbs)
        if count == 1:
            task.reset_password_notification(user_dbs[0])
            return flask.redirect(flask.url_for('welcome'))
        elif count == 0:
            # Fix: wrapped for i18n, consistent with the other form errors.
            form.email.errors.append(_('This email was not found'))
        elif count == 2:
            task.email_conflict_notification(email)
            # Fix: wrapped for i18n, consistent with the other form errors.
            form.email.errors.append(_(
                '''We are sorry but it looks like there is a conflict with your
                account. Our support team is already informed and we will get back to
                you as soon as possible.'''
            ))
    if form.errors:
        cache.bump_auth_attempt()
    return flask.render_template(
        'user/user_forgot.html',
        title=_('Forgot Password?'),
        html_class='user-forgot',
        form=form,
    )
###############################################################################
# User Reset
###############################################################################
class UserResetForm(i18n.Form):
    """Form for choosing a new password during the reset flow."""
    new_password = wtforms.StringField(
        _('New Password'),
        [wtforms.validators.required(), wtforms.validators.length(min=6)],
    )
@app.route('/user/reset/<token>/', methods=['GET', 'POST'])
@app.route('/user/reset/')
def user_reset(token=None):
    """Let the owner of a valid reset token choose a new password.

    Any signed-in session is terminated first, so the reset always applies
    to the account that owns the token.
    """
    user_db = model.User.get_by('token', token)
    if not user_db:
        flask.flash(__('That link is either invalid or expired.'), category='danger')
        return flask.redirect(flask.url_for('welcome'))
    if auth.is_logged_in():
        flask_login.logout_user()
        return flask.redirect(flask.request.path)
    form = UserResetForm()
    if form.validate_on_submit():
        user_db.password_hash = util.password_hash(user_db, form.new_password.data)
        # Invalidate the used token and mark the address as verified.
        user_db.token = util.uuid()
        user_db.verified = True
        user_db.put()
        flask.flash(__('Your password was changed successfully.'), category='success')
        return auth.signin_user_db(user_db)
    return flask.render_template(
        'user/user_reset.html',
        # Fix: wrapped for i18n, consistent with the other view titles.
        title=_('Reset Password'),
        html_class='user-reset',
        form=form,
        user_db=user_db,
    )
###############################################################################
# User Activate
###############################################################################
class UserActivateForm(i18n.Form):
    """Form for completing account activation: display name and password."""
    name = wtforms.StringField(
        model.User.name._verbose_name,
        [wtforms.validators.required()], filters=[util.strip_filter],
    )
    password = wtforms.StringField(
        _('Password'),
        [wtforms.validators.required(), wtforms.validators.length(min=6)],
    )
@app.route('/user/activate/<token>/', methods=['GET', 'POST'])
def user_activate(token):
    """Activate an account via its token: set name and password, sign in.

    Any signed-in session is terminated first, so activation always applies
    to the account that owns the token.
    """
    if auth.is_logged_in():
        flask_login.logout_user()
        return flask.redirect(flask.request.path)
    user_db = model.User.get_by('token', token)
    if not user_db:
        flask.flash(__('That link is either invalid or expired.'), category='danger')
        return flask.redirect(flask.url_for('welcome'))
    form = UserActivateForm(obj=user_db)
    if form.validate_on_submit():
        form.populate_obj(user_db)
        user_db.password_hash = util.password_hash(user_db, form.password.data)
        # Invalidate the used token and mark the account as verified.
        user_db.token = util.uuid()
        user_db.verified = True
        user_db.put()
        return auth.signin_user_db(user_db)
    return flask.render_template(
        'user/user_activate.html',
        # Fix: wrapped for i18n, consistent with the other view titles.
        title=_('Activate Account'),
        html_class='user-activate',
        user_db=user_db,
        form=form,
    )
###############################################################################
# User Merge
###############################################################################
class UserMergeForm(i18n.Form):
user_key = wtforms.HiddenField('User Key', [wtforms.validators.required()])
user_keys = wtforms.HiddenField('User Keys', [wtforms.validators.required()])
username = wtforms.StringField(_('Username'), [wtforms.validators.optional()])
name = wtforms.StringField(
_('Name (merged)'),
[wtforms.validators.required()], filters=[util.strip_filter],
)
email = wtforms.StringField(
_('Email (merged)'),
[wtforms.validators.optional(), wtforms.validators.email()],
filters=[util.email_filter],
)
@app.route('/admin/user/merge/', methods=['GET', 'POST'])
@auth.admin_required
def user_merge():
args = parser.parse({
'user_key': wf.Str(missing=None),
'user_keys': wf.DelimitedList(wf.Str(), delimiter=',', required=True),
})
user_db_keys = [ndb.Key(urlsafe=k) for k in args['user_keys']]
user_dbs = ndb.get_multi(user_db_keys)
if len(user_dbs) < 2:
flask.abort(400)
user_dbs.sort(key=lambda user_db: user_db.created)
merged_user_db = user_dbs[0]
auth_ids = []
permissions = []
is_admin = False
is_active = False
for user_db in user_dbs:
auth_ids.extend(user_db.auth_ids)
permissions.extend(user_db.permissions)
is_admin = is_admin or user_db.admin
is_active = is_active or user_db.active
if user_db.key.urlsafe() == args['user_key']:
merged_user_db = user_db
auth_ids = sorted(list(set(auth_ids)))
permissions = sorted(list(set(permissions)))
merged_user_db.permissions = permissions
merged_user_db.admin = is_admin
merged_user_db.active = is_active
merged_user_db.verified = False
form_obj = copy.deepcopy(merged_user_db)
form_obj.user_key = merged_user_db.key.urlsafe()
form_obj.user_keys = ','.join(args['user_keys'])
form = UserMergeForm(obj=form_obj)
if form.validate_on_submit():
form.populate_obj(merged_user_db)
merged_user_db.auth_ids = auth_ids
merged_user_db.put()
deprecated_keys = [k for k in user_db_keys if k != merged_user_db.key]
merge_user_dbs(merged_user_db, deprecated_keys)
return flask.redirect(
flask.url_for('user_update', user_id=merged_user_db.key.id()),
)
return flask.render_template(
'user/user_merge.html',
title=_('Merge Users'),
html_class='user-merge',
user_dbs=user_dbs,
merged_user_db=merged_user_db,
form=form,
auth_ids=auth_ids,
api_url=flask.url_for('api.admin.user.list'),
)
@ndb.transactional(xg=True)
def merge_user_dbs(user_db, deprecated_keys):
# TODO: Merge possible user data before handling deprecated users
deprecated_dbs = ndb.get_multi(deprecated_keys)
for deprecated_db in deprecated_dbs:
deprecated_db.auth_ids = []
deprecated_db.active = False
deprecated_db.verified = False
if not deprecated_db.username.startswith('_'):
deprecated_db.username = '_%s' % deprecated_db.username
ndb.put_multi(deprecated_dbs)
| mit | bd0124a202711c07307e9355a7000d4e | 31.277333 | 98 | 0.61649 | 3.62721 | false | false | false | false |
auth0/auth0-python | auth0/v3/management/logs.py | 1 | 3770 | from ..rest import RestClient
class Logs(object):
"""Auth0 logs endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
rest_options (RestClientOptions): Pass an instance of
RestClientOptions to configure additional RestClient
options, such as rate-limit retries.
(defaults to None)
"""
def __init__(
self,
domain,
token,
telemetry=True,
timeout=5.0,
protocol="https",
rest_options=None,
):
self.domain = domain
self.protocol = protocol
self.client = RestClient(
jwt=token, telemetry=telemetry, timeout=timeout, options=rest_options
)
def _url(self, id=None):
url = "{}://{}/api/v2/logs".format(self.protocol, self.domain)
if id is not None:
return "{}/{}".format(url, id)
return url
def search(
self,
page=0,
per_page=50,
sort=None,
q=None,
include_totals=True,
fields=None,
from_param=None,
take=None,
include_fields=True,
):
"""Search log events.
Args:
page (int, optional): The result's page number (zero based). By default,
retrieves the first page of results.
per_page (int, optional): The amount of entries per page. By default,
retrieves 50 results per page.
sort (str, optional): The field to use for sorting.
1 == ascending and -1 == descending. (e.g: date:1)
When not set, the default value is up to the server.
q (str, optional): Query in Lucene query string syntax.
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Leave empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise. Defaults to True.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise. Defaults to True.
from_param (str, optional): Log Event Id to start retrieving logs. You can
limit the amount of logs using the take parameter.
take (int, optional): The total amount of entries to retrieve when
using the from parameter. When not set, the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Logs/get_logs
"""
params = {
"per_page": per_page,
"page": page,
"include_totals": str(include_totals).lower(),
"sort": sort,
"fields": fields and ",".join(fields) or None,
"include_fields": str(include_fields).lower(),
"q": q,
"from": from_param,
"take": take,
}
return self.client.get(self._url(), params=params)
def get(self, id):
"""Retrieves the data related to the log entry identified by id.
Args:
id (str): The log_id of the log to retrieve.
See: https://auth0.com/docs/api/management/v2#!/Logs/get_logs_by_id
"""
return self.client.get(self._url(id))
| mit | d3f7ab93908b9e33c70f58da40555a4e | 32.070175 | 94 | 0.566313 | 4.363426 | false | false | false | false |
auth0/auth0-python | auth0/v3/test/management/test_log_streams.py | 1 | 2783 | import unittest
import mock
from ...management.log_streams import LogStreams
class TestLogStreams(unittest.TestCase):
def test_init_with_optionals(self):
t = LogStreams(
domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2)
)
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get("Auth0-Client", None)
self.assertEqual(telemetry_header, None)
@mock.patch("auth0.v3.management.log_streams.RestClient")
def test_list(self, mock_rc):
mock_instance = mock_rc.return_value
c = LogStreams(domain="domain", token="jwttoken")
c.list()
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/log-streams", args[0])
@mock.patch("auth0.v3.management.log_streams.RestClient")
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = LogStreams(domain="domain", token="jwttoken")
c.get("an-id")
args, kwargs = mock_instance.get.call_args
self.assertEqual("https://domain/api/v2/log-streams/an-id", args[0])
@mock.patch("auth0.v3.management.log_streams.RestClient")
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = LogStreams(domain="domain", token="jwttoken")
# Sample data belongs to an `http` stream
log_stream_data = {
"name": "string",
"type": "http",
"sink": {
"httpEndpoint": "string",
"httpContentType": "string",
"httpContentFormat": "JSONLINES|JSONARRAY",
"httpAuthorization": "string",
},
}
c.create(log_stream_data)
args, kwargs = mock_instance.post.call_args
self.assertEqual("https://domain/api/v2/log-streams", args[0])
self.assertEqual(kwargs["data"], log_stream_data)
@mock.patch("auth0.v3.management.log_streams.RestClient")
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
c = LogStreams(domain="domain", token="jwttoken")
c.delete("an-id")
mock_instance.delete.assert_called_with(
"https://domain/api/v2/log-streams/an-id"
)
@mock.patch("auth0.v3.management.log_streams.RestClient")
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
log_stream_update = {"name": "string"}
c = LogStreams(domain="domain", token="jwttoken")
c.update("an-id", log_stream_update)
args, kwargs = mock_instance.patch.call_args
self.assertEqual("https://domain/api/v2/log-streams/an-id", args[0])
self.assertEqual(kwargs["data"], log_stream_update)
| mit | 01ad3f44b179ace54ea38760563cfb08 | 31.741176 | 79 | 0.61193 | 3.567949 | false | true | false | false |
auth0/auth0-python | auth0/v3/authentication/database.py | 1 | 4138 | import warnings
from .base import AuthenticationBase
class Database(AuthenticationBase):
"""Database & Active Directory / LDAP Authentication.
Args:
domain (str): Your auth0 domain (e.g: username.auth0.com)
"""
def login(
self,
client_id,
username,
password,
connection,
id_token=None,
grant_type="password",
device=None,
scope="openid",
):
"""Login using username and password
Given the user credentials and the connection specified, it will do
the authentication on the provider and return a dict with the
access_token and id_token. This endpoint only works for database
connections, passwordless connections, Active Directory/LDAP,
Windows Azure AD and ADFS.
"""
warnings.warn(
"/oauth/ro will be deprecated in future releases", DeprecationWarning
)
body = {
"client_id": client_id,
"username": username,
"password": password,
"connection": connection,
"grant_type": grant_type,
"scope": scope,
}
if id_token:
body.update({"id_token": id_token})
if device:
body.update({"device": device})
return self.post(
"{}://{}/oauth/ro".format(self.protocol, self.domain), data=body
)
def signup(
self,
client_id,
email,
password,
connection,
username=None,
user_metadata=None,
given_name=None,
family_name=None,
name=None,
nickname=None,
picture=None,
):
"""Signup using email and password.
Args:
client_id (str): ID of the application to use.
email (str): The user's email address.
password (str): The user's desired password.
connection (str): The name of the database connection where this user should be created.
username (str, optional): The user's username, if required by the database connection.
user_metadata (dict, optional): Additional key-value information to store for the user.
Some limitations apply, see: https://auth0.com/docs/metadata#metadata-restrictions
given_name (str, optional): The user's given name(s).
family_name (str, optional): The user's family name(s).
name (str, optional): The user's full name.
nickname (str, optional): The user's nickname.
picture (str, optional): A URI pointing to the user's picture.
See: https://auth0.com/docs/api/authentication#signup
"""
body = {
"client_id": client_id,
"email": email,
"password": password,
"connection": connection,
}
if username:
body.update({"username": username})
if user_metadata:
body.update({"user_metadata": user_metadata})
if given_name:
body.update({"given_name": given_name})
if family_name:
body.update({"family_name": family_name})
if name:
body.update({"name": name})
if nickname:
body.update({"nickname": nickname})
if picture:
body.update({"picture": picture})
return self.post(
"{}://{}/dbconnections/signup".format(self.protocol, self.domain), data=body
)
def change_password(self, client_id, email, connection, password=None):
"""Asks to change a password for a given user.
client_id (str): ID of the application to use.
email (str): The user's email address.
connection (str): The name of the database connection where this user should be created.
"""
body = {
"client_id": client_id,
"email": email,
"connection": connection,
}
return self.post(
"{}://{}/dbconnections/change_password".format(self.protocol, self.domain),
data=body,
)
| mit | 80652d422541d2af128140f5265b843d | 28.985507 | 102 | 0.561141 | 4.572376 | false | false | false | false |
auth0/auth0-python | auth0/v3/management/actions.py | 1 | 7936 | from ..rest import RestClient
class Actions(object):
"""Auth0 Actions endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
rest_options (RestClientOptions): Pass an instance of
RestClientOptions to configure additional RestClient
options, such as rate-limit retries.
(defaults to None)
"""
def __init__(
self,
domain,
token,
telemetry=True,
timeout=5.0,
protocol="https",
rest_options=None,
):
self.domain = domain
self.protocol = protocol
self.client = RestClient(
jwt=token, telemetry=telemetry, timeout=timeout, options=rest_options
)
def _url(self, *args):
url = "{}://{}/api/v2/actions".format(self.protocol, self.domain)
for p in args:
if p is not None:
url = "{}/{}".format(url, p)
return url
def get_actions(
self,
trigger_id=None,
action_name=None,
deployed=None,
installed=False,
page=None,
per_page=None,
):
"""Get all actions.
Args:
trigger_id (str, optional): Filter the results to only actions associated
with this trigger ID.
action_name (str, optional): Filter the results to only actions with this name.
deployed (bool, optional): True to filter the results to only deployed actions.
Defaults to False.
installed (bool, optional): True to filter the results to only installed actions.
Defaults to False.
page (int, optional): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Actions/get_actions
"""
if deployed is not None:
deployed = str(deployed).lower()
params = {
"triggerId": trigger_id,
"actionName": action_name,
"deployed": deployed,
"installed": str(installed).lower(),
"page": page,
"per_page": per_page,
}
return self.client.get(self._url("actions"), params=params)
def create_action(self, body):
"""Create a new action.
Args:
body (dict): Attributes for the new action.
See: https://auth0.com/docs/api/management/v2#!/Actions/post_action
"""
return self.client.post(self._url("actions"), data=body)
def update_action(self, id, body):
"""Updates an action.
Args:
id (str): the ID of the action.
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Actions/patch_action
"""
return self.client.patch(self._url("actions", id), data=body)
def get_action(self, id):
"""Retrieves an action by its ID.
Args:
id (str): Id of action to retrieve.
See: https://auth0.com/docs/api/management/v2#!/Actions/get_action
"""
params = {}
return self.client.get(self._url("actions", id), params=params)
def delete_action(self, id, force=False):
"""Deletes an action and all of its associated versions.
Args:
id (str): ID of the action to delete.
force (bool, optional): True to force action deletion detaching bindings,
False otherwise. Defaults to False.
See: https://auth0.com/docs/api/management/v2#!/Actions/delete_action
"""
params = {"force": str(force).lower()}
return self.client.delete(self._url("actions", id), params=params)
def get_triggers(self):
"""Retrieve the set of triggers currently available within actions.
See: https://auth0.com/docs/api/management/v2#!/Actions/get_triggers
"""
params = {}
return self.client.get(self._url("triggers"), params=params)
def get_execution(self, id):
"""Get information about a specific execution of a trigger.
Args:
id (str): The ID of the execution to retrieve.
See: https://auth0.com/docs/api/management/v2#!/Actions/get_execution
"""
params = {}
return self.client.get(self._url("executions", id), params=params)
def get_action_versions(self, id, page=None, per_page=None):
"""Get all of an action's versions.
Args:
id (str): The ID of the action.
page (int, optional): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Actions/get_action_versions
"""
params = {"page": page, "per_page": per_page}
return self.client.get(self._url("actions", id, "versions"), params=params)
def get_trigger_bindings(self, id, page=None, per_page=None):
"""Get the actions that are bound to a trigger.
Args:
id (str): The trigger ID.
page (int, optional): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
See: https://auth0.com/docs/api/management/v2#!/Actions/get_bindings
"""
params = {"page": page, "per_page": per_page}
return self.client.get(self._url("triggers", id, "bindings"), params=params)
def get_action_version(self, action_id, version_id):
"""Retrieve a specific version of an action.
Args:
action_id (str): The ID of the action.
version_id (str): The ID of the version to retrieve.
See: https://auth0.com/docs/api/management/v2#!/Actions/get_action_version
"""
params = {}
return self.client.get(
self._url("actions", action_id, "versions", version_id), params=params
)
def deploy_action(self, id):
"""Deploy an action.
Args:
id (str): The ID of the action to deploy.
See: https://auth0.com/docs/api/management/v2#!/Actions/post_deploy_action
"""
return self.client.post(self._url("actions", id, "deploy"))
def rollback_action_version(self, action_id, version_id):
"""Roll back to a previous version of an action.
Args:
action_id (str): The ID of the action.
version_id (str): The ID of the version.
See: https://auth0.com/docs/api/management/v2#!/Actions/post_deploy_draft_version
"""
return self.client.post(
self._url("actions", action_id, "versions", version_id, "deploy"), data={}
)
def update_trigger_bindings(self, id, body):
"""Update a trigger's bindings.
Args:
id (str): The ID of the trigger to update.
body (dict): Attributes for the updated trigger binding.
See: https://auth0.com/docs/api/management/v2#!/Actions/patch_bindings
"""
return self.client.patch(self._url("triggers", id, "bindings"), data=body)
| mit | ac3466af28611ed33e4b7175d267db89 | 30.61753 | 92 | 0.580015 | 4.144125 | false | false | false | false |
jrief/djangocms-cascade | cmsplugin_cascade/generic/text_image.py | 1 | 4716 | from django.forms import widgets, ChoiceField, MultipleChoiceField
from django.utils.html import format_html_join
from django.utils.translation import gettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import SizeField
from cmsplugin_cascade.image import ImageFormMixin, ImagePropertyMixin
from cmsplugin_cascade.link.config import LinkPluginBase, LinkFormMixin
from cmsplugin_cascade.link.plugin_base import LinkElementMixin
from cmsplugin_cascade.utils import compute_aspect_ratio
class TextImageFormMixin(ImageFormMixin):
RESIZE_OPTIONS = [
('upscale', _("Upscale image")),
('crop', _("Crop image")),
('subject_location', _("With subject location")),
('high_resolution', _("Optimized for Retina")),
]
image_width = SizeField(
label=_("Image Width"),
allowed_units=['px'],
required=True,
help_text=_("Set the image width in pixels."),
)
image_height = SizeField(
label=_("Image Height"),
allowed_units=['px'],
required=False,
help_text=_("Set the image height in pixels."),
)
resize_options = MultipleChoiceField(
label=_("Resize Options"),
choices = RESIZE_OPTIONS,
required=False,
widget=widgets.CheckboxSelectMultiple,
help_text=_("Options to use when resizing the image."),
initial=['subject_location', 'high_resolution']
)
alignement = ChoiceField(
label=_("Alignement"),
choices=[('', _("Not aligned")), ('left', _("Left")), ('right', _("Right"))],
required=False,
widget=widgets.RadioSelect,
initial='',
)
class Meta:
entangled_fields = {'glossary': ['image_width', 'image_height', 'resize_options', 'alignement']}
class TextImagePlugin(LinkPluginBase):
name = _("Image in text")
text_enabled = True
ring_plugin = 'TextImagePlugin'
render_template = 'cascade/plugins/textimage.html'
parent_classes = ['TextPlugin']
model_mixins = (ImagePropertyMixin, LinkElementMixin)
allow_children = False
require_parent = False
form = type('TextImageForm', (LinkFormMixin, TextImageFormMixin), {'require_link': False})
html_tag_attributes = LinkPluginBase.html_tag_attributes
html_tag_attributes.update({'image_title': 'title', 'alt_tag': 'alt'})
class Media:
js = ['admin/js/jquery.init.js', 'cascade/js/admin/textimageplugin.js']
@classmethod
def requires_parent_plugin(cls, slot, page):
"""
Workaround for `PluginPool.get_all_plugins()`, otherwise TextImagePlugin is not allowed
as a child of a `TextPlugin`.
"""
return False
@classmethod
def get_inline_styles(cls, instance):
inline_styles = cls.super(TextImagePlugin, cls).get_inline_styles(instance)
alignement = instance.glossary.get('alignement')
if alignement:
inline_styles['float'] = alignement
return inline_styles
def render(self, context, instance, placeholder):
context = self.super(TextImagePlugin, self).render(context, instance, placeholder)
try:
aspect_ratio = compute_aspect_ratio(instance.image)
except Exception:
# if accessing the image file fails, abort here
return context
resize_options = instance.glossary.get('resize_options', {})
crop = 'crop' in resize_options
upscale = 'upscale' in resize_options
subject_location = instance.image.subject_location if 'subject_location' in resize_options else False
high_resolution = 'high_resolution' in resize_options
image_width = instance.glossary.get('image_width', '')
if not image_width.endswith('px'):
return context
image_width = int(image_width.rstrip('px'))
image_height = instance.glossary.get('image_height', '')
if image_height.endswith('px'):
image_height = int(image_height.rstrip('px'))
else:
image_height = int(round(image_width * aspect_ratio))
context['src'] = {
'size': (image_width, image_height),
'size2x': (image_width * 2, image_height * 2),
'crop': crop,
'upscale': upscale,
'subject_location': subject_location,
'high_resolution': high_resolution,
}
link_attributes = LinkPluginBase.get_html_tag_attributes(instance)
context['link_html_tag_attributes'] = format_html_join(' ', '{0}="{1}"',
[(attr, val) for attr, val in link_attributes.items() if val]
)
return context
plugin_pool.register_plugin(TextImagePlugin)
| mit | 8d29b338e7868209c787d6be7b2245e6 | 37.341463 | 109 | 0.636344 | 4.051546 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/norway.py | 1 | 3800 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, datetime
from dateutil import rrule
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import SU
from holidays.constants import MON, THU, FRI, SUN, JAN, MAY, DEC
from holidays.holiday_base import HolidayBase
class Norway(HolidayBase):
"""
Norwegian holidays.
Note that holidays falling on a sunday is "lost",
it will not be moved to another day to make up for the collision.
In Norway, ALL sundays are considered a holiday (https://snl.no/helligdag).
Initialize this class with include_sundays=False
to not include sundays as a holiday.
Primary sources:
https://lovdata.no/dokument/NL/lov/1947-04-26-1
https://no.wikipedia.org/wiki/Helligdager_i_Norge
https://www.timeanddate.no/merkedag/norge/
"""
country = "NO"
def __init__(self, include_sundays=False, **kwargs):
"""
:param include_sundays: Whether to consider sundays as a holiday
(which they are in Norway)
:param kwargs:
"""
self.include_sundays = include_sundays
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
super()._populate(year)
if self.include_sundays: # Optionally add all Sundays of the year.
year_first_day = datetime(year, JAN, 1)
year_last_day = datetime(year, DEC, 31)
# Get all Sundays including first/last day of the year cases.
sundays = rrule.rrule(
rrule.WEEKLY, byweekday=SU, dtstart=year_first_day
).between(year_first_day, year_last_day, inc=True)
for sunday in sundays:
self[sunday.date()] = "Søndag"
# ========= Static holidays =========
self[date(year, JAN, 1)] = "Første nyttårsdag"
# Source: https://lovdata.no/dokument/NL/lov/1947-04-26-1
if year >= 1947:
self[date(year, MAY, 1)] = "Arbeidernes dag"
self[date(year, MAY, 17)] = "Grunnlovsdag"
# According to https://no.wikipedia.org/wiki/F%C3%B8rste_juledag,
# these dates are only valid from year > 1700
# Wikipedia has no source for the statement, so leaving this be for now
self[date(year, DEC, 25)] = "Første juledag"
self[date(year, DEC, 26)] = "Andre juledag"
# ========= Moving holidays =========
# NOTE: These are probably subject to the same > 1700
# restriction as the above dates. The only source I could find for how
# long Easter has been celebrated in Norway was
# https://www.hf.uio.no/ikos/tjenester/kunnskap/samlinger/norsk-folkeminnesamling/livs-og-arshoytider/paske.html
# which says
# "(...) has been celebrated for over 1000 years (...)" (in Norway)
easter_day = easter(year)
self[easter_day + rd(days=-3)] = "Skjærtorsdag"
self[easter_day + rd(days=-2)] = "Langfredag"
self[easter_day] = "Første påskedag"
self[easter_day + rd(days=+1)] = "Andre påskedag"
self[easter_day + rd(days=+39)] = "Kristi himmelfartsdag"
self[easter_day + rd(days=+49)] = "Første pinsedag"
self[easter_day + rd(days=+50)] = "Andre pinsedag"
class NO(Norway):
pass
class NOR(Norway):
pass
| mit | 1e4d2f7f2edf44d13346a0fb4d951349 | 36.166667 | 120 | 0.63598 | 3.177703 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/madagascar.py | 1 | 2186 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import SU
from holidays.holiday_base import HolidayBase
class Madagascar(HolidayBase):
"""
https://www.officeholidays.com/countries/madagascar
https://www.timeanddate.com/holidays/madagascar/
"""
country = "MG"
def _populate(self, year):
super()._populate(year)
# Observed since 1947
if year <= 1946:
return
self[date(year, 1, 1)] = "Taom-baovao"
self[date(year, 3, 8)] = "Fetin'ny vehivavy"
self[date(year, 3, 29)] = "Fetin'ny mahery fo"
self[date(year, 11, 1)] = "Fetin'ny olo-masina"
self[date(year, 12, 25)] = "Fetin'ny noely"
self[easter(year)] = "fetin'ny paska"
self[easter(year) + rd(days=1)] = "Alatsinain'ny paska"
self[easter(year) + rd(days=49)] = "Pentekosta"
self[easter(year) + rd(days=50)] = "Alatsinain'ny pentekosta"
self[date(year, 6, 1) + rd(day=1, weekday=SU(3))] = "Fetin'ny ray"
self[
easter(year) + rd(days=39)
] = "Fiakaran'ny Jesosy kristy tany an-danitra"
self[date(year, 8, 15)] = "Fiakaran'ny Masina Maria tany an-danitra"
if easter(year) + rd(days=49) == date(year, 5, 1) + rd(
day=31, weekday=SU(-1)
):
self[
date(year, 5, 1) + rd(day=31, weekday=SU(-1)) + rd(days=7)
] = "Fetin'ny Reny"
else:
self[
date(year, 5, 1) + rd(day=31, weekday=SU(-1))
] = "Fetin'ny Reny"
class MG(Madagascar):
pass
class MDG(Madagascar):
pass
| mit | 46b81ddce3019e9450f98356bbfd7a1f | 31.147059 | 78 | 0.598811 | 2.883905 | false | false | false | false |
jrief/djangocms-cascade | cmsplugin_cascade/migrations/0002_auto_20150530_1018.py | 1 | 1288 | from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Segmentation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
'verbose_name': 'Segmentation',
'managed': False,
'verbose_name_plural': 'Segmentation',
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='cascadeelement',
name='glossary',
field=models.JSONField(default={}, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='pluginextrafields',
name='plugin_type',
field=models.CharField(max_length=50, verbose_name='Plugin Name', db_index=True),
preserve_default=True,
),
migrations.AlterField(
model_name='sharablecascadeelement',
name='glossary',
field=models.JSONField(default={}, blank=True),
preserve_default=True,
),
]
| mit | aa494a7c793a4f07603563364980e78b | 30.414634 | 114 | 0.534938 | 4.735294 | false | false | false | false |
dr-prodigy/python-holidays | test/test_holiday_base.py | 1 | 28132 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import pathlib
import pickle
import unittest
import warnings
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta, MO
import holidays
from holidays.constants import FEB, JAN
class TestBasics(unittest.TestCase):
    def setUp(self):
        # Use the US holiday calendar as a representative HolidayBase
        # implementation; the tests below exercise generic dict behavior.
        self.holidays = holidays.US()
def test_contains(self):
self.assertIn(date(2014, 1, 1), self.holidays)
self.assertNotIn(date(2014, 1, 2), self.holidays)
def test_getitem(self):
self.assertEqual(self.holidays[date(2014, 1, 1)], "New Year's Day")
self.assertEqual(self.holidays.get(date(2014, 1, 1)), "New Year's Day")
self.assertRaises(KeyError, lambda: self.holidays[date(2014, 1, 2)])
self.assertIsNone(self.holidays.get(date(2014, 1, 2)))
self.assertListEqual(
self.holidays[date(2013, 12, 31) : date(2014, 1, 2)],
[date(2014, 1, 1)],
)
self.assertListEqual(
self.holidays[date(2013, 12, 24) : date(2014, 1, 2)],
[date(2013, 12, 25), date(2014, 1, 1)],
)
self.assertListEqual(
self.holidays[date(2013, 12, 25) : date(2014, 1, 2) : 3],
[date(2013, 12, 25)],
)
self.assertListEqual(
self.holidays[date(2013, 12, 25) : date(2014, 1, 2) : 7],
[date(2013, 12, 25), date(2014, 1, 1)],
)
self.assertListEqual(
self.holidays[date(2014, 1, 2) : date(2013, 12, 30)],
[date(2014, 1, 1)],
)
self.assertListEqual(
self.holidays[date(2014, 1, 2) : date(2013, 12, 25)],
[date(2014, 1, 1)],
)
self.assertListEqual(
self.holidays[date(2014, 1, 2) : date(2013, 12, 24)],
[date(2014, 1, 1), date(2013, 12, 25)],
)
self.assertListEqual(
self.holidays[date(2014, 1, 1) : date(2013, 12, 24) : 3],
[date(2014, 1, 1)],
)
self.assertListEqual(
self.holidays[date(2014, 1, 1) : date(2013, 12, 24) : 7],
[date(2014, 1, 1), date(2013, 12, 25)],
)
self.assertListEqual(
self.holidays[date(2013, 12, 31) : date(2014, 1, 2) : -3], []
)
self.assertListEqual(
self.holidays[
date(2014, 1, 1) : date(2013, 12, 24) : timedelta(days=3)
],
[date(2014, 1, 1)],
)
self.assertListEqual(
self.holidays[
date(2014, 1, 1) : date(2013, 12, 24) : timedelta(days=7)
],
[date(2014, 1, 1), date(2013, 12, 25)],
)
self.assertListEqual(
self.holidays[
date(2013, 12, 31) : date(2014, 1, 2) : timedelta(days=3)
],
[],
)
self.assertRaises(
ValueError, lambda: self.holidays[date(2014, 1, 1) :]
)
self.assertRaises(
ValueError, lambda: self.holidays[: date(2014, 1, 1)]
)
self.assertRaises(
TypeError,
lambda: self.holidays[date(2014, 1, 1) : date(2014, 1, 2) : ""],
)
self.assertRaises(
ValueError,
lambda: self.holidays[date(2014, 1, 1) : date(2014, 1, 2) : 0],
)
def test_get(self):
self.assertEqual(self.holidays.get("2014-01-01"), "New Year's Day")
self.assertIsNone(self.holidays.get("2014-01-02"))
self.assertFalse(self.holidays.get("2014-01-02", False))
self.assertTrue(self.holidays.get("2014-01-02", True))
def test_pop(self):
self.assertRaises(KeyError, lambda: self.holidays.pop("2014-01-02"))
self.assertFalse(self.holidays.pop("2014-01-02", False))
self.assertTrue(self.holidays.pop("2014-01-02", True))
self.assertIn(date(2014, 1, 1), self.holidays)
self.assertEqual(self.holidays.pop("2014-01-01"), "New Year's Day")
self.assertNotIn(date(2014, 1, 1), self.holidays)
self.assertIn(date(2014, 7, 4), self.holidays)
def test_pop_named(self):
self.assertIn(date(2014, 1, 1), self.holidays)
self.holidays.pop_named("New Year's Day")
self.assertNotIn(date(2014, 1, 1), self.holidays)
self.assertRaises(
KeyError, lambda: self.holidays.pop_named("New Year's Dayz")
)
def test_setitem(self):
self.holidays = holidays.US(years=[2014])
self.assertEqual(len(self.holidays), 10)
self.holidays[date(2014, 1, 3)] = "Fake Holiday"
self.assertEqual(len(self.holidays), 11)
self.assertIn(date(2014, 1, 3), self.holidays)
self.assertEqual(self.holidays.get(date(2014, 1, 3)), "Fake Holiday")
    def test_str(self):
        """str() shows the configuration until years are populated."""
        self.holidays = holidays.US()
        # An empty calendar renders its constructor settings.
        self.assertEqual(
            str(self.holidays),
            "{'observed': True, 'expand': True, 'subdiv': None, "
            "'years': set()}",
        )
        # A populated calendar renders its {date: name} mapping.
        self.holidays = holidays.US(years=1900)
        self.assertEqual(
            str(self.holidays),
            '{datetime.date(1900, 1, 1): "New Year\'s Day", '
            'datetime.date(1900, 2, 22): "Washington\'s Birthday", '
            "datetime.date(1900, 5, 30): 'Memorial Day', "
            "datetime.date(1900, 7, 4): 'Independence Day', "
            "datetime.date(1900, 9, 3): 'Labor Day', "
            "datetime.date(1900, 11, 22): 'Thanksgiving', "
            "datetime.date(1900, 12, 25): 'Christmas Day'}",
        )
def test_update(self):
h = holidays.HolidayBase()
h.update(
{
date(2015, 1, 1): "New Year's Day",
"2015-12-25": "Christmas Day",
}
)
self.assertIn("2015-01-01", h)
self.assertIn(date(2015, 12, 25), h)
def test_append(self):
h = holidays.HolidayBase()
h.update(
{
date(2015, 1, 1): "New Year's Day",
"2015-12-25": "Christmas Day",
}
)
h.append([date(2015, 4, 1), "2015-04-03"])
h.append(date(2015, 4, 6))
h.append("2015-04-07")
self.assertIn("2015-01-01", h)
self.assertIn(date(2015, 12, 25), h)
self.assertIn("2015-04-01", h)
self.assertNotIn("2015-04-02", h)
self.assertIn("2015-04-03", h)
self.assertNotIn("2015-04-04", h)
self.assertNotIn("2015-04-05", h)
self.assertIn("2015-04-06", h)
self.assertIn("2015-04-07", h)
def test_eq_ne(self):
us1 = holidays.UnitedStates()
us2 = holidays.US()
us3 = holidays.UnitedStates(years=[2014])
us4 = holidays.US(years=[2014])
ca1 = holidays.Canada()
ca2 = holidays.CA()
ca3 = holidays.Canada(years=[2014])
ca4 = holidays.CA(years=[2014])
self.assertEqual(us1, us2)
self.assertEqual(us3, us4)
self.assertEqual(ca1, ca2)
self.assertEqual(ca3, ca4)
self.assertNotEqual(us1, us3)
self.assertNotEqual(us1, ca1)
self.assertNotEqual(us3, ca3)
self.assertNotEqual(us1, us3)
    def test_add(self):
        """Adding calendars merges their countries, years and holiday names."""
        ca = holidays.CA()
        us = holidays.US()
        mx = holidays.MX()
        na = ca + (us + mx)
        # Each country keeps its own holidays; sums contain the union.
        self.assertNotIn("2014-07-01", us)
        self.assertIn("2014-07-01", ca)
        self.assertNotIn("2014-07-04", ca)
        self.assertIn("2014-07-04", us)
        self.assertIn("2014-07-04", ca + us)
        self.assertIn("2014-07-04", us + ca)
        self.assertIn("2015-07-04", ca + us)
        self.assertIn("2015-07-04", us + ca)
        self.assertIn("2015-07-01", ca + us)
        self.assertIn("2015-07-01", us + ca)
        self.assertIn("2014-07-04", na)
        self.assertIn("2015-07-04", na)
        self.assertIn("2015-07-01", na)
        self.assertIn("2000-02-05", na)
        # The default CA subdivision ("ON") survives the merge.
        self.assertEqual((ca + us).subdiv, "ON")
        self.assertEqual((us + ca).subdiv, "ON")
        # Merged calendars combine years and keep expand=True if either
        # operand has it, regardless of operand order.
        ca = holidays.CA(years=[2014], expand=False)
        us = holidays.US(years=[2014, 2015], expand=True)
        self.assertTrue((ca + us).expand)
        self.assertEqual((ca + us).years, {2014, 2015})
        self.assertEqual((us + ca).years, {2014, 2015})
        # In-place addition accumulates country codes in order.
        na = holidays.CA()
        na += holidays.US()
        na += holidays.MX()
        self.assertEqual(na.country, ["CA", "US", "MX"])
        self.assertIn("2014-07-04", na)
        self.assertIn("2014-07-04", na)
        self.assertIn("2015-07-04", na)
        self.assertIn("2015-07-04", na)
        self.assertIn("2015-07-01", na)
        self.assertIn("2015-07-01", na)
        self.assertIn("2000-02-05", na)
        self.assertEqual(na.subdiv, "ON")
        # Mixing binary and in-place addition behaves the same way.
        na = holidays.CA() + holidays.US()
        na += holidays.MX()
        self.assertIn("2014-07-04", na)
        self.assertIn("2014-07-04", na)
        self.assertIn("2015-07-04", na)
        self.assertIn("2015-07-04", na)
        self.assertIn("2015-07-01", na)
        self.assertIn("2015-07-01", na)
        self.assertIn("2000-02-05", na)
        self.assertEqual(na.subdiv, "ON")
        # Adding a non-HolidayBase operand is rejected.
        self.assertRaises(TypeError, lambda: holidays.US() + {})
        # Re-adding the same calendars must neither drop nor duplicate
        # holidays; BC-specific days join the union.
        na = ca + (us + mx) + ca + (mx + us + holidays.CA(subdiv="BC"))
        self.assertIn("2000-02-05", na)
        self.assertIn("2014-02-10", na)
        self.assertIn("2014-02-17", na)
        self.assertIn("2014-07-04", na)
        # Differing subdivisions accumulate into a list.
        provs = holidays.CA(subdiv="ON", years=[2014]) + holidays.CA(
            subdiv="BC", years=[2015]
        )
        self.assertIn("2015-02-09", provs)
        self.assertIn("2015-02-16", provs)
        self.assertEqual(provs.subdiv, ["ON", "BC"])
        # sum() merges all provincial calendars into a single CA instance.
        a = sum(holidays.CA(subdiv=x) for x in holidays.CA.subdivisions)
        self.assertEqual(a.country, "CA")
        self.assertEqual(a.subdiv, holidays.CA.subdivisions)
        self.assertIn("2015-02-09", a)
        self.assertIn("2015-02-16", a)
        # Names of holidays sharing a date are joined with ", ".
        na = holidays.CA() + holidays.US() + holidays.MX()
        self.assertIn(date(1969, 12, 25), na)
        self.assertEqual(na.get(date(1969, 7, 1)), "Dominion Day")
        self.assertEqual(na.get(date(1983, 7, 1)), "Canada Day")
        self.assertEqual(
            na.get(date(1969, 12, 25)), "Christmas Day, Navidad [Christmas]"
        )
        # The joined name is the same regardless of addition order.
        na = holidays.MX() + holidays.CA() + holidays.US()
        self.assertEqual(
            na.get(date(1969, 12, 25)), "Christmas Day, Navidad [Christmas]"
        )
        # Financial market calendars merge the same way via .market.
        ecb = holidays.ECB()
        nyse = holidays.NYSE()
        ecb_nyse = ecb + nyse
        self.assertEqual(len(ecb) + len(nyse), len(ecb_nyse))
        self.assertEqual(ecb_nyse.market, ["ECB", "NYSE"])
    def test_get_list(self):
        """get_list() splits same-day holiday names into individual entries."""
        westland = holidays.NZ(subdiv="WTL")
        chathams = holidays.NZ(subdiv="CIT")
        wild = westland + chathams
        # Indexing yields the combined, comma-joined name ...
        self.assertEqual(
            wild[date(1969, 12, 1)],
            ("Chatham Islands Anniversary Day, West Coast Anniversary Day"),
        )
        # ... while get_list() yields each name separately.
        self.assertEqual(
            wild.get_list(date(1969, 12, 1)),
            ["Chatham Islands Anniversary Day", "West Coast Anniversary Day"],
        )
        self.assertEqual(wild.get_list(date(1969, 1, 1)), ["New Year's Day"])
        # Each source calendar only reports its own anniversary day.
        self.assertEqual(
            westland.get_list(date(1969, 12, 1)),
            ["West Coast Anniversary Day"],
        )
        self.assertEqual(
            westland.get_list(date(1969, 1, 1)), ["New Year's Day"]
        )
        self.assertEqual(
            chathams.get_list(date(1969, 12, 1)),
            ["Chatham Islands Anniversary Day"],
        )
        self.assertEqual(
            chathams.get_list(date(1969, 1, 1)), ["New Year's Day"]
        )
        ca = holidays.CA()
        us = holidays.US()
        mx = holidays.MX()
        na = ca + us + mx
        self.assertIn(date(1969, 12, 25), na)
        self.assertEqual(
            na.get_list(date(1969, 12, 25)),
            ["Christmas Day", "Navidad [Christmas]"],
        )
        self.assertEqual(na.get_list(date(1969, 7, 1)), ["Dominion Day"])
        # A non-holiday date yields an empty list rather than raising.
        self.assertEqual(na.get_list(date(1969, 1, 3)), [])
def test_list_supported_countries(self):
supported_countries = holidays.list_supported_countries()
countries_files = [
path
for path in pathlib.Path("holidays/countries").glob("*.py")
if not str(path).endswith("__init__.py")
]
self.assertEqual(
len(countries_files),
len(supported_countries),
)
self.assertIn("AR", supported_countries)
self.assertIn("CA", supported_countries["US"])
self.assertIn("IM", supported_countries)
self.assertIn("ZA", supported_countries)
def test_list_supported_financial(self):
supported_financial = holidays.list_supported_financial()
financial_files = [
path
for path in pathlib.Path("holidays/financial").glob("*.py")
if not str(path).endswith("__init__.py")
]
self.assertEqual(
len(financial_files),
len(supported_financial),
)
self.assertIn("ECB", supported_financial)
self.assertIn("NYSE", supported_financial)
def test_radd(self):
self.assertRaises(TypeError, lambda: 1 + holidays.US())
    def test_inheritance(self):
        """Subclasses can drop, add, or define holidays via _populate()."""
        # A subclass may remove a holiday after the base class populated it.
        class NoColumbusHolidays(holidays.US):
            def _populate(self, year):
                holidays.US._populate(self, year)
                # Columbus Day is the second Monday of October.
                self.pop(date(year, 10, 1) + relativedelta(weekday=MO(+2)))
        hdays = NoColumbusHolidays()
        self.assertIn(date(2014, 10, 13), self.holidays)
        self.assertNotIn(date(2014, 10, 13), hdays)
        self.assertIn(date(2014, 1, 1), hdays)
        self.assertIn(date(2020, 10, 12), self.holidays)
        self.assertNotIn(date(2020, 10, 12), hdays)
        self.assertIn(date(2020, 1, 1), hdays)
        # A subclass may add an extra holiday of its own.
        class NinjaTurtlesHolidays(holidays.US):
            def _populate(self, year):
                holidays.US._populate(self, year)
                self[date(year, 7, 13)] = "Ninja Turtle's Day"
        hdays = NinjaTurtlesHolidays()
        self.assertNotIn(date(2014, 7, 13), self.holidays)
        self.assertIn(date(2014, 7, 13), hdays)
        self.assertIn(date(2014, 1, 1), hdays)
        self.assertNotIn(date(2020, 7, 13), self.holidays)
        self.assertIn(date(2020, 7, 13), hdays)
        self.assertIn(date(2020, 1, 1), hdays)
        # A brand-new calendar contains only what it populates itself.
        class NewCountry(holidays.HolidayBase):
            def _populate(self, year):
                self[date(year, 1, 2)] = "New New Year's"
        hdays = NewCountry()
        self.assertNotIn(date(2014, 1, 1), hdays)
        self.assertIn(date(2014, 1, 2), hdays)
        class Dec31Holiday(holidays.HolidayBase):
            def _populate(self, year):
                self[date(year, 12, 31)] = "New Year's Eve"
        self.assertIn(date(2014, 12, 31), Dec31Holiday())
def test_get_named(self):
us = holidays.UnitedStates(years=[2020])
# check for "New Year's Day" presence in get_named("new")
self.assertIn(date(2020, 1, 1), us.get_named("new"))
# check for searching holiday in US when the observed holiday is on
# a different year than input one
us = holidays.US(years=[2022])
us.get_named("Thanksgiving")
self.assertEqual([2022], list(us.years))
class TestArgs(unittest.TestCase):
    """Constructor arguments: country aliases, subdivisions, years,
    expand, observed, pickling, and deprecated keywords."""
    def setUp(self):
        self.holidays = holidays.US()
    def test_country(self):
        # holidays.US and holidays.UnitedStates are aliases for "US".
        self.assertEqual(self.holidays.country, "US")
        self.assertIn(date(2014, 7, 4), self.holidays)
        self.assertNotIn(date(2014, 7, 1), self.holidays)
        self.holidays = holidays.UnitedStates()
        self.assertEqual(self.holidays.country, "US")
        self.assertIn(date(2014, 7, 4), self.holidays)
        self.assertNotIn(date(2014, 7, 1), self.holidays)
        self.assertEqual(self.holidays.country, "US")
        # Canada defaults to the "ON" subdivision.
        self.holidays = holidays.CA()
        self.assertEqual(self.holidays.country, "CA")
        self.assertEqual(self.holidays.subdiv, "ON")
        self.assertIn(date(2014, 7, 1), self.holidays)
        self.assertNotIn(date(2014, 7, 4), self.holidays)
        self.holidays = holidays.CA(subdiv="BC")
        self.assertEqual(self.holidays.country, "CA")
        self.assertEqual(self.holidays.subdiv, "BC")
        self.assertIn(date(2014, 7, 1), self.holidays)
        self.assertNotIn(date(2014, 7, 4), self.holidays)
    def test_years(self):
        # Years start empty and are populated lazily by membership checks.
        self.assertEqual(len(self.holidays.years), 0)
        self.assertNotIn(date(2014, 1, 2), self.holidays)
        self.assertEqual(len(self.holidays.years), 1)
        self.assertIn(2014, self.holidays.years)
        self.assertNotIn(date(2013, 1, 2), self.holidays)
        self.assertNotIn(date(2014, 1, 2), self.holidays)
        self.assertNotIn(date(2015, 1, 2), self.holidays)
        self.assertEqual(len(self.holidays.years), 3)
        self.assertIn(2013, self.holidays.years)
        self.assertIn(2015, self.holidays.years)
        # Years may also be given up front as a range ...
        self.holidays = holidays.US(years=range(2010, 2015 + 1))
        self.assertEqual(len(self.holidays.years), 6)
        self.assertNotIn(2009, self.holidays.years)
        self.assertIn(2010, self.holidays.years)
        self.assertIn(2015, self.holidays.years)
        self.assertNotIn(2016, self.holidays.years)
        # ... a tuple (duplicates collapse) ...
        self.holidays = holidays.US(years=(2013, 2015, 2015))
        self.assertEqual(len(self.holidays.years), 2)
        self.assertIn(2013, self.holidays.years)
        self.assertNotIn(2014, self.holidays.years)
        self.assertIn(2015, self.holidays.years)
        # An observed substitute day may fall in the requested year even
        # when the actual holiday is in the next one.
        self.assertIn(date(2021, 12, 31), holidays.US(years=[2021]).keys())
        # ... or a single int.
        self.holidays = holidays.US(years=2015)
        self.assertNotIn(2014, self.holidays.years)
        self.assertIn(2015, self.holidays.years)
    def test_expand(self):
        # With expand=False, membership checks do not add new years.
        self.holidays = holidays.US(years=(2013, 2015), expand=False)
        self.assertEqual(len(self.holidays.years), 2)
        self.assertIn(2013, self.holidays.years)
        self.assertNotIn(2014, self.holidays.years)
        self.assertIn(2015, self.holidays.years)
        self.assertNotIn(date(2014, 1, 1), self.holidays)
        self.assertEqual(len(self.holidays.years), 2)
        self.assertNotIn(2014, self.holidays.years)
    def test_observed(self):
        # observed=False suppresses substitute days; toggling the
        # attribute on an existing instance re-evaluates membership.
        self.holidays = holidays.US(observed=False)
        self.assertIn(date(2000, 1, 1), self.holidays)
        self.assertNotIn(date(1999, 12, 31), self.holidays)
        self.assertIn(date(2012, 1, 1), self.holidays)
        self.assertNotIn(date(2012, 1, 2), self.holidays)
        self.holidays.observed = True
        self.assertIn(date(2000, 1, 1), self.holidays)
        self.assertIn(date(1999, 12, 31), self.holidays)
        self.assertIn(date(2012, 1, 1), self.holidays)
        self.assertIn(date(2012, 1, 2), self.holidays)
        self.holidays.observed = False
        self.assertIn(date(2000, 1, 1), self.holidays)
        self.assertNotIn(date(1999, 12, 31), self.holidays)
        self.assertIn(date(2012, 1, 1), self.holidays)
        self.assertNotIn(date(2012, 1, 2), self.holidays)
        self.holidays = holidays.US(years=[2022], observed=False)
        self.assertNotIn(date(2021, 12, 31), self.holidays.keys())
        self.holidays = holidays.CA(observed=False)
        self.assertNotIn(date(1878, 7, 3), self.holidays)
        self.holidays.observed = True
        self.assertIn(date(2018, 7, 2), self.holidays)
    def test_serialization(self):
        # Pickling must round-trip without losing any holidays.
        dt = datetime(2020, 1, 1)
        self.assertIn(dt, self.holidays)
        loaded_holidays = pickle.loads(pickle.dumps(self.holidays))
        self.assertEqual(loaded_holidays, self.holidays)
        self.assertIn(dt, self.holidays)
    def test_deprecation_warnings(self):
        # The legacy prov= and state= keyword arguments still work but warn.
        with self.assertWarns(Warning):
            holidays.US(prov="AL")
        with self.assertWarns(Warning):
            holidays.US(state="WY")
class TestKeyTransforms(unittest.TestCase):
    """Keys may be date, datetime, POSIX timestamp, or date string."""
    def setUp(self):
        self.holidays = holidays.US()
    def test_dates(self):
        # Plain datetime.date keys work for lookup, insertion, and removal.
        self.assertIn(date(2014, 1, 1), self.holidays)
        self.assertEqual(self.holidays[date(2014, 1, 1)], "New Year's Day")
        self.holidays[date(2014, 1, 3)] = "Fake Holiday"
        self.assertIn(date(2014, 1, 3), self.holidays)
        self.assertEqual(self.holidays.pop(date(2014, 1, 3)), "Fake Holiday")
        self.assertNotIn(date(2014, 1, 3), self.holidays)
    def test_datetimes(self):
        # datetime keys match the holiday for their calendar date; the
        # time of day is ignored.
        self.assertIn(datetime(2014, 1, 1, 13, 45), self.holidays)
        self.assertEqual(
            self.holidays[datetime(2014, 1, 1, 13, 45)], "New Year's Day"
        )
        self.holidays[datetime(2014, 1, 3, 1, 1)] = "Fake Holiday"
        self.assertIn(datetime(2014, 1, 3, 2, 2), self.holidays)
        self.assertEqual(
            self.holidays.pop(datetime(2014, 1, 3, 4, 4)), "Fake Holiday"
        )
        self.assertNotIn(datetime(2014, 1, 3, 2, 2), self.holidays)
    def test_timestamp(self):
        # Numeric keys are treated as POSIX timestamps; floats work too.
        # NOTE(review): 1388552400 is 2014-01-01 05:00 UTC (midnight US
        # Eastern) -- presumably these assertions rely on the library's
        # timestamp conversion rather than the runner's timezone; confirm.
        self.assertIn(1388552400, self.holidays)
        self.assertEqual(self.holidays[1388552400], "New Year's Day")
        self.assertIn(1388552400.01, self.holidays)
        self.assertEqual(self.holidays[1388552400.01], "New Year's Day")
        self.holidays[1388725200] = "Fake Holiday"
        self.assertIn(1388725201, self.holidays)
        self.assertEqual(self.holidays.pop(1388725202), "Fake Holiday")
        self.assertNotIn(1388725201, self.holidays)
    def test_strings(self):
        # ISO and US-style date strings are parsed transparently.
        self.assertIn("2014-01-01", self.holidays)
        self.assertEqual(self.holidays["2014-01-01"], "New Year's Day")
        self.assertIn("01/01/2014", self.holidays)
        self.assertEqual(self.holidays["01/01/2014"], "New Year's Day")
        self.holidays["01/03/2014"] = "Fake Holiday"
        self.assertIn("01/03/2014", self.holidays)
        self.assertEqual(self.holidays.pop("01/03/2014"), "Fake Holiday")
        self.assertNotIn("01/03/2014", self.holidays)
    def test_exceptions(self):
        # Unparseable or unhashable keys raise TypeError or ValueError.
        self.assertRaises(
            (TypeError, ValueError), lambda: "abc" in self.holidays
        )
        self.assertRaises(
            (TypeError, ValueError), lambda: self.holidays.get("abc123")
        )
        self.assertRaises(TypeError, lambda: self.holidays.get({"123"}))
        self.assertRaises(
            (TypeError, ValueError), self.holidays.__setitem__, "abc", "Test"
        )
        self.assertRaises((TypeError, ValueError), lambda: {} in self.holidays)
class TestCountryHolidayDeprecation(unittest.TestCase):
    """The legacy CountryHoliday() factory must still work, but warn."""

    def test_deprecation(self):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            instance = holidays.CountryHoliday("IT")
            self.assertIsInstance(instance, holidays.HolidayBase)
        # Exactly one DeprecationWarning is expected.
        self.assertEqual(1, len(caught))
        self.assertTrue(issubclass(caught[-1].category, DeprecationWarning))
class TestCountryHolidays(unittest.TestCase):
    """Tests for the holidays.country_holidays() factory function."""

    def setUp(self):
        self.holidays = holidays.country_holidays("US")

    def test_country(self):
        self.assertEqual("US", self.holidays.country)

    def test_country_single_year(self):
        # A single int is accepted for the years argument.
        calendar = holidays.country_holidays("US", years=2021)
        self.assertEqual({2021}, calendar.years)

    def test_country_years(self):
        calendar = holidays.country_holidays("US", years=[2015, 2016])
        self.assertEqual({2015, 2016}, calendar.years)

    def test_country_state(self):
        calendar = holidays.country_holidays("US", subdiv="NY")
        self.assertEqual("NY", calendar.subdiv)

    def test_country_province(self):
        calendar = holidays.country_holidays("AU", subdiv="NT")
        self.assertEqual("NT", calendar.subdiv)

    def test_exceptions(self):
        # Unknown country codes and unsupported subdivisions are rejected.
        with self.assertRaises(NotImplementedError):
            holidays.country_holidays("XXXX")
        with self.assertRaises(NotImplementedError):
            holidays.country_holidays("US", subdiv="XXXX")
        with self.assertRaises(NotImplementedError):
            holidays.US(subdiv="XXXX")
class TestFinancialHolidays(unittest.TestCase):
    """Tests for the holidays.financial_holidays() factory function."""

    def setUp(self):
        self.holidays = holidays.financial_holidays("NYSE")

    def test_market(self):
        self.assertEqual("NYSE", self.holidays.market)

    def test_market_single_year(self):
        # A single int is accepted for the years argument.
        calendar = holidays.financial_holidays("NYSE", years=2021)
        self.assertEqual({2021}, calendar.years)

    def test_market_years(self):
        calendar = holidays.financial_holidays("NYSE", years=[2015, 2016])
        self.assertEqual({2015, 2016}, calendar.years)

    def test_exceptions(self):
        # Unknown market codes and unsupported subdivisions are rejected.
        with self.assertRaises(NotImplementedError):
            holidays.financial_holidays("XXXX")
        with self.assertRaises(NotImplementedError):
            holidays.financial_holidays("NYSE", subdiv="XXXX")
class TestAllInSameYear(unittest.TestCase):
    """Test that only holidays in the year(s) requested are returned."""
    # The country/holiday/year currently under test; assigned as instance
    # attributes inside the test loop so tearDown can report the failing
    # combination when an assertion trips.
    country: str
    hol: datetime
    year: int
    def setUp(self):
        self.countries = holidays.list_supported_countries()
    def tearDown(self):
        """https://stackoverflow.com/questions/4414234/"""
        # NOTE(review): this relies on private unittest internals
        # (self._outcome) to recover the last error/failure and print the
        # offending country/year/holiday -- may break on newer Pythons.
        def list2reason(exc_list):
            if exc_list and exc_list[-1][0] is self:
                return exc_list[-1][1]
        result = self.defaultTestResult()
        self._feedErrorsToResult(result, self._outcome.errors)
        error = list2reason(result.errors)
        failure = list2reason(result.failures)
        text = error if error else failure
        if text:
            print(
                f"{text.splitlines()[-1]} in country {self.country}: "
                f"holiday {self.hol} returned for year {self.year}"
            )
            print(
                holidays.country_holidays(
                    self.country, subdiv=None, years=[self.year]
                ).get_list(self.hol)
            )
    def test_all_countries(self):
        """
        Only holidays in the year(s) requested should be returned. This
        ensures that we avoid triggering a "RuntimeError: dictionary changed
        size during iteration" error.
        Here we test all countries for the 12-year period starting ten years
        ago and ending 2 years from now.
        This is logic test and not a code compatibility test, so for expediency
        we only run it once on the latest Python version.
        """
        for self.country in self.countries:
            for self.year in range(
                date.today().year - 10, date.today().year + 3
            ):
                hols = holidays.country_holidays(
                    self.country, years=[self.year]
                )
                for self.hol in hols:
                    self.assertEqual(self.hol.year, self.year)
class TestCountrySpecialHolidays(unittest.TestCase):
    """One-off (special) holidays injected via the special_holidays map."""
    def setUp(self):
        self.holidays = holidays.country_holidays("US")
    def test_populate_special_holidays(self):
        self.holidays._populate(1111)  # special_holidays is empty.
        self.assertEqual(0, len(self.holidays))
        # Map of year -> tuple of (month, day, name) special entries.
        self.holidays.special_holidays = {
            1111: ((JAN, 1, "Test holiday"),),
            2222: ((FEB, 2, "Test holiday"),),
            3333: (),
        }
        # An empty tuple must not register the year.
        self.assertNotIn(3333, self.holidays.years)
        # Membership checks lazily populate years 1111 and 2222 and pick
        # up the special entries (13 = the two specials plus the regular
        # 2222 US holidays -- presumably; year 1111 yields none, per above).
        self.assertIn("1111-01-01", self.holidays)
        self.assertIn("2222-02-02", self.holidays)
        self.assertEqual(13, len(self.holidays))
        # Re-populating the same years must be idempotent.
        self.holidays._populate(1111)
        self.holidays._populate(2222)
        self.assertIn("1111-01-01", self.holidays)
        self.assertIn("2222-02-02", self.holidays)
        self.assertEqual(13, len(self.holidays))
| mit | 2694551c12a8e48a989312e459d8b502 | 37.326975 | 79 | 0.585205 | 3.541735 | false | true | false | false |
dr-prodigy/python-holidays | test/countries/test_austria.py | 1 | 3161 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
from dateutil.relativedelta import relativedelta
import holidays
class TestAT(unittest.TestCase):
    """Checks for the Austrian national holiday calendar."""

    def setUp(self):
        self.holidays = holidays.AT()

    def test_new_years(self):
        # January 1st is a holiday every year; its neighbours are not.
        for year in range(1900, 2100):
            new_years = date(year, 1, 1)
            self.assertIn(new_years, self.holidays)
            for offset in (-1, +1):
                neighbour = new_years + relativedelta(days=offset)
                self.assertNotIn(neighbour, self.holidays)

    def test_christmas(self):
        # December 25th and 26th are holidays; the 24th and 27th are not.
        for year in range(1900, 2100):
            christmas = date(year, 12, 25)
            self.assertIn(christmas, self.holidays)
            self.assertIn(christmas + relativedelta(days=+1), self.holidays)
            for offset in (-1, +2):
                self.assertNotIn(
                    christmas + relativedelta(days=offset), self.holidays
                )

    def test_easter_monday(self):
        # Spot-check Easter Monday across several years; the adjacent
        # Sunday and Tuesday must not be holidays.
        easter_mondays = (
            date(1900, 4, 16),
            date(1901, 4, 8),
            date(1902, 3, 31),
            date(1999, 4, 5),
            date(2000, 4, 24),
            date(2010, 4, 5),
            date(2018, 4, 2),
            date(2019, 4, 22),
            date(2020, 4, 13),
        )
        for easter_monday in easter_mondays:
            self.assertIn(easter_monday, self.holidays)
            for offset in (-1, +1):
                self.assertNotIn(
                    easter_monday + relativedelta(days=offset), self.holidays
                )

    def test_national_day(self):
        # Observed on Nov 12 during 1919-1933 and on Oct 26 from 1967 on.
        periods = (
            (range(1919, 1934), 11, 12),
            (range(1967, 2100), 10, 26),
        )
        for years, month, day in periods:
            for year in years:
                national_day = date(year, month, day)
                self.assertIn(national_day, self.holidays)
                for offset in (-1, +1):
                    self.assertNotIn(
                        national_day + relativedelta(days=offset),
                        self.holidays,
                    )

    def test_all_holidays_present(self):
        # Every Austrian public holiday name must appear in 2015.
        at_2015 = holidays.AT(years=[2015])
        expected_names = [
            "Neujahr",
            "Heilige Drei Könige",
            "Ostermontag",
            "Staatsfeiertag",
            "Christi Himmelfahrt",
            "Pfingstmontag",
            "Fronleichnam",
            "Mariä Himmelfahrt",
            "Nationalfeiertag",
            "Allerheiligen",
            "Mariä Empfängnis",
            "Christtag",
            "Stefanitag",
        ]
        for name in expected_names:
            self.assertIn(name, at_2015.values())

    def test_subdiv(self):
        # A non-string subdivision argument ends up as its string form.
        at_holidays = holidays.AT(subdiv=1)
        self.assertEqual("1", at_holidays.subdiv)
| mit | a9e5682bffaacabf489cc52d7129f2c0 | 34.47191 | 78 | 0.579031 | 3.603881 | false | true | false | false |
jrief/djangocms-cascade | cmsplugin_cascade/bootstrap4/utils.py | 1 | 7432 | import logging
from django.utils.translation import gettext_lazy as _
from cmsplugin_cascade import app_settings
from cmsplugin_cascade.utils import (compute_aspect_ratio, get_image_size, parse_responsive_length,
compute_aspect_ratio_with_glossary)
logger = logging.getLogger('cascade')
# Image post-processing flags offered in the plugin editor; the keys are
# consumed as ``resize_options`` by get_image_tags()/get_picture_elements()
# below ('crop', 'upscale', 'subject_location', 'high_resolution').
IMAGE_RESIZE_OPTIONS = [
    ('upscale', _("Upscale image")),
    ('crop', _("Crop image")),
    ('subject_location', _("With subject location")),
    ('high_resolution', _("Optimized for Retina")),
]
# Bootstrap 4 CSS classes selectable as the rendered image's shape;
# 'img-fluid' marks the image as responsive.
IMAGE_SHAPE_CHOICES = [
    ('img-fluid', _("Responsive")),
    ('rounded', _('Rounded')),
    ('rounded-circle', _('Circle')),
    ('img-thumbnail', _('Thumbnail')),
]
def get_image_tags(instance):
    """
    Create a context returning the tags to render an ``<img ...>`` element with
    ``sizes``, ``srcset``, a fallback ``src`` and if required inline styles.

    Raises ``FileNotFoundError`` when neither the image file nor the
    glossary data allow computing an aspect ratio.
    """
    # Derive the aspect ratio from the real image file if available,
    # otherwise from width/height stored in the plugin's glossary.
    if hasattr(instance, 'image') and hasattr(instance.image, 'exif'):
        aspect_ratio = compute_aspect_ratio(instance.image)
    elif 'image' in instance.glossary and 'width' in instance.glossary['image']:
        aspect_ratio = compute_aspect_ratio_with_glossary(instance.glossary)
    else:
        # if accessing the image file fails or fake image fails, abort here
        raise FileNotFoundError("Unable to compute aspect ratio of image")
    is_responsive = 'img-fluid' in instance.glossary.get('image_shapes', [])
    resize_options = instance.glossary.get('resize_options', {})
    crop = 'crop' in resize_options
    upscale = 'upscale' in resize_options
    if 'subject_location' in resize_options and hasattr(instance.image, 'subject_location'):
        subject_location = instance.image.subject_location
    else:
        subject_location = None
    tags = {'sizes': [], 'srcsets': {}, 'is_responsive': is_responsive, 'extra_styles': {}}
    if is_responsive:
        # parse_responsive_length() returns a (pixels, fraction) pair --
        # presumably; for responsive widths only the fraction is used.
        image_width = parse_responsive_length(instance.glossary.get('image_width_responsive') or '100%')
        # NOTE(review): plain assert is stripped when running with -O.
        assert(image_width[1]), "The given image has no valid width"
        if image_width[1] != 1.0:
            tags['extra_styles'].update({'max-width': '{:.0f}%'.format(100 * image_width[1])})
    else:
        image_width = parse_responsive_length(instance.glossary['image_width_fixed'])
        if not image_width[0]:
            # No fixed pixel width given: fall back to the image's own width.
            image_width = (instance.image.width, image_width[1])
    try:
        image_height = parse_responsive_length(instance.glossary['image_height'])
    except KeyError:
        image_height = (None, None)
    if is_responsive:
        column_bounds_min = instance.glossary['column_bounds']['min']
        # Retina rendering doubles the upper bound so 2x sources exist.
        if 'high_resolution' in resize_options:
            column_bounds_max = 2 * instance.glossary['column_bounds']['max']
        else:
            column_bounds_max = instance.glossary['column_bounds']['max']
        # Generate evenly spaced srcset entries between the column bounds,
        # capped at RESPONSIVE_IMAGE_MAX_STEPS.
        num_steps = min(int((column_bounds_max - column_bounds_min) / app_settings.RESPONSIVE_IMAGE_STEP_SIZE),
                        app_settings.RESPONSIVE_IMAGE_MAX_STEPS)
        step_width, max_width = (column_bounds_max - column_bounds_min) / num_steps, 0
        for step in range(0, num_steps + 1):
            width = round(column_bounds_min + step_width * step)
            max_width = max(max_width, width)
            size = get_image_size(width, image_height, aspect_ratio)
            key = '{0}w'.format(*size)
            tags['srcsets'][key] = {'size': size, 'crop': crop, 'upscale': upscale,
                                    'subject_location': subject_location}
        tags['sizes'] = instance.glossary['media_queries'].values()
        # use an existing image as fallback for the <img ...> element
        if not max_width > 0:
            logger.warning('image tags: image max width is zero')
        size = (int(round(max_width)), int(round(max_width * aspect_ratio)))
    else:
        size = get_image_size(image_width[0], image_height, aspect_ratio)
    # For Retina, emit 1x and 2x variants; the 2x shares all options but
    # doubles the pixel dimensions.
    if 'high_resolution' in resize_options:
        tags['srcsets']['1x'] = {'size': size, 'crop': crop, 'upscale': upscale,
                                 'subject_location': subject_location}
        tags['srcsets']['2x'] = dict(tags['srcsets']['1x'], size=(size[0] * 2, size[1] * 2))
    tags['src'] = {'size': size, 'crop': crop, 'upscale': upscale, 'subject_location': subject_location}
    return tags
def get_picture_elements(instance):
    """
    Create a context, used to render a <picture> together with all its ``<source>`` elements:
    It returns a list of HTML elements, each containing the information to render a ``<source>``
    element.
    The purpose of this HTML entity is to display images with art directions. For normal images use
    the ``<img>`` element.

    Returns ``None`` (implicitly) when the aspect ratio cannot be computed.
    """
    # Derive the aspect ratio from the real image file if available,
    # otherwise from width/height stored in the plugin's glossary.
    if hasattr(instance, 'image') and hasattr(instance.image, 'exif'):
        aspect_ratio = compute_aspect_ratio(instance.image)
    elif 'image' in instance.glossary and 'width' in instance.glossary['image']:
        aspect_ratio = compute_aspect_ratio_with_glossary(instance.glossary)
    else:
        # if accessing the image file fails or fake image fails, abort here
        logger.warning("Unable to compute aspect ratio of image '{}'".format(instance.image))
        return
    # container_max_heights = instance.glossary.get('container_max_heights', {})
    resize_options = instance.glossary.get('resize_options', {})
    crop = 'crop' in resize_options
    upscale = 'upscale' in resize_options
    if 'subject_location' in resize_options and hasattr(instance.image, 'subject_location'):
        subject_location = instance.image.subject_location
    else:
        subject_location = None
    max_width = 0
    max_zoom = 0
    elements = []
    # One <source> element per configured breakpoint/media query.
    for bp, media_query in instance.glossary['media_queries'].items():
        width, media = media_query['width'], media_query['media']
        max_width = max(max_width, width)
        size = None
        try:
            image_height = parse_responsive_length(instance.glossary['responsive_heights'][bp])
        except KeyError:
            image_height = (None, None)
        if image_height[0]:  # height was given in px
            size = (int(width), image_height[0])
        elif image_height[1]:  # height was given in %
            size = (int(width), int(round(width * aspect_ratio * image_height[1])))
        try:
            zoom = int(
                instance.glossary['responsive_zoom'][bp].strip().rstrip('%')
            )
        except (AttributeError, KeyError, ValueError):
            # Missing or malformed zoom entries default to no zoom.
            zoom = 0
        max_zoom = max(max_zoom, zoom)
        if size is None:
            # as fallback, adopt height to current width
            size = (int(width), int(round(width * aspect_ratio)))
        elem = {'tag': 'source', 'size': size, 'zoom': zoom, 'crop': crop,
                'upscale': upscale, 'subject_location': subject_location, 'media': media}
        if 'high_resolution' in resize_options:
            # Retina variant: same options, doubled pixel dimensions.
            elem['size2'] = (size[0] * 2, size[1] * 2)
        elements.append(elem)
    # add a fallback image for old browsers which can't handle the <source> tags inside a <picture> element
    # NOTE(review): image_height here is the value left over from the LAST
    # loop iteration, and would raise NameError if media_queries were
    # empty -- confirm this is the intended behavior.
    if image_height[1]:
        size = (int(max_width), int(round(max_width * aspect_ratio * image_height[1])))
    else:
        size = (int(max_width), int(round(max_width * aspect_ratio)))
    elements.append({'tag': 'img', 'size': size, 'zoom': max_zoom, 'crop': crop,
                     'upscale': upscale, 'subject_location': subject_location})
    return elements
| mit | 9c0c72acb06886fd2f5f0e0553098efd | 46.948387 | 111 | 0.620694 | 3.747857 | false | false | false | false |
dr-prodigy/python-holidays | test/countries/test_japan.py | 1 | 21637 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
class TestJapan(unittest.TestCase):
    def setUp(self):
        # observed=False keeps only the actual holiday dates, without
        # substitute ("observed") days.
        self.holidays = holidays.Japan(observed=False)
def test_not_implemented(self):
with self.assertRaises(NotImplementedError):
holidays.Japan(years=[1945])
with self.assertRaises(NotImplementedError):
holidays.Japan(years=[2100])
def test_new_years_day(self):
for year in range(1949, 2050):
self.assertIn(date(year, 1, 1), self.holidays)
    def test_coming_of_age(self):
        # Through 1999, Coming of Age Day was fixed on January 15th.
        for year in range(1949, 2000):
            self.assertIn(date(year, 1, 15), self.holidays)
        # From 2000 onward it moves to the second Monday of January --
        # presumably (Happy Monday reform); the expected dates below were
        # precomputed for 2000-2050.
        for dt in (
            (2000, 1, 10),
            (2001, 1, 8),
            (2002, 1, 14),
            (2003, 1, 13),
            (2004, 1, 12),
            (2005, 1, 10),
            (2006, 1, 9),
            (2007, 1, 8),
            (2008, 1, 14),
            (2009, 1, 12),
            (2010, 1, 11),
            (2011, 1, 10),
            (2012, 1, 9),
            (2013, 1, 14),
            (2014, 1, 13),
            (2015, 1, 12),
            (2016, 1, 11),
            (2017, 1, 9),
            (2018, 1, 8),
            (2019, 1, 14),
            (2020, 1, 13),
            (2021, 1, 11),
            (2022, 1, 10),
            (2023, 1, 9),
            (2024, 1, 8),
            (2025, 1, 13),
            (2026, 1, 12),
            (2027, 1, 11),
            (2028, 1, 10),
            (2029, 1, 8),
            (2030, 1, 14),
            (2031, 1, 13),
            (2032, 1, 12),
            (2033, 1, 10),
            (2034, 1, 9),
            (2035, 1, 8),
            (2036, 1, 14),
            (2037, 1, 12),
            (2038, 1, 11),
            (2039, 1, 10),
            (2040, 1, 9),
            (2041, 1, 14),
            (2042, 1, 13),
            (2043, 1, 12),
            (2044, 1, 11),
            (2045, 1, 9),
            (2046, 1, 8),
            (2047, 1, 14),
            (2048, 1, 13),
            (2049, 1, 11),
            (2050, 1, 10),
        ):
            self.assertIn(date(*dt), self.holidays)
        # January 15th itself is no longer a holiday after the change.
        self.assertNotIn(date(2000, 1, 15), self.holidays)
        self.assertNotIn(date(2017, 1, 15), self.holidays)
        self.assertNotIn(date(2030, 1, 15), self.holidays)
def test_foundation_day(self):
self.assertNotIn(date(1949, 2, 11), self.holidays)
self.assertNotIn(date(1966, 2, 11), self.holidays)
for year in range(1969, 2051):
self.assertIn(date(year, 2, 11), self.holidays)
def test_vernal_equinox_day(self):
for dt in (
(1949, 3, 21),
(1950, 3, 21),
(1951, 3, 21),
(1952, 3, 21),
(1953, 3, 21),
(1954, 3, 21),
(1955, 3, 21),
(1956, 3, 21),
(1957, 3, 21),
(1958, 3, 21),
(1959, 3, 21),
(1960, 3, 20),
(1961, 3, 21),
(1962, 3, 21),
(1963, 3, 21),
(1964, 3, 20),
(1965, 3, 21),
(1966, 3, 21),
(1967, 3, 21),
(1968, 3, 20),
(1969, 3, 21),
(1970, 3, 21),
(1971, 3, 21),
(1972, 3, 20),
(1973, 3, 21),
(1974, 3, 21),
(1975, 3, 21),
(1976, 3, 20),
(1977, 3, 21),
(1978, 3, 21),
(1979, 3, 21),
(1980, 3, 20),
(1981, 3, 21),
(1982, 3, 21),
(1983, 3, 21),
(1984, 3, 20),
(1985, 3, 21),
(1986, 3, 21),
(1987, 3, 21),
(1988, 3, 20),
(1989, 3, 21),
(1990, 3, 21),
(1991, 3, 21),
(1992, 3, 20),
(1993, 3, 20),
(1994, 3, 21),
(1995, 3, 21),
(1996, 3, 20),
(1997, 3, 20),
(1998, 3, 21),
(1999, 3, 21),
(2000, 3, 20),
(2001, 3, 20),
(2002, 3, 21),
(2003, 3, 21),
(2004, 3, 20),
(2005, 3, 20),
(2006, 3, 21),
(2007, 3, 21),
(2008, 3, 20),
(2009, 3, 20),
(2010, 3, 21),
(2011, 3, 21),
(2012, 3, 20),
(2013, 3, 20),
(2014, 3, 21),
(2015, 3, 21),
(2016, 3, 20),
(2017, 3, 20),
(2018, 3, 21),
(2019, 3, 21),
(2020, 3, 20),
(2021, 3, 20),
(2022, 3, 21),
(2023, 3, 21),
(2024, 3, 20),
(2025, 3, 20),
(2026, 3, 20),
(2027, 3, 21),
(2028, 3, 20),
(2029, 3, 20),
(2030, 3, 20),
(2031, 3, 21),
(2032, 3, 20),
(2033, 3, 20),
(2034, 3, 20),
(2035, 3, 21),
(2036, 3, 20),
(2037, 3, 20),
(2038, 3, 20),
(2039, 3, 21),
(2040, 3, 20),
(2041, 3, 20),
(2042, 3, 20),
(2043, 3, 21),
(2044, 3, 20),
(2045, 3, 20),
(2046, 3, 20),
(2047, 3, 21),
(2048, 3, 20),
(2049, 3, 20),
(2050, 3, 20),
):
self.assertIn(date(*dt), self.holidays)
self.assertIn(date(2092, 3, 19), self.holidays)
def test_showa_day(self):
for year in range(1949, 2007):
self.assertIn(date(year, 4, 29), self.holidays)
self.assertNotEqual(self.holidays[date(year, 4, 29)], "昭和の日")
for year in range(2007, 2051):
self.assertIn(date(year, 4, 29), self.holidays)
self.assertEqual(self.holidays[date(year, 4, 29)], "昭和の日")
def test_constitution_memorial_day(self):
for year in range(1949, 2051):
self.assertIn(date(year, 5, 3), self.holidays)
self.assertEqual(self.holidays[date(year, 5, 3)], "憲法記念日")
def test_greenery_day(self):
for year in range(1949, 1989):
self.assertIn(date(year, 4, 29), self.holidays)
self.assertNotIn(self.holidays[date(year, 4, 29)], "みどりの日")
for year in range(1989, 2007):
self.assertIn(date(year, 4, 29), self.holidays)
self.assertEqual(self.holidays[date(year, 4, 29)], "みどりの日")
for year in range(2007, 2051):
self.assertIn(date(year, 5, 4), self.holidays)
self.assertEqual(self.holidays[date(year, 5, 4)], "みどりの日")
def test_national_holiday(self):
for year in (
1988,
1989,
1990,
1991,
1993,
1994,
1995,
1996,
1999,
2000,
2001,
2002,
2004,
2005,
2006,
):
self.assertIn(date(year, 5, 4), self.holidays)
self.assertEqual(self.holidays[date(year, 5, 4)], "国民の休日")
for dt in (
(2009, 9, 22),
(2015, 9, 22),
(2026, 9, 22),
(2032, 9, 21),
(2037, 9, 22),
(2043, 9, 22),
(2049, 9, 21),
):
self.assertIn(date(*dt), self.holidays)
self.assertEqual(self.holidays[date(*dt)], "国民の休日")
def test_childrens_day(self):
for year in range(1949, 2051):
self.assertIn(date(year, 5, 5), self.holidays)
self.assertEqual(self.holidays[date(year, 5, 5)], "こどもの日")
def test_marine_day(self):
for dt in (
(1996, 7, 20),
(1997, 7, 20),
(1998, 7, 20),
(1999, 7, 20),
(2000, 7, 20),
(2001, 7, 20),
(2002, 7, 20),
(2003, 7, 21),
(2004, 7, 19),
(2005, 7, 18),
(2006, 7, 17),
(2007, 7, 16),
(2008, 7, 21),
(2009, 7, 20),
(2010, 7, 19),
(2011, 7, 18),
(2012, 7, 16),
(2013, 7, 15),
(2014, 7, 21),
(2015, 7, 20),
(2016, 7, 18),
(2017, 7, 17),
(2018, 7, 16),
(2019, 7, 15),
(2020, 7, 23),
(2021, 7, 22),
(2022, 7, 18),
(2023, 7, 17),
(2024, 7, 15),
(2025, 7, 21),
(2026, 7, 20),
(2027, 7, 19),
(2028, 7, 17),
(2029, 7, 16),
(2030, 7, 15),
(2031, 7, 21),
(2032, 7, 19),
(2033, 7, 18),
(2034, 7, 17),
(2035, 7, 16),
(2036, 7, 21),
(2037, 7, 20),
(2038, 7, 19),
(2039, 7, 18),
(2040, 7, 16),
(2041, 7, 15),
(2042, 7, 21),
(2043, 7, 20),
(2044, 7, 18),
(2045, 7, 17),
(2046, 7, 16),
(2047, 7, 15),
(2048, 7, 20),
(2049, 7, 19),
(2050, 7, 18),
):
self.assertIn(date(*dt), self.holidays)
self.assertEqual(self.holidays[date(*dt)], "海の日")
self.assertNotIn(date(1950, 7, 20), self.holidays)
def test_mountain_day(self):
for year in range(1949, 2016):
self.assertNotIn(date(year, 8, 11), self.holidays)
for year in range(2016, 2051):
if year == 2020:
self.assertIn(date(year, 8, 10), self.holidays)
self.assertEqual(self.holidays[date(year, 8, 10)], "山の日")
elif year == 2021:
self.assertIn(date(year, 8, 8), self.holidays)
self.assertEqual(self.holidays[date(year, 8, 8)], "山の日")
else:
self.assertIn(date(year, 8, 11), self.holidays)
self.assertEqual(self.holidays[date(year, 8, 11)], "山の日")
def test_respect_for_the_aged_day(self):
for year in range(1949, 1966):
self.assertNotIn(date(year, 9, 15), self.holidays)
for year in range(1966, 2004):
self.assertIn(date(year, 9, 15), self.holidays)
self.assertEqual(self.holidays[date(year, 9, 15)], "敬老の日")
for dt in (
(2004, 9, 20),
(2005, 9, 19),
(2006, 9, 18),
(2007, 9, 17),
(2008, 9, 15),
(2009, 9, 21),
(2010, 9, 20),
(2011, 9, 19),
(2012, 9, 17),
(2013, 9, 16),
(2014, 9, 15),
(2015, 9, 21),
(2016, 9, 19),
(2017, 9, 18),
(2018, 9, 17),
(2019, 9, 16),
(2020, 9, 21),
(2021, 9, 20),
(2022, 9, 19),
(2023, 9, 18),
(2024, 9, 16),
(2025, 9, 15),
(2026, 9, 21),
(2027, 9, 20),
(2028, 9, 18),
(2029, 9, 17),
(2030, 9, 16),
(2031, 9, 15),
(2032, 9, 20),
(2033, 9, 19),
(2034, 9, 18),
(2035, 9, 17),
(2036, 9, 15),
(2037, 9, 21),
(2038, 9, 20),
(2039, 9, 19),
(2040, 9, 17),
(2041, 9, 16),
(2042, 9, 15),
(2043, 9, 21),
(2044, 9, 19),
(2045, 9, 18),
(2046, 9, 17),
(2047, 9, 16),
(2048, 9, 21),
(2049, 9, 20),
(2050, 9, 19),
):
self.assertIn(date(*dt), self.holidays)
self.assertEqual(self.holidays[date(*dt)], "敬老の日")
def test_autumnal_equinox_day(self):
for dt in (
(1949, 9, 23),
(1950, 9, 23),
(1951, 9, 24),
(1952, 9, 23),
(1953, 9, 23),
(1954, 9, 23),
(1955, 9, 24),
(1956, 9, 23),
(1957, 9, 23),
(1958, 9, 23),
(1959, 9, 24),
(1960, 9, 23),
(1961, 9, 23),
(1962, 9, 23),
(1963, 9, 24),
(1964, 9, 23),
(1965, 9, 23),
(1966, 9, 23),
(1967, 9, 24),
(1968, 9, 23),
(1969, 9, 23),
(1970, 9, 23),
(1971, 9, 24),
(1972, 9, 23),
(1973, 9, 23),
(1974, 9, 23),
(1975, 9, 24),
(1976, 9, 23),
(1977, 9, 23),
(1978, 9, 23),
(1979, 9, 24),
(1980, 9, 23),
(1981, 9, 23),
(1982, 9, 23),
(1983, 9, 23),
(1984, 9, 23),
(1985, 9, 23),
(1986, 9, 23),
(1987, 9, 23),
(1988, 9, 23),
(1989, 9, 23),
(1990, 9, 23),
(1991, 9, 23),
(1992, 9, 23),
(1993, 9, 23),
(1994, 9, 23),
(1995, 9, 23),
(1996, 9, 23),
):
self.assertIn(date(*dt), self.holidays)
self.assertEqual(self.holidays[date(*dt)], "秋分の日")
def test_health_and_sports_day(self):
for year in range(1949, 1966):
self.assertNotIn(date(year, 10, 10), self.holidays)
for year in range(1966, 2000):
self.assertIn(date(year, 10, 10), self.holidays)
self.assertEqual(self.holidays[date(year, 10, 10)], "体育の日")
for dt in (
(2000, 10, 9),
(2001, 10, 8),
(2002, 10, 14),
(2003, 10, 13),
(2004, 10, 11),
(2005, 10, 10),
(2006, 10, 9),
(2007, 10, 8),
(2008, 10, 13),
(2009, 10, 12),
(2010, 10, 11),
(2011, 10, 10),
(2012, 10, 8),
(2013, 10, 14),
(2014, 10, 13),
(2015, 10, 12),
(2016, 10, 10),
(2017, 10, 9),
(2018, 10, 8),
(2019, 10, 14),
):
self.assertIn(date(*dt), self.holidays)
self.assertEqual(self.holidays[date(*dt)], "体育の日")
for dt in (
(2020, 7, 24),
(2021, 7, 23),
(2022, 10, 10),
(2023, 10, 9),
(2024, 10, 14),
(2025, 10, 13),
(2026, 10, 12),
(2027, 10, 11),
(2028, 10, 9),
(2029, 10, 8),
(2030, 10, 14),
(2031, 10, 13),
(2032, 10, 11),
(2033, 10, 10),
(2034, 10, 9),
(2035, 10, 8),
(2036, 10, 13),
(2037, 10, 12),
(2038, 10, 11),
(2039, 10, 10),
(2040, 10, 8),
(2041, 10, 14),
(2042, 10, 13),
(2043, 10, 12),
(2044, 10, 10),
(2045, 10, 9),
(2046, 10, 8),
(2047, 10, 14),
(2048, 10, 12),
(2049, 10, 11),
(2050, 10, 10),
):
self.assertIn(date(*dt), self.holidays)
self.assertEqual(self.holidays[date(*dt)], "スポーツの日")
self.assertNotIn(date(2000, 10, 10), self.holidays)
def test_culture_day(self):
for year in range(1949, 2050):
self.assertIn(date(year, 11, 3), self.holidays)
self.assertEqual(self.holidays[date(year, 11, 3)], "文化の日")
def test_labour_thanks_giving_day(self):
for year in range(1949, 2050):
self.assertIn(date(year, 11, 23), self.holidays)
self.assertEqual(self.holidays[date(year, 11, 23)], "勤労感謝の日")
def test_emperors_birthday(self):
for year in range(1949, 1989):
self.assertIn(date(year, 4, 29), self.holidays)
self.assertEqual(self.holidays[date(year, 4, 29)], "天皇誕生日")
for year in range(1989, 2019):
self.assertIn(date(year, 12, 23), self.holidays)
self.assertEqual(self.holidays[date(year, 12, 23)], "天皇誕生日")
for year in range(2020, 2051):
self.assertIn(date(year, 2, 23), self.holidays)
self.assertEqual(self.holidays[date(year, 2, 23)], "天皇誕生日")
self.assertNotIn(date(2019, 12, 23), self.holidays)
def test_showa_emperor_holidays(self):
self.assertIn(date(1989, 2, 24), self.holidays)
def test_heisei_emperor_holidays(self):
self.assertIn(date(1959, 4, 10), self.holidays)
self.assertIn(date(1990, 11, 12), self.holidays)
def test_reiwa_emperor_holidays(self):
self.assertIn(date(1993, 6, 9), self.holidays)
self.assertIn(date(2019, 4, 30), self.holidays)
self.assertIn(date(2019, 5, 1), self.holidays)
self.assertIn(date(2019, 5, 2), self.holidays)
self.assertIn(date(2019, 10, 22), self.holidays)
def test_national_holidays(self):
self.assertIn(date(2032, 9, 21), self.holidays)
def test_invalid_years(self):
self.assertRaises(
NotImplementedError, lambda: date(1948, 1, 1) in self.holidays
)
self.assertRaises(
NotImplementedError, lambda: date(2100, 1, 1) in self.holidays
)
def test_substitute_holidays(self):
for dt in (
(1973, 4, 30),
(1973, 9, 24),
(1974, 5, 6),
(1974, 9, 16),
(1974, 11, 4),
(1975, 11, 24),
(1976, 10, 11),
(1978, 1, 2),
(1978, 1, 16),
(1979, 2, 12),
(1979, 4, 30),
(1980, 11, 24),
(1981, 5, 4),
(1982, 3, 22),
(1982, 10, 11),
(1984, 1, 2),
(1984, 1, 16),
(1984, 4, 30),
(1984, 9, 24),
(1985, 5, 6),
(1985, 9, 16),
(1985, 11, 4),
(1986, 11, 24),
(1987, 5, 4),
(1988, 3, 21),
(1989, 1, 2),
(1989, 1, 16),
(1990, 2, 12),
(1990, 4, 30),
(1990, 9, 24),
(1990, 12, 24),
(1991, 5, 6),
(1991, 9, 16),
(1991, 11, 4),
(1992, 5, 4),
(1993, 10, 11),
(1995, 1, 2),
(1995, 1, 16),
(1996, 2, 12),
(1996, 5, 6),
(1996, 9, 16),
(1996, 11, 4),
(1997, 7, 21),
(1997, 11, 24),
(1998, 5, 4),
(1999, 3, 22),
(1999, 10, 11),
(2001, 2, 12),
(2001, 4, 30),
(2001, 9, 24),
(2001, 12, 24),
(2002, 5, 6),
(2002, 9, 16),
(2002, 11, 4),
(2003, 11, 24),
(2005, 3, 21),
(2006, 1, 2),
(2007, 2, 12),
(2007, 4, 30),
(2007, 9, 24),
(2007, 12, 24),
(2008, 5, 6),
(2008, 11, 24),
(2009, 5, 6),
(2010, 3, 22),
(2012, 1, 2),
(2012, 4, 30),
(2012, 12, 24),
(2013, 5, 6),
(2013, 11, 4),
(2014, 5, 6),
(2014, 11, 24),
(2015, 5, 6),
(2016, 3, 21),
(2017, 1, 2),
(2018, 2, 12),
(2018, 4, 30),
(2018, 9, 24),
(2018, 12, 24),
(2019, 5, 6),
(2019, 8, 12),
(2019, 11, 4),
(2020, 2, 24),
(2020, 5, 6),
(2023, 1, 2),
(2024, 2, 12),
(2024, 5, 6),
(2024, 8, 12),
(2024, 9, 23),
(2024, 11, 4),
(2025, 2, 24),
(2025, 5, 6),
(2025, 11, 24),
(2026, 5, 6),
(2027, 3, 22),
(2029, 2, 12),
(2029, 4, 30),
(2029, 9, 24),
(2030, 5, 6),
(2030, 8, 12),
(2030, 11, 4),
(2031, 2, 24),
(2031, 5, 6),
(2031, 11, 24),
(2033, 3, 21),
(2034, 1, 2),
(2035, 2, 12),
(2035, 4, 30),
(2035, 9, 24),
(2036, 5, 6),
(2036, 11, 24),
(2037, 5, 6),
(2040, 1, 2),
(2040, 4, 30),
(2041, 5, 6),
(2041, 8, 12),
(2041, 11, 4),
(2042, 2, 24),
(2042, 5, 6),
(2042, 11, 24),
(2043, 5, 6),
(2044, 3, 21),
(2045, 1, 2),
(2046, 2, 12),
(2046, 4, 30),
(2046, 9, 24),
(2047, 5, 6),
(2047, 8, 12),
(2047, 11, 4),
(2048, 2, 24),
(2048, 5, 6),
(2050, 3, 21),
):
self.assertIn(date(*dt), self.holidays)
| mit | e16d014462baa45cc0bfcf266472d654 | 29.304102 | 78 | 0.381704 | 3.224229 | false | false | false | false |
jrief/djangocms-cascade | cmsplugin_cascade/migrations/0028_cascade_clipboard.py | 1 | 1135 | from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add audit fields (created_at / created_by / last_accessed_at) to CascadeClipboard."""

    dependencies = [
        # created_by references the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cmsplugin_cascade', '0027_version_1'),
    ]
    operations = [
        migrations.AddField(
            model_name='cascadeclipboard',
            name='created_at',
            # A default is required to back-fill existing rows; it is dropped
            # again via preserve_default=False.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Created at'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cascadeclipboard',
            name='created_by',
            # SET_NULL keeps clipboard entries alive when their author is deleted.
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Created by'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cascadeclipboard',
            name='last_accessed_at',
            field=models.DateTimeField(default=None, editable=False, null=True, verbose_name='Last accessed at'),
        ),
    ]
| mit | 5af5d076de7a5102570528495bd9c952 | 35.612903 | 165 | 0.637004 | 4.283019 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/luxembourg.py | 1 | 1748 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import MO
from holidays.constants import JAN, MAY, JUN, AUG, NOV, DEC
from holidays.holiday_base import HolidayBase
class Luxembourg(HolidayBase):
    """Public holidays of Luxembourg.

    Reference: https://en.wikipedia.org/wiki/Public_holidays_in_Luxembourg
    """

    country = "LU"

    def _populate(self, year):
        """Fill ``self`` with the Luxembourgish holidays of *year*."""
        super()._populate(year)

        easter_sunday = easter(year)

        # Collect (date, name) pairs first, then assign them in order.
        entries = [
            (date(year, JAN, 1), "Neijoerschdag"),
            (easter_sunday + rd(weekday=MO), "Ouschterméindeg"),
            (date(year, MAY, 1), "Dag vun der Aarbecht"),
        ]
        if year >= 2019:
            # Europe Day: not in legislation yet, but introduced starting 2019
            entries.append((date(year, MAY, 9), "Europadag"))
        entries += [
            (easter_sunday + rd(days=39), "Christi Himmelfaart"),
            (easter_sunday + rd(days=50), "Péngschtméindeg"),
            (date(year, JUN, 23), "Nationalfeierdag"),
            (date(year, AUG, 15), "Léiffrawëschdag"),
            (date(year, NOV, 1), "Allerhellgen"),
            (date(year, DEC, 25), "Chrëschtdag"),
            (date(year, DEC, 26), "Stiefesdag"),
        ]
        for hol_date, hol_name in entries:
            self[hol_date] = hol_name
class LU(Luxembourg):
    # ISO 3166-1 alpha-2 country-code alias.
    pass
class LUX(Luxembourg):
    # ISO 3166-1 alpha-3 country-code alias.
    pass
| mit | 21a1193f9ff5386d7d6ffd0d5db04ef5 | 31.867925 | 78 | 0.65729 | 2.967632 | false | false | false | false |
jrief/djangocms-cascade | cmsplugin_cascade/models.py | 1 | 12117 | import json
import os
import shutil
from collections import OrderedDict
from urllib.parse import urljoin
from pathlib import Path
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from filer.fields.file import FilerFileField
from cms.extensions import PageExtension
from cms.extensions.extension_pool import extension_pool
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.models_base import CascadeModelBase
from cmsplugin_cascade import app_settings
class SharedGlossary(models.Model):
    """
    A model class to hold glossary data shared among different plugins.
    """
    plugin_type = models.CharField(
        _("Plugin Name"),
        max_length=50,
        db_index=True,
        editable=False,
    )
    identifier = models.CharField(
        _("Identifier"),
        max_length=50,
        unique=True,
    )
    glossary = models.JSONField(
        null=True,
        blank=True,
        default=dict,
    )

    class Meta:
        unique_together = ['plugin_type', 'identifier']
        verbose_name_plural = verbose_name = _("Shared between Plugins")

    def __str__(self):
        return self.identifier

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """
        Only entries which are declared as sharable, shall be stored in the sharable glossary.
        """
        plugin_instance = plugin_pool.get_plugin(self.plugin_type)
        # Keep only the keys the plugin explicitly declares as sharable
        # (dict comprehension instead of the dated dict(generator) form).
        self.glossary = {
            key: value
            for key, value in self.glossary.items()
            if key in plugin_instance.sharable_fields
        }
        super().save(force_insert, force_update, using, update_fields)
class CascadeElement(CascadeModelBase):
    """
    The concrete model class to store arbitrary data for plugins derived from CascadePluginBase.
    """
    # Optional link to a glossary shared across plugin instances; kept on
    # glossary deletion via SET_NULL.
    shared_glossary = models.ForeignKey(
        SharedGlossary,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
    )

    class Meta:
        db_table = 'cmsplugin_cascade_element'
        verbose_name = _("Element")
        verbose_name_plural = _("Elements")

    def copy_relations(self, oldinstance):
        """Clone all inline child rows when django-CMS copies this plugin."""
        def init_element(inline_element):
            inline_element.pk = None  # forces an INSERT of a fresh row
            inline_element.cascade_element = self
            inline_element.save()

        for inline_element in oldinstance.inline_elements.all():
            init_element(inline_element)
        for sortinline_element in oldinstance.sortinline_elements.all():
            init_element(sortinline_element)
class SharableCascadeElement(CascadeElement):
    """
    A proxy model which takes care of merging the glossary with its shared instance.
    """
    class Meta:
        proxy = True

    def __getattribute__(self, name):
        """
        Update glossary with content from SharedGlossary model if that exists.
        """
        # NOTE: intercepts every attribute access; only 'glossary' is treated
        # specially, and the shared glossary's entries override the local ones.
        attribute = object.__getattribute__(self, name)
        if name == 'glossary' and self.shared_glossary:
            attribute.update(self.shared_glossary.glossary)
        return attribute
class InlineCascadeElement(models.Model):
    """Unordered inline child row attached to a CascadeElement."""
    cascade_element = models.ForeignKey(
        CascadeElement,
        related_name='inline_elements',
        on_delete=models.CASCADE,
    )
    glossary = models.JSONField(
        blank=True,
        default=dict,
    )

    class Meta:
        db_table = 'cmsplugin_cascade_inline'
class SortableInlineCascadeElement(models.Model):
    """Ordered inline child row attached to a CascadeElement."""
    cascade_element = models.ForeignKey(
        CascadeElement,
        related_name='sortinline_elements',
        on_delete=models.CASCADE,
    )
    glossary = models.JSONField(
        blank=True,
        default=dict,
    )
    # Position within the parent's list of sortable children.
    order = models.PositiveIntegerField(
        verbose_name=_("Sort by"),
        db_index=True,
    )

    class Meta:
        db_table = 'cmsplugin_cascade_sortinline'
        ordering = ['order']

    def __str__(self):
        # Intentionally empty: keeps admin inline rows without a caption.
        return ""
class PluginExtraFields(models.Model):
    """
    Store a set of allowed extra CSS classes and inline styles to be used for Cascade plugins
    inheriting from `ExtraFieldsMixin`. Also store if individual ``id=""`` tags are allowed.
    """
    plugin_type = models.CharField(
        _("Plugin Name"),
        max_length=50,
        db_index=True,
    )
    site = models.ForeignKey(
        Site,
        verbose_name=_("Site"),
        on_delete=models.CASCADE,
    )
    allow_id_tag = models.BooleanField(default=False)
    css_classes = models.JSONField(
        null=True,
        blank=True,
        default=dict,
    )
    inline_styles = models.JSONField(
        null=True,
        blank=True,
        default=dict,
    )

    class Meta:
        verbose_name = verbose_name_plural = _("Custom CSS classes and styles")
        unique_together = ['plugin_type', 'site']

    def __str__(self):
        return str(self.name)

    @cached_property
    def name(self):
        # Human readable plugin name, resolved once per instance.
        return plugin_pool.get_plugin(self.plugin_type).name
class TextEditorConfigFields(models.Model):
    """A named style (element + CSS classes) offered inside the rich-text editor."""
    ELEMENT_CHOICES = [(c, c) for c in [
        'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'pre', 'address', 'div', 'span', 'ol', 'ul']
    ]
    name = models.CharField(
        _("Name"),
        max_length=50,
    )
    element_type = models.CharField(
        _("Element Type"),
        choices=ELEMENT_CHOICES,
        max_length=12,
    )
    css_classes = models.CharField(
        _("CSS classes"),
        max_length=250,
        help_text=_("Freely selectable CSS classnames for this Text-Editor Style, separated by spaces."),
    )

    class Meta:
        verbose_name = _("Text Editor Config")

    def get_config(self):
        """Return this style as a JSON string in the editor's style-definition format."""
        config = {
            'name': self.name,
            'element': self.element_type,
            'attributes': {'class': self.css_classes},
        }
        return json.dumps(config)
class Segmentation(models.Model):
    # Unmanaged placeholder without a database table; presumably exists only
    # to hook a named permission/admin entry -- confirm against the admin code.
    class Meta:
        verbose_name = _("Segmentation")
        managed = False  # it's a dummy model
        db_table = None
class CascadeClipboard(models.Model):
    """
    A model class to persist, export and re-import the clipboard's content.
    """
    identifier = models.CharField(
        _("Identifier"),
        max_length=50,
        unique=True,
    )
    # Serialized plugin-tree data captured from the CMS clipboard.
    data = models.JSONField(
        null=True,
        blank=True,
        default=dict,
    )
    # SET_NULL keeps the persisted clipboard when its author is deleted.
    created_by = models.ForeignKey(
        get_user_model(),
        verbose_name=_("Created by"),
        on_delete=models.SET_NULL,
        editable=False,
        null=True,
    )
    created_at = models.DateTimeField(
        _("Created at"),
        auto_now_add=True,
        editable=False,
    )
    # None until the entry is first restored from the clipboard.
    last_accessed_at = models.DateTimeField(
        _("Last accessed at"),
        null=True,
        default=None,
        editable=False,
    )

    class Meta:
        verbose_name = _("Persisted Clipboard Content")
        verbose_name_plural = _("Persisted Clipboard Contents")

    def __str__(self):
        return self.identifier
class FilePathField(models.FilePathField):
    """`models.FilePathField` whose ``path`` defaults to the configured icon font root.

    The ``path`` keyword is stripped again during deconstruction, so a change
    of the local settings never produces a new migration file.
    """
    def __init__(self, **kwargs):
        if 'path' not in kwargs:
            kwargs['path'] = app_settings.CMSPLUGIN_CASCADE['icon_font_root']
        super().__init__(**kwargs)

    def deconstruct(self):
        """Drop the settings-derived ``path`` from the serialized field."""
        name, import_path, args, kwargs = super().deconstruct()
        kwargs.pop('path')
        return name, import_path, args, kwargs
class IconFont(models.Model):
    """
    Instances of uploaded icon fonts, such as FontAwesone, MaterialIcons, etc.
    """
    identifier = models.CharField(
        _("Identifier"),
        max_length=50,
        unique=True,
        help_text=_("A unique identifier to distinguish this icon font."),
    )
    # Parsed Fontello config.json describing the glyphs of this font.
    config_data = models.JSONField()
    zip_file = FilerFileField(
        on_delete=models.CASCADE,
        help_text=_('Upload a zip file created on <a href="http://fontello.com/" target="_blank">Fontello</a> containing fonts.')
    )
    # Folder (below icon_font_root) where the zip file was unpacked.
    font_folder = FilePathField(allow_files=False, allow_folders=True)
    is_default = models.BooleanField(
        _("Default Font"),
        default=False,
        help_text=_("Use this font as default, unless an icon font is set for the current page."),
    )

    class Meta:
        verbose_name = _("Uploaded Icon Font")
        verbose_name_plural = _("Uploaded Icon Fonts")

    def __str__(self):
        return self.identifier

    def get_icon_families(self):
        """
        Return an ordered dict of css classes required to render these icons
        """
        # Glyphs are grouped by their 'src' attribute (falling back to 'default').
        families = OrderedDict()
        for glyph in self.config_data['glyphs']:
            src = glyph.pop('src', 'default')
            families.setdefault(src, [])
            css = glyph.get('css')
            if css:
                families[src].append(css)
        return families

    def get_stylesheet_url(self):
        """Return the MEDIA_URL-relative URL of this font's generated CSS file."""
        icon_font_url = os.path.relpath(app_settings.CMSPLUGIN_CASCADE['icon_font_root'], settings.MEDIA_ROOT)
        name = self.config_data.get('name') or 'fontello'
        parts = (icon_font_url, Path(self.font_folder).as_posix(), 'css/{}.css'.format(name))
        return urljoin(settings.MEDIA_URL, '/'.join(parts))

    def config_data_as_json(self):
        """Return the config (glyphs replaced by grouped families) as a JSON string."""
        data = dict(self.config_data)
        data.pop('glyphs', None)
        data['families'] = self.get_icon_families()
        return json.dumps(data)

    @classmethod
    def delete_icon_font(cls, instance=None, **kwargs):
        """pre_delete handler: remove the unpacked font files from disk."""
        if isinstance(instance, cls):
            font_folder = os.path.join(app_settings.CMSPLUGIN_CASCADE['icon_font_root'], instance.font_folder)
            shutil.rmtree(font_folder, ignore_errors=True)
            try:
                # Best effort: also remove the (now possibly empty) parent
                # upload folder; a non-empty parent raises OSError upstream --
                # only a missing one is silenced here.
                temp_folder = os.path.abspath(os.path.join(font_folder, os.path.pardir))
                os.rmdir(temp_folder)
            except FileNotFoundError:
                pass
# Remove the uploaded font files from disk whenever an IconFont row is deleted.
models.signals.pre_delete.connect(IconFont.delete_icon_font, dispatch_uid='delete_icon_font')
class CascadePage(PageExtension):
    """
    Keep arbitrary data tightly coupled to the CMS page.
    """
    settings = models.JSONField(
        blank=True,
        default=dict,
        help_text=_("User editable settings for this page."),
    )
    glossary = models.JSONField(
        blank=True,
        default=dict,
        help_text=_("Store for arbitrary page data."),
    )
    # Per-page icon font override; falls back to the default font when unset.
    icon_font = models.ForeignKey(
        IconFont,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        verbose_name=_("Icon Font"),
    )
    menu_symbol = models.CharField(
        _("Menu Symbol"),
        blank=True,
        null=True,
        max_length=32,
        help_text=_("Symbol to be used with the menu title for this page."),
    )

    class Meta:
        db_table = 'cmsplugin_cascade_page'
        verbose_name = verbose_name_plural = _("Cascade Page Settings")

    def __str__(self):
        return self.get_page().get_title()

    @classmethod
    def assure_relation(cls, cms_page):
        """
        Assure that we have a foreign key relation, pointing from CascadePage onto CMSPage.
        """
        try:
            cms_page.cascadepage
        except cls.DoesNotExist:
            cls.objects.create(extended_object=cms_page)

    @classmethod
    def delete_cascade_element(cls, instance=None, **kwargs):
        """pre_delete handler: drop the deleted plugin's id from the page glossary."""
        if isinstance(instance, CascadeModelBase):
            try:
                instance.placeholder.page.cascadepage.glossary['element_ids'][instance.language].pop(str(instance.pk))
                instance.placeholder.page.cascadepage.save()
            except (AttributeError, KeyError):
                # Deliberately best-effort: the plugin may sit on a static
                # placeholder (no page) or never have been registered.
                pass
# Register the page extension with django-CMS and clean up stale element ids
# whenever a Cascade plugin instance is deleted.
extension_pool.register(CascadePage)
models.signals.pre_delete.connect(CascadePage.delete_cascade_element, dispatch_uid='delete_cascade_element')
| mit | cedd936ec078f2fba9f7aacd323157fc | 27.510588 | 129 | 0.614509 | 4.022908 | false | false | false | false |
jrief/djangocms-cascade | cmsplugin_cascade/sphinx/cms_apps.py | 1 | 1950 | import io
import mimetypes
import os
from django.conf import settings
from django.core.exceptions import ViewDoesNotExist
from django.http.response import HttpResponse
from django.views.generic import TemplateView
from django.urls import re_path
from django.utils.cache import patch_cache_control
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class SphinxDocsView(TemplateView):
    """Serve pre-built Sphinx HTML pages (and their images) below a CMS page."""

    def get(self, request, *args, **kwargs):
        # `slug` comes straight from the URL.
        # NOTE(review): it is joined into filesystem paths below without
        # normalization -- this looks vulnerable to path traversal ('..');
        # confirm the URL pattern cannot produce such slugs.
        slug = kwargs.get('slug', '')
        _, extension = os.path.splitext(slug)
        if extension in ['.png', '.jpg', '.jpeg', '.gif']:
            # Images referenced by the Sphinx pages are streamed directly.
            filename = os.path.join(settings.SPHINX_DOCS_ROOT, slug)
            content_type, _ = mimetypes.guess_type(filename)
            with io.open(filename, 'rb') as fd:
                response = HttpResponse(content=fd.read(), content_type=content_type)
            patch_cache_control(response, cache_control='max-age=86400')
            return response
        return super().get(request, page=slug, *args, **kwargs)

    def get_template_names(self):
        # Render inside the template of the CMS page this apphook is attached to.
        return [self.request.current_page.get_template()]

    def get_context_data(self, page='index.html', **kwargs):
        """Inject the pre-rendered Sphinx page body into the template context."""
        context = super().get_context_data(**kwargs)
        filename = os.path.join(settings.SPHINX_DOCS_ROOT, page, 'index.html')
        if not os.path.exists(filename):
            raise ViewDoesNotExist("{} does not exist".format(page))
        with io.open(filename, encoding='utf-8') as fd:
            # mark_safe: the HTML was produced by Sphinx, not by user input.
            context.update(page_content=mark_safe(fd.read()))
        return context
@apphook_pool.register
class SphinxDocsApp(CMSApp):
    """CMS apphook mounting the Sphinx documentation view below a page."""

    name = _("Sphinx Documentation")

    def get_urls(self, page=None, language=None, **kwargs):
        """Return the URL patterns served below the attached CMS page."""
        doc_view = SphinxDocsView.as_view()
        slug_pattern = r'^(?P<slug>\S+)/$'
        return [re_path(slug_pattern, doc_view, name='sphinx-documentation')]
| mit | f8a99e97a909e666b7458c100a44236a | 38 | 96 | 0.664615 | 3.816047 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/nigeria.py | 1 | 3886 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import SAT, JAN, MAY, JUN, OCT, DEC
from holidays.holiday_base import HolidayBase
from holidays.utils import _islamic_to_gre
class Nigeria(HolidayBase):
    """
    Nigerian public holidays.

    https://en.wikipedia.org/wiki/Public_holidays_in_Nigeria
    """

    country = "NG"

    def _populate(self, year):
        """Fill ``self`` with the Nigerian holidays of *year* (from 1979 on)."""
        super()._populate(year)
        if year > 1978:

            def _add_holiday(dt: date, hol: str) -> None:
                """Only add if in current year; prevents adding holidays across
                years (handles multi-day Islamic holidays that straddle
                Gregorian years).
                """
                if dt.year == year:
                    self[dt] = hol

            # New Year's Day
            self[date(year, JAN, 1)] = "New Year's day"

            # Calculate Easter for given year
            # followed by easter related holidays
            e = easter(year)
            good_friday = e - rd(days=2)
            self[good_friday] = "Good Friday"

            easter_monday = e + rd(days=1)
            self[easter_monday] = "Easter Monday"

            # Worker's day
            self[date(year, MAY, 1)] = "Workers' day"

            # Eid al-Fitr - Feast Festive
            # This is an estimate
            # date of observance is announced yearly
            for yr in (year - 1, year):
                for date_obs in _islamic_to_gre(yr, 10, 1):
                    hol_date = date_obs
                    _add_holiday(hol_date, "Eid al-Fitr")
                    _add_holiday(hol_date + rd(days=1), "Eid al-Fitr Holiday")

            # Arafat Day & Eid al-Adha - Scarfice Festive
            # This is an estimate
            # date of observance is announced yearly
            for yr in (year - 1, year):
                for date_obs in _islamic_to_gre(yr, 12, 10):
                    hol_date = date_obs
                    _add_holiday(hol_date, "Eid al-Adha")
                    _add_holiday(hol_date + rd(days=1), "Eid al-Adha Holiday")

            # Independence Day
            self[date(year, OCT, 1)] = "National day"

            # Christmas day
            self[date(year, DEC, 25)] = "Christmas day"

            # Boxing day
            self[date(year, DEC, 26)] = "Boxing day"

            # Democracy day moved around after its inception in 2000
            # Initally it fell on May 29th
            if 2019 > year > 1999:
                self[date(year, MAY, 29)] = "Democracy day"

            # In 2018 it was announced that the holiday
            # will move to June 12th from 2019
            if year >= 2019:
                self[date(year, JUN, 12)] = "Democracy day"

            # Observed holidays
            for k, v in list(self.items()):
                # If a holiday falls on a Saturday the
                # following Monday is Observed as a public holiday
                if (
                    self.observed
                    and year > 2015
                    and k.weekday() == SAT
                    and k.year == year
                    # BUG FIX: the holiday above is stored as "Workers' day",
                    # whose upper-case form is "WORKERS' DAY" (apostrophe after
                    # the trailing s). The old literal "WORKER'S DAY" could
                    # never match, so Workers' day was never shifted.
                    and v.upper() in ("WORKERS' DAY", "DEMOCRACY DAY")
                ):
                    # Add the (Observed) holiday
                    self[k + rd(days=2)] = v + " (Observed)"
class NG(Nigeria):
    # ISO 3166-1 alpha-2 country-code alias.
    pass
class NGA(Nigeria):
    # ISO 3166-1 alpha-3 country-code alias.
    pass
| mit | 3d5dfa87aa840abc3c7ec514563863fd | 33.087719 | 79 | 0.533711 | 3.608171 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/canada.py | 1 | 10225 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import MO, FR, SU
from holidays.constants import (
FRI,
SUN,
WEEKEND,
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.holiday_base import HolidayBase
class Canada(HolidayBase):
country = "CA"
subdivisions = [
"AB",
"BC",
"MB",
"NB",
"NL",
"NS",
"NT",
"NU",
"ON",
"PE",
"QC",
"SK",
"YT",
]
    def __init__(self, **kwargs):
        """Initialize the calendar, defaulting the subdivision to Ontario."""
        # Default subdivision to ON; prov for backwards compatibility
        if not kwargs.get("subdiv", kwargs.get("prov")):
            kwargs["subdiv"] = "ON"
        HolidayBase.__init__(self, **kwargs)
    @staticmethod
    def _get_nearest_monday(d: date) -> date:
        """Return the Monday closest to *d*.

        Mon-Thu snap back to that week's Monday; Fri-Sun roll forward
        to the next Monday (the genuinely nearer one in both cases).
        """
        if d.weekday() < FRI:
            return d + rd(weekday=MO(-1))
        else:
            return d + rd(weekday=MO)
def _populate(self, year):
super()._populate(year)
if year < 1867:
return
# New Year's Day
name = "New Year's Day"
self[date(year, JAN, 1)] = name
if self.observed and date(year, JAN, 1).weekday() == SUN:
self[date(year, JAN, 1) + rd(days=+1)] = name + " (Observed)"
# The following year's observed New Year's Day can be in this year
# when it falls on a Friday (Jan 1st is a Saturday).
if self.observed and date(year, DEC, 31).weekday() == FRI:
self[date(year, DEC, 31)] = name + " (Observed)"
# Family Day / Louis Riel Day (MB) / Islander Day (PE)
# / Heritage Day (NS, YT)
if (
(self.subdiv == "AB" and year >= 1990)
or (self.subdiv == "SK" and year >= 2007)
or (self.subdiv == "ON" and year >= 2008)
or (self.subdiv == "NB" and year >= 2018)
):
self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
elif self.subdiv == "BC":
if 2013 <= year <= 2018:
self[date(year, FEB, 1) + rd(weekday=MO(+2))] = "Family Day"
elif year > 2018:
self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
elif self.subdiv == "MB" and year >= 2008:
self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Louis Riel Day"
elif self.subdiv == "PE" and year >= 2010:
self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Islander Day"
elif self.subdiv == "PE" and year == 2009:
self[date(year, FEB, 1) + rd(weekday=MO(+2))] = "Islander Day"
elif self.subdiv == "NS" and year >= 2015:
# http://novascotia.ca/lae/employmentrights/NovaScotiaHeritageDay.asp
self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Heritage Day"
elif self.subdiv == "YT" and year >= 1974:
# start date?
# https://www.britannica.com/topic/Heritage-Day-Canadian-holiday
# Heritage Day was created in 1973
# by the Heritage Canada Foundation
# therefore, start date is not earlier than 1974
# http://heritageyukon.ca/programs/heritage-day
# https://en.wikipedia.org/wiki/Family_Day_(Canada)#Yukon_Heritage_Day
# Friday before the last Sunday in February
dt = (
date(year, MAR, 1)
+ rd(days=-1)
+ rd(weekday=SU(-1))
+ rd(days=-2)
)
self[dt] = "Heritage Day"
# St. Patrick's Day
if self.subdiv == "NL" and year >= 1900:
# Nearest Monday to March 17
dt = self._get_nearest_monday(date(year, MAR, 17))
self[dt] = "St. Patrick's Day"
# Good Friday
self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
# Easter Monday
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
# St. George's Day
if self.subdiv == "NL" and year >= 1990:
if year == 2010:
# 4/26 is the Monday closer to 4/23 in 2010
# but the holiday was observed on 4/19? Crazy Newfies!
dt = date(2010, APR, 19)
else:
# Nearest Monday to April 23
dt = self._get_nearest_monday(date(year, APR, 23))
self[dt] = "St. George's Day"
# Victoria Day / National Patriots' Day (QC)
if year >= 1953:
dt = date(year, MAY, 24) + rd(weekday=MO(-1))
if self.subdiv not in ("NB", "NS", "PE", "NL", "QC"):
self[dt] = "Victoria Day"
elif self.subdiv == "QC":
self[dt] = "National Patriots' Day"
# National Aboriginal Day
if self.subdiv == "NT" and year >= 1996:
self[date(year, JUN, 21)] = "National Aboriginal Day"
# St. Jean Baptiste Day
if self.subdiv == "QC" and year >= 1925:
name = "St. Jean Baptiste Day"
dt = date(year, JUN, 24)
self[dt] = name
if self.observed and dt.weekday() == SUN:
self[dt + rd(days=1)] = name + " (Observed)"
# Discovery Day
if self.subdiv == "NL" and year >= 1997:
# Nearest Monday to June 24
dt = self._get_nearest_monday(date(year, JUN, 24))
self[dt] = "Discovery Day"
elif self.subdiv == "YT" and year >= 1912:
self[date(year, AUG, 1) + rd(weekday=MO(+3))] = "Discovery Day"
# Canada Day / Memorial Day (NL)
if year >= 1983:
name = "Memorial Day" if self.subdiv == "NL" else "Canada Day"
else:
name = "Dominion Day"
dt = date(year, JUL, 1)
self[dt] = name
if year >= 1879 and self.observed and dt.weekday() in WEEKEND:
self[dt + rd(weekday=MO)] = name + " (Observed)"
# Nunavut Day
if self.subdiv == "NU":
name = "Nunavut Day"
if year >= 2001:
dt = date(year, JUL, 9)
self[dt] = name
if self.observed and dt.weekday() == SUN:
self[dt + rd(days=1)] = name + " (Observed)"
elif year == 2000:
self[date(2000, APR, 1)] = name
# Civic Holiday
if self.subdiv in ("ON", "MB", "NT") and year >= 1900:
self[date(year, AUG, 1) + rd(weekday=MO)] = "Civic Holiday"
elif self.subdiv == "AB" and year >= 1974:
# https://en.wikipedia.org/wiki/Civic_Holiday#Alberta
self[date(year, AUG, 1) + rd(weekday=MO)] = "Heritage Day"
elif self.subdiv == "BC" and year >= 1974:
# https://en.wikipedia.org/wiki/Civic_Holiday
self[date(year, AUG, 1) + rd(weekday=MO)] = "British Columbia Day"
elif self.subdiv == "NB" and year >= 1900:
# https://en.wikipedia.org/wiki/Civic_Holiday
self[date(year, AUG, 1) + rd(weekday=MO)] = "New Brunswick Day"
elif self.subdiv == "SK" and year >= 1900:
# https://en.wikipedia.org/wiki/Civic_Holiday
self[date(year, AUG, 1) + rd(weekday=MO)] = "Saskatchewan Day"
# Labour Day
if year >= 1894:
self[date(year, SEP, 1) + rd(weekday=MO)] = "Labour Day"
# Funeral of Queen Elizabeth II
# https://www.narcity.com/provinces-territories-will-have-a-day-off-monday-mourn-queen
# TODO: the territories holiday status (NT, NU, YT) is still tentative
queen_funeral_observers = ("BC", "NB", "NL", "NS", "PE", "YT")
if self.subdiv in queen_funeral_observers and year == 2022:
self[
date(2022, SEP, 19)
] = "Funeral of Her Majesty the Queen Elizabeth II"
# National Day for Truth and Reconciliation
if self.subdiv in ("MB", "NS") and year >= 2021:
self[
date(year, SEP, 30)
] = "National Day for Truth and Reconciliation"
# Thanksgiving
if self.subdiv not in ("NB", "NS", "PE", "NL") and year >= 1931:
if year == 1935:
# in 1935, Canadian Thanksgiving was moved due to the General
# Election falling on the second Monday of October
# https://books.google.ca/books?id=KcwlQsmheG4C&pg=RA1-PA1940&lpg=RA1-PA1940&dq=canada+thanksgiving+1935&source=bl&ots=j4qYrcfGuY&sig=gxXeAQfXVsOF9fOwjSMswPHJPpM&hl=en&sa=X&ved=0ahUKEwjO0f3J2PjOAhVS4mMKHRzKBLAQ6AEIRDAG#v=onepage&q=canada%20thanksgiving%201935&f=false
self[date(1935, OCT, 25)] = "Thanksgiving"
else:
self[date(year, OCT, 1) + rd(weekday=MO(+2))] = "Thanksgiving"
# Remembrance Day
if self.subdiv not in ("ON", "QC") and year >= 1931:
name = "Remembrance Day"
dt = date(year, NOV, 11)
self[dt] = name
if (
self.observed
and self.subdiv in ("NS", "NL", "NT", "PE", "SK")
and dt.weekday() == SUN
):
self[dt + rd(weekday=MO)] = name + " (Observed)"
# Christmas Day
name = "Christmas Day"
dt = date(year, DEC, 25)
self[dt] = name
if self.observed and dt.weekday() in WEEKEND:
self[dt + rd(days=2)] = name + " (Observed)"
# Boxing Day
name = "Boxing Day"
dt = date(year, DEC, 26)
self[dt] = name
if self.observed and dt.weekday() in WEEKEND:
self[dt + rd(days=2)] = name + " (Observed)"
class CA(Canada):
    """Two-letter country-code alias of :class:`Canada`."""

    pass
class CAN(Canada):
    """Three-letter country-code alias of :class:`Canada`."""

    pass
| mit | 67ee9de232a8499b4ec38719c3bba5c5 | 36.454212 | 283 | 0.525868 | 3.212378 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/ukraine.py | 1 | 5815 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import EASTER_ORTHODOX, easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import (
JAN,
APR,
MAR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
WEEKEND,
)
from holidays.holiday_base import HolidayBase
class Ukraine(HolidayBase):
    """Ukrainian public holidays.

    Current holidays list:
    https://zakon1.rada.gov.ua/laws/show/322-08/paran454#n454
    """

    country = "UA"

    def _add_observed(self, holiday: date) -> None:
        """Add an observed substitute day for ``holiday`` when it falls on a
        weekend and the weekend-observation rule was in force on that date.

        27.01.1995: holiday on weekend move to next workday
        https://zakon.rada.gov.ua/laws/show/35/95-вр
        10.01.1998: cancelled
        https://zakon.rada.gov.ua/laws/show/785/97-вр
        23.04.1999: holiday on weekend move to next workday
        https://zakon.rada.gov.ua/laws/show/576-14
        """
        if (
            self.observed
            and holiday.weekday() in WEEKEND
            and (
                date(1995, JAN, 27) <= holiday <= date(1998, JAN, 9)
                or holiday >= date(1999, APR, 23)
            )
        ):
            # Walk forward to the first day that is neither a weekend day
            # nor already occupied by another holiday.
            next_workday = holiday + rd(days=1)
            while next_workday.weekday() in WEEKEND or self.get(
                next_workday, None
            ):
                next_workday += rd(days=1)
            self[next_workday] = "Вихідний за " + self[holiday]

    def _populate(self, year):
        """Populate the calendar with all Ukrainian holidays for ``year``."""
        super()._populate(year)

        # The current set of holidays came into force in 1991
        # But most holiday days were implemented in 1918
        if year <= 1917:
            return

        # New Year's Day
        if year <= 1929 or year >= 1948:
            self[date(year, JAN, 1)] = "Новий рік"

        # Christmas Day (Julian calendar)
        if year >= 1991:
            self[
                date(year, JAN, 7)
            ] = "Різдво Христове (за юліанським календарем)"

        # Women's Day
        if year >= 1966:
            self[date(year, MAR, 8)] = "Міжнародний жіночий день"

        if year >= 1991:
            # Easter
            dt = easter(year, method=EASTER_ORTHODOX)
            self[dt] = "Великдень (Пасха)"

            # Holy trinity
            self[dt + rd(days=49)] = "Трійця"

        # Labour Day
        name = "День міжнародної солідарності трудящих"
        if year >= 2018:
            name = "День праці"
        self[date(year, MAY, 1)] = name

        # Labour Day in past
        if 1929 <= year <= 2017:
            self[date(year, MAY, 2)] = "День міжнародної солідарності трудящих"

        # Victory Day
        name = "День перемоги"
        dt = date(year, MAY, 9)
        if year >= 2016:
            self[dt] = (
                "День перемоги над нацизмом у Другій світовій війні "
                "(День перемоги)"
            )
        elif 1965 <= year <= 2015:
            self[dt] = name
        elif 1945 <= year <= 1946:
            self[dt] = name
            self[date(year, SEP, 3)] = "День перемоги над Японією"

        # Constitution Day
        if year >= 1997:
            self[date(year, JUN, 28)] = "День Конституції України"

        # Day of Ukrainian Statehood
        if year >= 2022:
            self[date(year, JUL, 28)] = "День Української Державності"

        # Independence Day
        name = "День незалежності України"
        if year >= 1992:
            self[date(year, AUG, 24)] = name
        elif year == 1991:
            self[date(year, JUL, 16)] = name

        # Day of the defender of Ukraine
        if year >= 2015:
            name = "День захисника України"
            if year >= 2021:
                name = "День захисників і захисниць України"
            self[date(year, OCT, 14)] = name

        # October Revolution
        if year <= 1999:
            name = "Річниця Великої Жовтневої соціалістичної революції"
            self[date(year, NOV, 7)] = name
            self[date(year, NOV, 8)] = name

        # Christmas Day (Gregorian calendar)
        if year >= 2017:
            self[
                date(year, DEC, 25)
            ] = "Різдво Христове (за григоріанським календарем)"

        # Shift this year's weekend holidays to the next workday where the
        # observation rule applies (see _add_observed).
        for dt in sorted(list(self.keys())):
            if dt.year == year:
                self._add_observed(dt)

        # USSR holidays
        # Bloody_Sunday_(1905)
        if year <= 1950:
            self[date(year, JAN, 22)] = "День пам’яті 9 січня 1905 року"

        # Paris_Commune
        if year <= 1928:
            self[date(year, MAR, 18)] = "День Паризької Комуни"

        # USSR Constitution day
        name = "День Конституції СРСР"
        if 1981 <= year <= 1990:
            self[date(year, OCT, 7)] = name
        elif 1937 <= year <= 1980:
            self[date(year, DEC, 5)] = name
class UA(Ukraine):
    """Two-letter country-code alias of :class:`Ukraine`."""

    pass
class UKR(Ukraine):
    """Three-letter country-code alias of :class:`Ukraine`."""

    pass
| mit | ebe8fb7a6ed4e78d5d29ce324bcb3574 | 28.010989 | 79 | 0.538447 | 2.723053 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/austria.py | 1 | 2152 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import MO
from holidays.constants import JAN, MAY, AUG, OCT, NOV, DEC
from holidays.holiday_base import HolidayBase
class Austria(HolidayBase):
    """Austrian public holidays.

    ``subdiv`` selects one of the nine federal states ("1" through "9");
    it defaults to "9" and integer values are accepted for convenience.
    """

    country = "AT"
    subdivisions = [str(number) for number in range(1, 10)]

    def __init__(self, **kwargs):
        """Normalize the subdivision argument before delegating upward."""
        # Integers are tolerated and converted to their string form.
        subdiv = kwargs.get("subdiv")
        if isinstance(subdiv, int):
            kwargs["subdiv"] = str(subdiv)
        # ``state`` is the legacy spelling of ``subdiv``; default to "9".
        if not kwargs.get("subdiv", kwargs.get("state")):
            kwargs["subdiv"] = "9"
        super().__init__(**kwargs)

    def _populate(self, year):
        """Populate the calendar with all Austrian holidays for ``year``."""
        super()._populate(year)

        # Fixed-date public holidays, observed every year.
        for month, day, name in (
            (JAN, 1, "Neujahr"),
            (JAN, 6, "Heilige Drei Könige"),
            (MAY, 1, "Staatsfeiertag"),
            (AUG, 15, "Mariä Himmelfahrt"),
            (NOV, 1, "Allerheiligen"),
            (DEC, 8, "Mariä Empfängnis"),
            (DEC, 25, "Christtag"),
            (DEC, 26, "Stefanitag"),
        ):
            self[date(year, month, day)] = name

        # Movable feasts relative to Easter Sunday.
        easter_sunday = easter(year)
        self[easter_sunday + rd(weekday=MO)] = "Ostermontag"
        self[easter_sunday + rd(days=39)] = "Christi Himmelfahrt"
        self[easter_sunday + rd(days=50)] = "Pfingstmontag"
        self[easter_sunday + rd(days=60)] = "Fronleichnam"

        # National day: Nov 12 between the wars, Oct 26 since 1967.
        if 1919 <= year <= 1934:
            self[date(year, NOV, 12)] = "Nationalfeiertag"
        if year >= 1967:
            self[date(year, OCT, 26)] = "Nationalfeiertag"
class AT(Austria):
    """Two-letter country-code alias of :class:`Austria`."""

    pass
class AUT(Austria):
    """Three-letter country-code alias of :class:`Austria`."""

    pass
| mit | 9db28efa789cee2f3119a940f82df9ec | 34.213115 | 78 | 0.617784 | 2.979196 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/botswana.py | 1 | 3875 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import MO
from holidays.constants import SAT, SUN, JAN, MAY, JUL, SEP, OCT, DEC
from holidays.holiday_base import HolidayBase
class Botswana(HolidayBase):
    """Botswana public holidays.

    https://www.gov.bw/public-holidays
    https://publicholidays.africa/botswana/2021-dates/
    https://www.timeanddate.com/holidays/botswana/
    http://www.ilo.org/dyn/travail/docs/1766/Public%20Holidays%20Act.pdf
    """

    country = "BW"

    def _populate(self, year: int):
        """Populate the calendar with all Botswana holidays for ``year``."""
        super()._populate(year)

        # Holidays start after independence (1966).
        if year > 1965:
            self[date(year, JAN, 1)] = "New Year's Day"
            self[date(year, JAN, 2)] = "New Year's Day Holiday"

            # Easter and easter related calculations
            e = easter(year)
            good_friday = e - rd(days=2)
            easter_saturday = e - rd(days=1)
            easter_monday = e + rd(days=1)
            self[good_friday] = "Good Friday"
            self[easter_saturday] = "Holy Saturday"
            self[easter_monday] = "Easter Monday"

            self[date(year, MAY, 1)] = "Labour Day"
            ascension_day = e + rd(days=39)
            self[ascension_day] = "Ascension Day"

            self[date(year, JUL, 1)] = "Sir Seretse Khama Day"

            # 3rd Monday of July = "President's Day"
            d = date(year, JUL, 1) + rd(weekday=MO(+3))
            self[d] = "President's Day"
            self[d + rd(days=1)] = "President's Day Holiday"

            self[date(year, SEP, 30)] = "Botswana Day"
            self[date(year, OCT, 1)] = "Botswana Day Holiday"

            self[date(year, DEC, 25)] = "Christmas Day"
            self[date(year, DEC, 26)] = "Boxing Day"

        # Iterate over a snapshot of the entries because the loop body adds
        # new (observed) entries to the calendar while it runs.
        for k, v in list(self.items()):
            # Whenever Boxing Day falls on a Saturday,
            # it rolls over to the following Monday
            if (
                self.observed
                and year > 2015
                and k.weekday() == SAT
                and k.year == year
                and v.upper() in ("BOXING DAY", "LABOUR DAY")
            ):
                # Add the (Observed) holiday
                self[k + rd(days=2)] = v + " Holiday"

            if (
                self.observed
                and year > 1994
                and k.weekday() == SUN
                and k.year == year
                and v.upper() != "NEW YEAR'S DAY HOLIDAY"
            ):
                # Add the (Observed) holiday
                self[k + rd(days=1)] = v + " (Observed)"

            # If there is a holiday and an (Observed) holiday on the same day,
            # add an (Observed) holiday for that holiday
            if len(self.get(k).split(",")) > 1:
                # self.get(date) returns a string containing holidays as a
                # comma delimited string split on delimiter to determine if
                # there are multiple on the same day

                # Add an (Observed) for the one that is not (Observed)
                for i in self.get(k).split(","):
                    if " (Observed)" not in i:
                        self[k + rd(days=1)] = i.lstrip() + " (Observed)"

        # Once off ad-hoc holiday.
        if year == 2019:
            self[date(year, JUL, 2)] = "Public Holiday"
class BW(Botswana):
    """Two-letter country-code alias of :class:`Botswana`."""

    pass
class BWA(Botswana):
    """Three-letter country-code alias of :class:`Botswana`."""

    pass
| mit | e4410ea0cdb019d42cf5d6bb0ba76ff7 | 34.227273 | 78 | 0.548903 | 3.423145 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/estonia.py | 1 | 1876 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import JAN, FEB, MAY, JUN, AUG, DEC
from holidays.holiday_base import HolidayBase
class Estonia(HolidayBase):
    """Estonian public holidays."""

    country = "EE"

    def _populate(self, year):
        """Populate the calendar with all Estonian holidays for ``year``."""
        super()._populate(year)

        # Fixed-date holidays, observed every year.
        for month, day, name in (
            # New Year's Day
            (JAN, 1, "uusaasta"),
            # Independence Day, anniversary of the Republic of Estonia
            (FEB, 24, "iseseisvuspäev"),
            # Spring Day
            (MAY, 1, "kevadpüha"),
            # Victory Day
            (JUN, 23, "võidupüha"),
            # Midsummer Day
            (JUN, 24, "jaanipäev"),
            # Day of Restoration of Independence
            (AUG, 20, "taasiseseisvumispäev"),
            # Christmas Eve
            (DEC, 24, "jõululaupäev"),
            # Christmas Day
            (DEC, 25, "esimene jõulupüha"),
            # Boxing Day
            (DEC, 26, "teine jõulupüha"),
        ):
            self[date(year, month, day)] = name

        # Movable feasts relative to Easter Sunday.
        easter_sunday = easter(year)
        # Good Friday
        self[easter_sunday - rd(days=2)] = "suur reede"
        # Easter Sunday
        self[easter_sunday] = "ülestõusmispühade 1. püha"
        # Pentecost
        self[easter_sunday + rd(days=49)] = "nelipühade 1. püha"
class EE(Estonia):
    """Two-letter country-code alias of :class:`Estonia`."""

    pass
class EST(Estonia):
    """Three-letter country-code alias of :class:`Estonia`."""

    pass
| mit | 568bb7bf797ff91b371b5166fd3796f8 | 24.805556 | 78 | 0.623251 | 2.935229 | false | false | false | false |
dr-prodigy/python-holidays | holidays/financial/ny_stock_exchange.py | 1 | 11711 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, timedelta
from dateutil.easter import easter
from dateutil.relativedelta import FR, MO, TH, TU
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import (
APR,
AUG,
DEC,
FEB,
JAN,
JUL,
JUN,
MAR,
MAY,
NOV,
OCT,
SEP,
)
from holidays.holiday_base import HolidayBase
class NewYorkStockExchange(HolidayBase):
    """New York Stock Exchange (NYSE) trading holidays.

    Covers the recurring holiday schedule plus historical one-off
    closings back to the 19th century.

    Official regulations:
      https://www.nyse.com/publicdocs/nyse/regulation/nyse/NYSE_Rules.pdf
      https://www.nyse.com/markets/hours-calendars
    Historical data:
      s3.amazonaws.com/armstrongeconomics-wp/2013/07/NYSE-Closings.pdf
    """

    market = "NYSE"

    def __init__(self, **kwargs):
        HolidayBase.__init__(self, **kwargs)

    def _get_observed(self, d):
        """Return the date on which a holiday falling on ``d`` is observed.

        Saturday holidays are observed on the preceding Friday, Sunday
        holidays on the following Monday; weekday holidays are unchanged.
        """
        wdnum = d.isoweekday()
        if wdnum == 6:
            return d + rd(weekday=FR(-1))
        if wdnum == 7:
            return d + rd(weekday=MO(+1))
        return d

    def _set_observed_date(self, holiday_date, name):
        """Record ``name`` on its observed date, tagging shifted dates."""
        date_obs = self._get_observed(holiday_date)
        if date_obs == holiday_date:
            self[holiday_date] = name
        else:
            self[date_obs] = name + " (Observed)"

    def _populate(self, year):
        """Populate the calendar with all NYSE closings for ``year``."""
        super()._populate(year)

        ##############################################################
        # REGULAR HOLIDAYS
        ##############################################################

        # NYD
        # This year's New Year Day.
        self._set_observed_date(date(year, JAN, 1), "New Year's Day")

        # https://www.nyse.com/publicdocs/nyse/regulation/nyse/NYSE_Rules.pdf
        # As per Rule 7.2.: check if next year's NYD falls on Saturday and
        # needs to be observed on Friday (Dec 31 of previous year).
        dec_31 = date(year, DEC, 31)
        if dec_31.isoweekday() == 5:
            self._set_observed_date(dec_31 + rd(days=+1), "New Year's Day")

        # MLK - observed 1998 - 3rd Monday of Jan
        if year >= 1998:
            self[
                date(year, JAN, 1) + rd(weekday=MO(3))
            ] = "Martin Luther King Jr. Day"

        # LINCOLN BIRTHDAY: observed 1896 - 1953 and 1968, Feb 12 (observed)
        if (1896 <= year <= 1953) or year == 1968:
            lincoln = date(year, FEB, 12)
            self._set_observed_date(lincoln, "Lincoln's Birthday")

        # WASHINGTON'S BIRTHDAY: Feb 22 (obs) until 1971, then 3rd Mon of Feb
        if year < 1971:
            wash = date(year, FEB, 22)
            self._set_observed_date(wash, "Washington's Birthday")
        else:
            self[
                date(year, FEB, 1) + rd(weekday=MO(3))
            ] = "Washington's Birthday"

        # GOOD FRIDAY - closed every year except 1898, 1906, and 1907
        e = easter(year)
        if year not in (1898, 1906, 1907):
            self[e - rd(days=2)] = "Good Friday"

        # MEM DAY (May 30) - closed every year since 1873
        # last Mon in May since 1971
        if 1873 <= year < 1971:
            memday = date(year, MAY, 30)
            self._set_observed_date(memday, "Memorial Day")
        else:
            self[date(year, MAY, 31) + rd(weekday=MO(-1))] = "Memorial Day"

        # FLAG DAY: June 14th 1916 - 1953
        if 1916 <= year <= 1953:
            flagday = date(year, JUN, 14)
            self._set_observed_date(flagday, "Flag Day")

        # JUNETEENTH: since 2021
        if year >= 2021:
            juneteenth = date(year, JUN, 19)
            self._set_observed_date(
                juneteenth, "Juneteenth National Independence Day"
            )

        # INDEPENDENCE DAY (July 4) - history suggests closed every year
        j4th = date(year, JUL, 4)
        self._set_observed_date(j4th, "Independence Day")

        # LABOR DAY - first mon in Sept, since 1887
        if year >= 1887:
            self[date(year, SEP, 1) + rd(weekday=MO(1))] = "Labor Day"

        # COLUMBUS DAY/INDIGENOUS PPL DAY: Oct 12 - closed 1909-1953
        if 1909 <= year <= 1953:
            colday = date(year, OCT, 12)
            self._set_observed_date(colday, "Columbus Day")

        # ELECTION DAY: first Tues in NOV
        # closed until 1969, then closed pres years 1972-80
        if year <= 1968:
            self[date(year, NOV, 1) + rd(weekday=TU(1))] = "Election Day"
        elif year in (1972, 1976, 1980):
            self[date(year, NOV, 1) + rd(weekday=TU(1))] = "Election Day"

        # VETERAN'S DAY: Nov 11 - closed 1918, 1921, 1934-1953
        if year in (1918, 1921) or (1934 <= year <= 1953):
            vetday = date(year, NOV, 11)
            self._set_observed_date(vetday, "Veteran's Day")

        # THXGIVING DAY: 4th Thurs in Nov - closed every year
        self[date(year, NOV, 1) + rd(weekday=TH(4))] = "Thanksgiving Day"

        # XMAS DAY: Dec 25th - every year
        xmas = date(year, DEC, 25)
        self._set_observed_date(xmas, "Christmas Day")

        ##############################################################
        # SPECIAL HOLIDAYS
        ##############################################################
        if year == 1888:
            self[date(year, MAR, 12)] = "Blizzard of 1888"
            self[date(year, MAR, 13)] = "Blizzard of 1888"
            self[date(year, NOV, 30)] = "Thanksgiving Friday 1888"
        elif year == 1889:
            self[date(year, APR, 29)] = "Centennial of Washington Inauguration"
            self[date(year, APR, 30)] = "Centennial of Washington Inauguration"
            self[date(year, MAY, 1)] = "Centennial of Washington Inauguration"
        elif year == 1892:
            self[date(year, OCT, 12)] = "Columbian Celebration"
            self[date(year, OCT, 21)] = "Columbian Celebration"
        elif year == 1893:
            self[date(year, APR, 27)] = "Columbian Celebration"
        elif year == 1897:
            self[date(year, APR, 27)] = "Grant's Birthday"
        elif year == 1898:
            self[date(year, MAY, 4)] = "Charter Day"
        elif year == 1899:
            self[date(year, MAY, 29)] = "Monday before Decoration Day"
            self[date(year, JUL, 3)] = "Monday before Independence Day"
            self[date(year, SEP, 29)] = "Admiral Dewey Celebration"
        elif year == 1900:
            self[date(year, DEC, 24)] = "Christmas Eve"
        elif year == 1901:
            self[date(year, JUL, 5)] = "Friday after Independence Day"
            self[date(year, SEP, 19)] = "Funeral of President McKinley"
        elif year == 1903:
            self[date(year, APR, 22)] = "Opening of new NYSE building"
        elif year == 1914:
            # Beginning of WWI — the exchange closed for nearly four months;
            # mark every weekday in the closure window.
            begin = date(year, JUL, 31)
            end = date(year, NOV, 27)
            for d in (
                begin + timedelta(days=n)
                for n in range((end - begin).days + 1)
            ):
                if d.isoweekday() in (6, 7):
                    continue
                self[d] = "World War I"
        elif year == 1917:
            self[date(year, JUN, 5)] = "Draft Registration Day"
        elif year == 1918:
            self[date(year, JAN, 28)] = "Heatless Day"
            self[date(year, FEB, 4)] = "Heatless Day"
            self[date(year, FEB, 11)] = "Heatless Day"
            self[date(year, JUN, 14)] = "Heatless Day"
            self[date(year, SEP, 12)] = "Draft Registration Day"
            self[date(year, NOV, 11)] = "Armistice Day"
        elif year == 1919:
            self[date(year, MAR, 25)] = "Homecoming Day for 27th Division"
            self[date(year, MAY, 6)] = "Parade Day for 77th Division"
            self[date(year, SEP, 10)] = "Return of General Pershing"
        elif year == 1923:
            self[date(year, AUG, 3)] = "Death of President Warren G. Harding"
            self[
                date(year, AUG, 10)
            ] = "Funeral of President Warren G. Harding"
        elif year == 1927:
            self[date(year, JUN, 13)] = "Parade for Colonel Charles Lindbergh"
        elif year == 1929:
            self[date(year, NOV, 29)] = "Catch Up Day"
        elif year == 1933:
            # Bank-holiday closure: every weekday in the window.
            begin = date(year, MAR, 6)
            end = date(year, MAR, 14)
            for d in (
                begin + timedelta(days=n)
                for n in range((end - begin).days + 1)
            ):
                if d.isoweekday() in (6, 7):
                    continue
                self[d] = "Special Bank Holiday"
        elif year == 1945:
            self[date(year, AUG, 15)] = "V-J Day (WWII)"
            self[date(year, AUG, 16)] = "V-J Day (WWII)"
            self[date(year, DEC, 24)] = "Christmas Eve"
        elif year == 1954:
            self[date(year, DEC, 24)] = "Christmas Eve"
        elif year == 1956:
            self[date(year, DEC, 24)] = "Christmas Eve"
        elif year == 1958:
            self[date(year, DEC, 26)] = "Day after Christmas"
        elif year == 1961:
            self[date(year, MAY, 29)] = "Day before Decoration Day"
        elif year == 1963:
            self[date(year, NOV, 25)] = "Funeral of President John F. Kennedy"
        elif year == 1965:
            self[date(year, DEC, 24)] = "Christmas Eve"
        elif year == 1968:
            self[
                date(year, APR, 9)
            ] = "Day of Mourning for Martin Luther King Jr."
            self[date(year, JUL, 5)] = "Day after Independence Day"
            # Paper Crisis: the exchange closed every Wednesday in the
            # second half of 1968 to catch up on paperwork.
            begin = date(year, JUN, 12)
            end = date(year, DEC, 31)
            for d in (
                begin + timedelta(days=n)
                for n in range((end - begin).days + 1)
            ):
                if d.isoweekday() != 3:  # Wednesday special holiday
                    continue
                self[d] = "Paper Crisis"
        elif year == 1969:
            self[date(year, FEB, 10)] = "Heavy Snow"
            self[
                date(year, MAR, 31)
            ] = "Funeral of President Dwight D. Eisenhower"
            self[
                date(year, JUL, 21)
            ] = "National Participation in Lunar Exploration"
        elif year == 1972:
            self[date(year, DEC, 28)] = "Funeral for President Harry S. Truman"
        elif year == 1973:
            self[
                date(year, JAN, 25)
            ] = "Funeral for President Lyndon B. Johnson"
        elif year == 1977:
            # Typo fix: was "Blackout in New Yor City".
            self[date(year, JUL, 14)] = "Blackout in New York City"
        elif year == 1994:
            self[
                date(year, APR, 27)
            ] = "Funeral for President Richard M. Nixon"
        elif year == 2001:
            self[date(year, SEP, 11)] = "Closed for Sept 11, 2001 Attacks"
            self[date(year, SEP, 12)] = "Closed for Sept 11, 2001 Attacks"
            self[date(year, SEP, 13)] = "Closed for Sept 11, 2001 Attacks"
            self[date(year, SEP, 14)] = "Closed for Sept 11, 2001 Attacks"
        elif year == 2004:
            self[
                date(year, JUN, 11)
            ] = "Day of Mourning for President Ronald W. Reagan"
        elif year == 2007:
            self[
                date(year, JAN, 2)
            ] = "Day of Mourning for President Gerald R. Ford"
class XNYS(NewYorkStockExchange):
    """Market-code alias of :class:`NewYorkStockExchange`."""

    pass
class NYSE(NewYorkStockExchange):
    """Short alias of :class:`NewYorkStockExchange`."""

    pass
| mit | 454f1f297f0368ebe8a57eb5662d62b9 | 37.906977 | 79 | 0.523354 | 3.313809 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/india.py | 1 | 8232 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import warnings
from datetime import date
from holidays.constants import JAN, FEB, MAR, APR, MAY, JUN, AUG, OCT, NOV, DEC
from holidays.holiday_base import HolidayBase
class India(HolidayBase):
    """Indian national and state holidays.

    https://www.india.gov.in/calendar
    https://www.india.gov.in/state-and-ut-holiday-calendar
    https://en.wikipedia.org/wiki/Public_holidays_in_India
    https://www.calendarlabs.com/holidays/india/2021
    https://slusi.dacnet.nic.in/watershedatlas/list_of_state_abbreviation.htm
    https://vahan.parivahan.gov.in/vahan4dashboard/
    """

    country = "IN"
    subdivisions = [
        "AN",  # Andaman and Nicobar Islands
        "AP",  # Andhra Pradesh
        "AR",  # Arunachal Pradesh
        "AS",  # Assam
        "BR",  # Bihar
        "CG",  # Chhattisgarh
        "CH",  # Chandigarh
        "DD",  # Daman and Diu
        "DH",  # Dadra and Nagar Haveli
        "DL",  # Delhi
        "GA",  # Goa
        "GJ",  # Gujarat
        "HP",  # Himachal Pradesh
        "HR",  # Haryana
        "JH",  # Jharkhand
        "JK",  # Jammu and Kashmir
        "KA",  # Karnataka
        "KL",  # Kerala
        "LA",  # Ladakh
        "LD",  # Lakshadweep
        "MH",  # Maharashtra
        "ML",  # Meghalaya
        "MN",  # Manipur
        "MP",  # Madhya Pradesh
        "MZ",  # Mizoram
        "NL",  # Nagaland
        "OR",  # Orissa / Odisha (Govt sites (dacnet/vahan) use code "OR")
        "PB",  # Punjab
        "PY",  # Pondicherry
        "RJ",  # Rajasthan
        "SK",  # Sikkim
        "TN",  # Tamil Nadu
        "TR",  # Tripura
        "TS",  # Telangana
        "UK",  # Uttarakhand
        "UP",  # Uttar Pradesh
        "WB",  # West Bengal
    ]

    # Diwali and Holi dates as ((month, day), (month, day)) pairs per year.
    # Directly lifted from FBProphet:
    # https://github.com/facebook/prophet/blob/main/python/prophet/hdays.py
    _DIWALI_HOLI_DATES = {
        2010: ((DEC, 5), (FEB, 28)),
        2011: ((OCT, 26), (MAR, 19)),
        2012: ((NOV, 13), (MAR, 8)),
        2013: ((NOV, 3), (MAR, 26)),
        2014: ((OCT, 23), (MAR, 17)),
        2015: ((NOV, 11), (MAR, 6)),
        2016: ((OCT, 30), (MAR, 24)),
        2017: ((OCT, 19), (MAR, 13)),
        2018: ((NOV, 7), (MAR, 2)),
        2019: ((OCT, 27), (MAR, 21)),
        2020: ((NOV, 14), (MAR, 9)),
        2021: ((NOV, 4), (MAR, 28)),
        2022: ((OCT, 24), (MAR, 18)),
        2023: ((OCT, 12), (MAR, 7)),
        2024: ((NOV, 1), (MAR, 25)),
        2025: ((OCT, 21), (MAR, 14)),
        2026: ((NOV, 8), (MAR, 3)),
        2027: ((OCT, 29), (MAR, 22)),
        2028: ((OCT, 17), (MAR, 11)),
        2029: ((NOV, 5), (FEB, 28)),
        2030: ((OCT, 26), (MAR, 19)),
    }

    def _populate(self, year):
        """Populate the calendar with Indian holidays for ``year``."""
        super()._populate(year)

        # Pongal/ Makar Sankranti
        self[date(year, JAN, 14)] = "Makar Sankranti / Pongal"

        if year >= 1950:
            # Republic Day
            self[date(year, JAN, 26)] = "Republic Day"

        if year >= 1947:
            # Independence Day
            self[date(year, AUG, 15)] = "Independence Day"

        # Gandhi Jayanti
        self[date(year, OCT, 2)] = "Gandhi Jayanti"

        # Labour Day
        self[date(year, MAY, 1)] = "Labour Day"

        # Christmas
        self[date(year, DEC, 25)] = "Christmas"

        # GJ: Gujarat — note Uttarayan overwrites Makar Sankranti on Jan 14.
        if self.subdiv == "GJ":
            self[date(year, JAN, 14)] = "Uttarayan"
            self[date(year, MAY, 1)] = "Gujarat Day"
            self[date(year, OCT, 31)] = "Sardar Patel Jayanti"

        if self.subdiv == "BR":
            self[date(year, MAR, 22)] = "Bihar Day"

        if self.subdiv == "RJ":
            self[date(year, MAR, 30)] = "Rajasthan Day"
            self[date(year, JUN, 15)] = "Maharana Pratap Jayanti"

        if self.subdiv == "OR":
            self[date(year, APR, 1)] = "Odisha Day (Utkala Dibasa)"
            self[
                date(year, APR, 15)
            ] = "Maha Vishuva Sankranti / Pana Sankranti"

        if self.subdiv in (
            "OR",
            "AP",
            "BR",
            "WB",
            "KL",
            "HR",
            "MH",
            "UP",
            "UK",
            "TN",
        ):
            self[date(year, APR, 14)] = "Dr. B. R. Ambedkar's Jayanti"

        if self.subdiv == "TN":
            self[date(year, APR, 14)] = "Puthandu (Tamil New Year)"
            self[date(year, APR, 15)] = "Puthandu (Tamil New Year)"

        if self.subdiv == "WB":
            self[date(year, APR, 14)] = "Pohela Boishakh"
            self[date(year, APR, 15)] = "Pohela Boishakh"
            self[date(year, MAY, 9)] = "Rabindra Jayanti"

        if self.subdiv == "AS":
            self[date(year, APR, 15)] = "Bihu (Assamese New Year)"

        if self.subdiv == "MH":
            self[date(year, MAY, 1)] = "Maharashtra Day"
            self[date(year, OCT, 15)] = "Dussehra"

        if self.subdiv == "SK":
            self[date(year, MAY, 16)] = "Annexation Day"

        if self.subdiv == "KA":
            self[date(year, NOV, 1)] = "Karnataka Rajyotsava"

        if self.subdiv == "AP":
            self[date(year, NOV, 1)] = "Andhra Pradesh Foundation Day"

        if self.subdiv == "HR":
            self[date(year, NOV, 1)] = "Haryana Foundation Day"

        if self.subdiv == "MP":
            self[date(year, NOV, 1)] = "Madhya Pradesh Foundation Day"

        if self.subdiv == "KL":
            self[date(year, NOV, 1)] = "Kerala Foundation Day"

        if self.subdiv == "CG":
            self[date(year, NOV, 1)] = "Chhattisgarh Foundation Day"

        if self.subdiv == "TS":
            self[date(year, OCT, 6)] = "Bathukamma Festival"
            # NOTE(review): Eid al-Fitr is a movable feast, yet this is a
            # fixed Apr 6 date for every year — confirm against upstream.
            self[date(year, APR, 6)] = "Eid al-Fitr"

        # Diwali and Holi are only tabulated for 2010-2030 (see
        # _DIWALI_HOLI_DATES); warnings kept in place so that users are
        # aware the dates are unavailable outside that range.
        if year < 2010 or year > 2030:
            warning_msg = (
                "Diwali and Holi holidays available from 2010 to 2030 only"
            )
            warnings.warn(warning_msg, Warning)
        else:
            diwali, holi = self._DIWALI_HOLI_DATES[year]
            self[date(year, *diwali)] = "Diwali"
            self[date(year, *holi)] = "Holi"
# ISO 3166-1 alpha-2 country-code alias for India.
class IN(India):
    pass
# ISO 3166-1 alpha-3 country-code alias for India.
class IND(India):
    pass
| mit | 220e638722f8cff2accc00131d419e3d | 31.537549 | 79 | 0.505588 | 3.037638 | false | false | false | false |
dr-prodigy/python-holidays | holidays/countries/slovakia.py | 1 | 2396 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import warnings
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import JAN, MAY, JUL, AUG, SEP, OCT, NOV, DEC
from holidays.holiday_base import HolidayBase
class Slovakia(HolidayBase):
    """Slovak public holidays.

    References:
    https://sk.wikipedia.org/wiki/Sviatok
    https://www.slov-lex.sk/pravne-predpisy/SK/ZZ/1993/241/20181011.html
    """

    country = "SK"

    def _populate(self, year):
        """Populate this dict with the Slovak holidays for ``year``."""
        super()._populate(year)
        # Jan 1 - Day of the Establishment of the Slovak Republic
        self[date(year, JAN, 1)] = "Deň vzniku Slovenskej republiky"
        # Jan 6 - Epiphany / Three Kings (and Orthodox Christmas)
        self[date(year, JAN, 6)] = (
            "Zjavenie Pána (Traja králi a"
            " vianočnýsviatok pravoslávnych"
            " kresťanov)"
        )
        e = easter(year)
        # Good Friday and Easter Monday are computed relative to Easter Sunday.
        self[e - rd(days=2)] = "Veľký piatok"
        self[e + rd(days=1)] = "Veľkonočný pondelok"
        # May 1 - Labour Day
        self[date(year, MAY, 1)] = "Sviatok práce"
        # May 8 - Day of Victory over Fascism; only a holiday since 1997
        if year >= 1997:
            self[date(year, MAY, 8)] = "Deň víťazstva nad fašizmom"
        # Jul 5 - St. Cyril and Methodius Day
        self[date(year, JUL, 5)] = "Sviatok svätého Cyrila a svätého Metoda"
        # Aug 29 - Slovak National Uprising anniversary
        self[date(year, AUG, 29)] = (
            "Výročie Slovenského národného" " povstania"
        )
        # Sep 1 - Constitution Day
        self[date(year, SEP, 1)] = "Deň Ústavy Slovenskej republiky"
        # Sep 15 - Our Lady of Sorrows
        self[date(year, SEP, 15)] = "Sedembolestná Panna Mária"
        # One-off holiday in 2018: 100th anniversary of the Declaration
        # of the Slovak Nation.
        if year == 2018:
            self[date(year, OCT, 30)] = (
                "100. výročie prijatia" " Deklarácie slovenského národa"
            )
        # Nov 1 - All Saints' Day
        self[date(year, NOV, 1)] = "Sviatok Všetkých svätých"
        # Nov 17 - Struggle for Freedom and Democracy Day; since 2001
        if year >= 2001:
            self[date(year, NOV, 17)] = "Deň boja za slobodu a demokraciu"
        # Dec 24-26 - Christmas Eve plus the two Christmas Days
        self[date(year, DEC, 24)] = "Štedrý deň"
        self[date(year, DEC, 25)] = "Prvý sviatok vianočný"
        self[date(year, DEC, 26)] = "Druhý sviatok vianočný"
# ISO 3166-1 alpha-2 country-code alias for Slovakia.
class SK(Slovakia):
    pass
# ISO 3166-1 alpha-3 country-code alias for Slovakia.
class SVK(Slovakia):
    pass
| mit | d7099eb9a9c89e73b71bac8294b34808 | 28.708861 | 78 | 0.614827 | 2.622346 | false | false | false | false |
rlabbe/filterpy | filterpy/common/helpers.py | 1 | 14251 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name, bare-except
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import print_function
from collections import defaultdict, deque
import copy
import inspect
import numpy as np
class Saver(object):
    """
    Helper class to save the states of any filter object.
    Each time you call save() all of the attributes (state, covariances, etc)
    are appended to lists.
    Generally you would do this once per epoch - predict/update.
    Then, you can access any of the states by using the [] syntax or by
    using the . operator.

    .. code-block:: Python

        my_saver = Saver()
        ... do some filtering
        x = my_saver['x']
        x = my_save.x

    Either returns a list of all of the state `x` values for the entire
    filtering process.
    If you want to convert all saved lists into numpy arrays, call to_array().

    Parameters
    ----------
    kf : object
        any object with a __dict__ attribute, but intended to be one of the
        filtering classes
    save_current : bool, default=False
        save the current state of `kf` when the object is created;
    skip_private: bool, default=False
        Control skipping any private attribute (anything starting with '_')
        Turning this on saves memory, but slows down execution a bit.
    skip_callable: bool, default=False
        Control skipping any attribute which is a method. Turning this on
        saves memory, but slows down execution a bit.
    ignore: (str,) tuple of strings
        list of keys to ignore.

    Examples
    --------
    .. code-block:: Python

        kf = KalmanFilter(...whatever)
        # initialize kf here
        saver = Saver(kf) # save data for kf filter
        for z in zs:
            kf.predict()
            kf.update(z)
            saver.save()
        x = np.array(s.x) # get the kf.x state in an np.array
        plt.plot(x[:, 0], x[:, 2])
        # ... or ...
        s.to_array()
        plt.plot(s.x[:, 0], s.x[:, 2])
    """

    def __init__(
        self, kf, save_current=False, skip_private=False, skip_callable=False, ignore=()
    ):
        """Construct the save object, optionally saving the current
        state of the filter"""
        # pylint: disable=too-many-arguments
        self._kf = kf
        # maps attribute name -> list of saved values, one entry per save()
        self._DL = defaultdict(list)
        self._skip_private = skip_private
        self._skip_callable = skip_callable
        self._ignore = ignore
        self._len = 0
        # need to save all properties since it is possible that the property
        # is computed only on access. I use this trick a lot to minimize
        # computing unused information.
        properties = inspect.getmembers(
            type(kf), lambda o: isinstance(o, property)
        )
        self.properties = [p for p in properties if p[0] not in ignore]
        if save_current:
            self.save()

    def save(self):
        """save the current state of the Kalman filter"""
        kf = self._kf
        # force all attributes to be computed. this is only necessary
        # if the class uses properties that compute data only when
        # accessed
        for prop in self.properties:
            self._DL[prop[0]].append(getattr(kf, prop[0]))
        # deep copy so later mutations of the filter do not alter history
        v = copy.deepcopy(kf.__dict__)
        if self._skip_private:
            for key in list(v.keys()):
                if key.startswith("_"):
                    del v[key]
        if self._skip_callable:
            for key in list(v.keys()):
                if callable(v[key]):
                    del v[key]
        for ig in self._ignore:
            if ig in v:
                del v[ig]
        for key in list(v.keys()):
            self._DL[key].append(v[key])
        # mirror the saved lists as attributes so `saver.x` works
        self.__dict__.update(self._DL)
        self._len += 1

    def __getitem__(self, key):
        # dict-style access to the saved history of one attribute
        return self._DL[key]

    def __setitem__(self, key, newvalue):
        self._DL[key] = newvalue
        # keep the attribute mirror in sync with the underlying dict
        self.__dict__.update(self._DL)

    def __len__(self):
        # number of save() calls performed so far
        return self._len

    @property
    def keys(self):
        """list of all keys"""
        return list(self._DL.keys())

    def to_array(self, flatten=False):
        """
        Convert all saved attributes from a list to np.array.
        This may or may not work - every saved attribute must have the
        same shape for every instance. i.e., if `K` changes shape due to `z`
        changing shape then the call will raise an exception.
        This can also happen if the default initialization in __init__ gives
        the variable a different shape then it becomes after a predict/update
        cycle.
        """
        for key in self.keys:
            try:
                self.__dict__[key] = np.array(self._DL[key])
            except:
                # get back to lists so we are in a valid state
                self.__dict__.update(self._DL)
                raise ValueError("could not convert {} into np.array".format(key))
        if flatten:
            self.flatten()

    def flatten(self):
        """
        Flattens any np.array of column vectors into 1D arrays. Basically,
        this makes data readable for humans if you are just inspecting via
        the REPL. For example, if you have saved a KalmanFilter object with 89
        epochs, self.x will be shape (89, 9, 1) (for example). After flatten
        is run, self.x.shape == (89, 9), which displays nicely from the REPL.
        There is no way to unflatten, so it's a one way trip.
        """
        for key in self.keys:
            try:
                arr = self.__dict__[key]
                shape = arr.shape
                # (N, m, 1) -> (N, m): drop the trailing column dimension
                if shape[2] == 1:
                    self.__dict__[key] = arr.reshape(shape[0], shape[1])
                arr = self.__dict__[key]
                shape = arr.shape
                # (N, 1) -> (N,): collapse what is now a column vector
                if len(shape) == 2 and shape[1] == 1:
                    self.__dict__[key] = arr.ravel()
            except:
                # not an ndarray or not a column vector
                pass

    def __repr__(self):
        return "<Saver object at {}\n Keys: {}>".format(
            hex(id(self)), " ".join(self.keys)
        )
def runge_kutta4(y, x, dx, f):
    """Advance `y` one step of size `dx` with classical 4th-order Runge-Kutta.

    Parameters
    ----------
    y : scalar
        Initial/current value for y
    x : scalar
        Initial/current value for x
    dx : scalar
        difference in x (e.g. the time step)
    f : ufunc(y,x)
        Callable function (y, x) that you supply to compute dy/dx for
        the specified values.
    """
    half_dx = 0.5 * dx
    # four slope samples scaled by the step size
    k1 = dx * f(y, x)
    k2 = dx * f(y + 0.5 * k1, x + half_dx)
    k3 = dx * f(y + 0.5 * k2, x + half_dx)
    k4 = dx * f(y + k3, x + dx)
    # weighted average: midpoint slopes count double
    return y + (k1 + 2 * k2 + 2 * k3 + k4) / 6.0
def pretty_str(label, arr):
    """
    Generates a pretty printed NumPy array with an assignment. Optionally
    transposes column vectors so they are drawn on one line. Strictly speaking
    arr can be any time convertible by `str(arr)`, but the output may not
    be what you want if the type of the variable is not a scalar or an
    ndarray.

    Examples
    --------
    >>> pprint('cov', np.array([[4., .1], [.1, 5]]))
    cov = [[4.  0.1]
           [0.1 5. ]]
    >>> print(pretty_str('x', np.array([[1], [2], [3]])))
    x = [[1 2 3]].T
    """

    def _is_column_vector(candidate):
        """True for an array shaped (n, 1) with n > 1."""
        try:
            return candidate.shape[0] > 1 and candidate.shape[1] == 1
        except (AttributeError, IndexError):
            return False

    # Empty containers render as their bare constructor output, e.g. "x = []".
    try:
        if len(arr) == 0:
            return label + " = " + str(type(arr)())
    except TypeError:
        pass

    # Sequences recurse element by element with an indexed label.
    if type(arr) in (list, tuple, deque):
        pieces = [
            pretty_str(label + "[" + str(idx) + "]", element)
            for idx, element in enumerate(arr)
        ]
        return "\n".join(pieces)

    prefix = "" if label is None else label
    if prefix:
        prefix += " = "

    # Column vectors are drawn transposed on a single line.
    if _is_column_vector(arr):
        return prefix + str(arr.T).replace("\n", "") + ".T"

    lines = str(arr).split("\n")
    if not lines:
        return ""

    # Pad continuation lines so multi-row arrays stay visually aligned
    # under the "label = " prefix.
    padding = " " * len(prefix)
    rendered = [prefix + lines[0]]
    rendered.extend(padding + line for line in lines[1:])
    return "\n".join(rendered)
def pprint(label, arr, **kwargs):
    """Pretty print a NumPy array using pretty_str. Keyword arguments
    are forwarded to the print() function.

    See Also
    --------
    pretty_str

    Examples
    --------
    >>> pprint('cov', np.array([[4., .1], [.1, 5]]))
    cov = [[4.  0.1]
           [0.1 5. ]]
    """
    rendered = pretty_str(label, arr)
    print(rendered, **kwargs)
def reshape_z(z, dim_z, ndim):
    """Coerce the measurement `z` to a (dim_z, 1) column vector, then
    reshape it to `ndim` dimensions (2 -> column, 1 -> flat, 0 -> scalar).

    Raises ValueError if z cannot be interpreted as dim_z measurements.
    """
    measurement = np.atleast_2d(z)
    # a row vector of the right width becomes a column vector
    if measurement.shape[1] == dim_z:
        measurement = measurement.T

    if measurement.shape != (dim_z, 1):
        raise ValueError(
            "z (shape {}) must be convertible to shape ({}, 1)".format(
                measurement.shape, dim_z))

    # strip dimensions to match the caller's requested rank
    if ndim == 1:
        measurement = measurement[:, 0]
    if ndim == 0:
        measurement = measurement[0, 0]
    return measurement
def inv_diagonal(S):
    """
    Computes the inverse of a diagonal NxN np.array S. In general this will
    be much faster than calling np.linalg.inv().
    However, does NOT check if the off diagonal elements are non-zero. So long
    as S is truly diagonal, the output is identical to np.linalg.inv().

    Parameters
    ----------
    S : np.array
        diagonal NxN array to take inverse of

    Returns
    -------
    S_inv : np.array
        inverse of S

    Examples
    --------
    This is meant to be used as a replacement inverse function for
    the KalmanFilter class when you know the system covariance S is
    diagonal. It just makes the filter run faster, there is
    >>> kf = KalmanFilter(dim_x=3, dim_z=1)
    >>> kf.inv = inv_diagonal # S is 1x1, so safely diagonal
    """
    S = np.asarray(S)
    if S.ndim != 2 or S.shape[0] != S.shape[1]:
        raise ValueError("S must be a square Matrix")
    # invert the diagonal only; off-diagonal entries of the result are zero
    return np.diag(1.0 / S.diagonal())
def outer_product_sum(A, B=None):
    r"""
    Computes the sum of the outer products of the rows in A and B
    P = \Sum {A[i] B[i].T} for i in 0..N
    Notionally:
    P = 0
    for y in A:
        P += np.outer(y, y)
    This is a standard computation for sigma points used in the UKF, ensemble
    Kalman filter, etc., where A would be the residual of the sigma points
    and the filter's state or measurement.
    The computation is vectorized, so it is much faster than the for loop
    for large A.

    Parameters
    ----------
    A : np.array, shape (M, N)
        rows of N-vectors to have the outer product summed
    B : np.array, shape (M, N)
        rows of N-vectors to have the outer product summed
        If it is `None`, it is set to A.

    Returns
    -------
    P : np.array, shape(N, N)
        sum of the outer product of the rows of A and B

    Examples
    --------
    >>> P = outer_product_sum(sigmas - x)
    """
    if B is None:
        B = A
    # stack of per-row outer products, shape (M, N, N)
    products = np.einsum("ij,ik->ijk", A, B)
    # collapse over the row axis to get the (N, N) sum
    return products.sum(axis=0)
def compare_kf(kf1, kf2, log=True, **kwargs):
    """Compare two Kalman filters attribute by attribute.

    For each public variable both objects have in common (x, P, S, K, etc.)
    compare them using np.allclose(). Extra keyword arguments (e.g. ``atol``,
    ``rtol``) are forwarded to np.allclose().

    Parameters
    ----------
    kf1, kf2 : object
        any objects with a __dict__; intended to be filter instances
    log : bool, default True
        if True, print a report of each differing attribute

    Returns
    -------
    list of str or None
        names of the differing attributes, or None when all match.
    """
    # get variables common to both objects
    v1, v2 = vars(kf1), vars(kf2)
    k1, k2 = set(v1.keys()), set(v2.keys())
    attrs = k2.intersection(k1)

    different_keys = []
    for attr in attrs:
        # private attributes are implementation details; skip them
        if attr[0] == "_":
            continue
        if not np.allclose(v1[attr], v2[attr], **kwargs):
            if log:
                print(attr, "is different")
                print(pretty_str(attr, v1[attr]))
                print(pretty_str(attr, v2[attr]))
                print()
            different_keys.append(attr)

    # BUG FIX: previously returned the undefined name `different`,
    # raising NameError whenever any attribute differed.
    if len(different_keys) > 0:
        return different_keys
    return None
def copy_states(dst, src):
    """Copy filter states from `src` to `dst`.

    For each variable that `dst` and `src` have in common, deep-copy the
    value from the source to the destination.
    This has the potential of breaking things if you are using two different
    types - the destination could end up in an incorrect state since not
    all variables may be initalized correctly.
    The main use case is for testing or comparing different algorithms:

        kf1 = KalmanFilter()
        kf1.F = ...
        kf2 = KalmanFilter()
        copy_states(kf2, kf1)   # reuse F, P, etc from kf1
    """
    # only attributes present on BOTH objects are transferred
    dst_vars, src_vars = vars(dst), vars(src)
    shared = set(src_vars.keys()).intersection(set(dst_vars.keys()))

    for name in shared:
        value = getattr(src, name)
        # never clobber bound methods/functions, only data attributes
        if type(value).__name__ in ["method", "function"]:
            continue
        dst.__dict__[name] = copy.deepcopy(value)
def repr_string(obj, private=True):
    """Generate a __repr__ string for a filter object.

    It will pretty print numpy arrays to be readable, and display lists
    with indexed values. It also gathers up all properties. Set
    `private=False` to omit single-underscore attributes.
    """
    # drop dunder names; optionally drop single-underscore ones too
    names = [n for n in obj.__dir__() if not n.startswith("__")]
    if not private:
        names = [n for n in names if not n.startswith("_")]

    lines = []
    for name in names:
        value = getattr(obj, name)
        # only data attributes are rendered, never bound methods
        if type(value).__name__ not in ["method", "function"]:
            lines.append(pretty_str(name, value))
    return type(obj).__name__ + " object\n" + "\n".join(lines)
| mit | 3a35b3a7e4dce93364713c72e13cbc63 | 26.093156 | 88 | 0.556593 | 3.719916 | false | false | false | false |
rlabbe/filterpy | filterpy/kalman/square_root.py | 1 | 10556 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-instance-attributes
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division)
from copy import deepcopy
import numpy as np
from numpy import dot, zeros, eye
from scipy.linalg import cholesky, qr, pinv
from filterpy.common import pretty_str
class SquareRootKalmanFilter(object):
    """
    Create a Kalman filter which uses a square root implementation.
    This uses the square root of the state covariance matrix, which doubles
    the numerical precision of the filter, thereby reducing the effect
    of round off errors.
    It is likely that you do not need to use this algorithm; we understand
    divergence issues very well now. However, if you expect the covariance
    matrix P to vary by 20 or more orders of magnitude then perhaps this
    will be useful to you, as the square root will vary by 10 orders
    of magnitude. From my point of view this is merely a 'reference'
    algorithm; I have not used this code in real world software. Brown[1]
    has a useful discussion of when you might need to use the square
    root form of this algorithm.
    You are responsible for setting the various state variables to
    reasonable values; the defaults below will not give you a functional
    filter.

    Parameters
    ----------
    dim_x : int
        Number of state variables for the Kalman filter. For example, if
        you are tracking the position and velocity of an object in two
        dimensions, dim_x would be 4.
        This is used to set the default size of P, Q, and u
    dim_z : int
        Number of of measurement inputs. For example, if the sensor
        provides you with position in (x,y), dim_z would be 2.
    dim_u : int (optional)
        size of the control input, if it is being used.
        Default value of 0 indicates it is not used.

    Attributes
    ----------
    x : numpy.array(dim_x, 1)
        State estimate
    P : numpy.array(dim_x, dim_x)
        State covariance matrix
    x_prior : numpy.array(dim_x, 1)
        Prior (predicted) state estimate. The *_prior and *_post attributes
        are for convienence; they store the prior and posterior of the
        current epoch. Read Only.
    P_prior : numpy.array(dim_x, dim_x)
        Prior (predicted) state covariance matrix. Read Only.
    x_post : numpy.array(dim_x, 1)
        Posterior (updated) state estimate. Read Only.
    P_post : numpy.array(dim_x, dim_x)
        Posterior (updated) state covariance matrix. Read Only.
    z : numpy.array
        Last measurement used in update(). Read only.
    R : numpy.array(dim_z, dim_z)
        Measurement noise matrix
    Q : numpy.array(dim_x, dim_x)
        Process noise matrix
    F : numpy.array()
        State Transition matrix
    H : numpy.array(dim_z, dim_x)
        Measurement function
    y : numpy.array
        Residual of the update step. Read only.
    K : numpy.array(dim_x, dim_z)
        Kalman gain of the update step. Read only.

    Examples
    --------
    See my book Kalman and Bayesian Filters in Python
    https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

    References
    ----------
    [1] Robert Grover Brown. Introduction to Random Signals and Applied
        Kalman Filtering. Wiley and sons, 2012.
    """

    def __init__(self, dim_x, dim_z, dim_u=0):
        # BUG FIX: the first check previously tested dim_z instead of dim_x,
        # so dim_x was never validated, and the dim_z/dim_u error messages
        # incorrectly named 'dim_x'.
        if dim_x < 1:
            raise ValueError('dim_x must be 1 or greater')
        if dim_z < 1:
            raise ValueError('dim_z must be 1 or greater')
        if dim_u < 0:
            raise ValueError('dim_u must be 0 or greater')

        self.dim_x = dim_x
        self.dim_z = dim_z
        self.dim_u = dim_u

        self.x = zeros((dim_x, 1))  # state
        self._P = eye(dim_x)        # uncertainty covariance
        self._P1_2 = eye(dim_x)     # sqrt uncertainty covariance
        self._Q = eye(dim_x)        # process uncertainty
        self._Q1_2 = eye(dim_x)     # sqrt process uncertainty
        self.B = 0.                 # control transition matrix
        self.F = np.eye(dim_x)      # state transition matrix
        self.H = np.zeros((dim_z, dim_x))  # Measurement function
        self._R1_2 = eye(dim_z)     # sqrt state uncertainty
        self._R = eye(dim_z)        # state uncertainty
        self.z = np.array([[None]*self.dim_z]).T

        self.K = np.zeros((dim_x, dim_z))     # kalman gain
        self.S1_2 = np.zeros((dim_z, dim_z))  # sqrt system uncertainty
        self.SI1_2 = np.zeros((dim_z, dim_z)) # Inverse sqrt system uncertainty

        # Residual is computed during the innovation (update) step. We
        # save it so that in case you want to inspect it for various
        # purposes
        self.y = zeros((dim_z, 1))

        # identity matrix.
        self._I = np.eye(dim_x)

        # pre-allocated workspace for the QR-based measurement update
        self.M = np.zeros((dim_z + dim_x, dim_z + dim_x))

        # copy prior and posterior
        self.x_prior = np.copy(self.x)
        self._P1_2_prior = np.copy(self._P1_2)
        self.x_post = np.copy(self.x)
        self._P1_2_post = np.copy(self._P1_2)

    def update(self, z, R2=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------
        z : np.array
            measurement for this update.
        R2 : np.array, scalar, or None
            Sqrt of measurement noise. Optionally provide to override the
            measurement noise for this one call, otherwise self.R2 will
            be used.
        """
        if z is None:
            self.z = np.array([[None]*self.dim_z]).T
            self.x_post = self.x.copy()
            self._P1_2_post = np.copy(self._P1_2)
            return

        if R2 is None:
            R2 = self._R1_2
        elif np.isscalar(R2):
            R2 = eye(self.dim_z) * R2

        # rename for convenience
        dim_z = self.dim_z
        M = self.M

        # assemble the pre-array; its triangularization yields sqrt(S),
        # the gain numerator, and the updated sqrt covariance in one QR pass
        M[0:dim_z, 0:dim_z] = R2.T
        M[dim_z:, 0:dim_z] = dot(self.H, self._P1_2).T
        M[dim_z:, dim_z:] = self._P1_2.T

        _, r_decomp = qr(M)
        self.S1_2 = r_decomp[0:dim_z, 0:dim_z].T
        self.SI1_2 = pinv(self.S1_2)
        self.K = dot(r_decomp[0:dim_z, dim_z:].T, self.SI1_2)

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(self.H, self.x)

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x += dot(self.K, self.y)
        self._P1_2 = r_decomp[dim_z:, dim_z:].T

        self.z = deepcopy(z)
        self.x_post = self.x.copy()
        self._P1_2_post = np.copy(self._P1_2)

    def predict(self, u=0):
        """
        Predict next state (prior) using the Kalman filter state propagation
        equations.

        Parameters
        ----------
        u : np.array, optional
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """
        # x = Fx + Bu
        self.x = dot(self.F, self.x) + dot(self.B, u)

        # P = FPF' + Q, performed on the square roots via QR
        _, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
        self._P1_2 = P2[:self.dim_x, :self.dim_x].T

        # copy prior
        self.x_prior = np.copy(self.x)
        self._P1_2_prior = np.copy(self._P1_2)

    def residual_of(self, z):
        """ returns the residual for the given measurement (z). Does not alter
        the state of the filter.
        """
        return z - dot(self.H, self.x)

    def measurement_of_state(self, x):
        """ Helper function that converts a state into a measurement.

        Parameters
        ----------
        x : np.array
            kalman state vector

        Returns
        -------
        z : np.array
            measurement corresponding to the given state
        """
        return dot(self.H, x)

    @property
    def Q(self):
        """ Process uncertainty"""
        return dot(self._Q1_2, self._Q1_2.T)

    @property
    def Q1_2(self):
        """ Sqrt Process uncertainty"""
        return self._Q1_2

    @Q.setter
    def Q(self, value):
        """ Process uncertainty"""
        self._Q = value
        self._Q1_2 = cholesky(self._Q, lower=True)

    @property
    def P(self):
        """ covariance matrix"""
        return dot(self._P1_2, self._P1_2.T)

    @property
    def P_prior(self):
        """ covariance matrix of the prior"""
        return dot(self._P1_2_prior, self._P1_2_prior.T)

    @property
    def P_post(self):
        """ covariance matrix of the posterior"""
        # BUG FIX: previously computed from _P1_2_prior, which returned the
        # prior covariance instead of the posterior.
        return dot(self._P1_2_post, self._P1_2_post.T)

    @property
    def P1_2(self):
        """ sqrt of covariance matrix"""
        return self._P1_2

    @P.setter
    def P(self, value):
        """ covariance matrix"""
        self._P = value
        self._P1_2 = cholesky(self._P, lower=True)

    @property
    def R(self):
        """ measurement uncertainty"""
        return dot(self._R1_2, self._R1_2.T)

    @property
    def R1_2(self):
        """ sqrt of measurement uncertainty"""
        return self._R1_2

    @R.setter
    def R(self, value):
        """ measurement uncertainty"""
        self._R = value
        self._R1_2 = cholesky(self._R, lower=True)

    @property
    def S(self):
        """ system uncertainty (P projected to measurement space) """
        return dot(self.S1_2, self.S1_2.T)

    @property
    def SI(self):
        """ inverse system uncertainty (P projected to measurement space) """
        return dot(self.SI1_2.T, self.SI1_2)

    def __repr__(self):
        return '\n'.join([
            'SquareRootKalmanFilter object',
            pretty_str('dim_x', self.dim_x),
            pretty_str('dim_z', self.dim_z),
            pretty_str('dim_u', self.dim_u),
            pretty_str('x', self.x),
            pretty_str('P', self.P),
            pretty_str('F', self.F),
            pretty_str('Q', self.Q),
            pretty_str('R', self.R),
            pretty_str('H', self.H),
            pretty_str('K', self.K),
            pretty_str('y', self.y),
            pretty_str('S', self.S),
            pretty_str('SI', self.SI),
            pretty_str('M', self.M),
            pretty_str('B', self.B),
            ])
| mit | 4529e2d1a16ee14786b291e7a6988a9e | 28.322222 | 79 | 0.571618 | 3.487281 | false | false | false | false |
rlabbe/filterpy | filterpy/common/tests/test_discretization.py | 1 | 2662 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from filterpy.common import (linear_ode_discretation, Q_discrete_white_noise,
kinematic_kf)
from numpy import array
def near_eq(x, y):
    """True when x and y differ by less than 1e-17 (near machine zero)."""
    delta = abs(x - y)
    return delta < 1.e-17
def test_kinematic():
    # Smoke test: constructing a 1-dimensional, order-1 kinematic filter
    # must not raise. The returned filter is intentionally unused.
    kf = kinematic_kf(1,1)
def test_Q_discrete_white_noise():
    # With the default dt=1 and var=1, the 2-state discrete white noise
    # matrix has the closed form [[.25, .5], [.5, 1.]].
    Q = Q_discrete_white_noise (2)
    assert Q[0,0] == .25
    assert Q[1,0] == .5
    assert Q[0,1] == .5
    assert Q[1,1] == 1
    assert Q.shape == (2,2)
def test_linear_ode():
    # 6-state integrator chain (position derivatives) with white noise
    # entering through the last two states via L.
    F = array([[0,0,1,0,0,0],
               [0,0,0,1,0,0],
               [0,0,0,0,1,0],
               [0,0,0,0,0,1],
               [0,0,0,0,0,0],
               [0,0,0,0,0,0]], dtype=float)
    L = array ([[0,0],
                [0,0],
                [0,0],
                [0,0],
                [1,0],
                [0,1]], dtype=float)
    q = .2
    Q = array([[q, 0],[0, q]])
    dt = 0.5
    A,Q = linear_ode_discretation(F, L, Q, dt)
    # Expected first row of the discretized transition matrix; each
    # following row is this row shifted right by one column, zero-filled.
    val = [1, 0, dt, 0, 0.5*dt**2, 0]
    for i in range(6):
        assert val[i] == A[0,i]
    for i in range(6):
        assert val[i-1] == A[1,i] if i > 0 else A[1,i] == 0
    for i in range(6):
        assert val[i-2] == A[2,i] if i > 1 else A[2,i] == 0
    for i in range(6):
        assert val[i-3] == A[3,i] if i > 2 else A[3,i] == 0
    for i in range(6):
        assert val[i-4] == A[4,i] if i > 3 else A[4,i] == 0
    for i in range(6):
        assert val[i-5] == A[5,i] if i > 4 else A[5,i] == 0
    # Closed-form entries of the discretized process noise (first row only).
    assert near_eq(Q[0,0], (1./20)*(dt**5)*q)
    assert near_eq(Q[0,1], 0)
    assert near_eq(Q[0,2], (1/8)*(dt**4)*q)
    assert near_eq(Q[0,3], 0)
    assert near_eq(Q[0,4], (1./6)*(dt**3)*q)
    assert near_eq(Q[0,5], 0)
if __name__ == "__main__":
    # Ad-hoc manual run: execute the tests directly, then print the
    # discretized noise matrix for a 30 Hz step for visual inspection.
    test_linear_ode()
    test_Q_discrete_white_noise()
    F = array([[0,0,1,0,0,0],
               [0,0,0,1,0,0],
               [0,0,0,0,1,0],
               [0,0,0,0,0,1],
               [0,0,0,0,0,0],
               [0,0,0,0,0,0]], dtype=float)
    L = array ([[0,0],
                [0,0],
                [0,0],
                [0,0],
                [1,0],
                [0,1]], dtype=float)
    q = .2
    Q = array([[q, 0],[0, q]])
    dt = 1/30
    A,Q = linear_ode_discretation(F, L, Q, dt)
    print(Q)
rlabbe/filterpy | filterpy/discrete_bayes/discrete_bayes.py | 3 | 3420 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from scipy.ndimage.filters import convolve
from scipy.ndimage.interpolation import shift
def normalize(pdf):
    """Normalize distribution `pdf` in-place so it sums to 1.0.

    Returns pdf for convienence, so you can write things like:
    >>> kernel = normalize(randn(7))

    Parameters
    ----------
    pdf : ndarray
        discrete distribution that needs to be converted to a pdf. Converted
        in-place, i.e., this is modified.

    Returns
    -------
    pdf : ndarray
        The converted pdf.
    """
    # cast to float before summing so integer inputs do not truncate
    total = sum(np.asarray(pdf, dtype=float))
    pdf /= total
    return pdf
def update(likelihood, prior):
    """ Computes the posterior of a discrete random variable given a
    discrete likelihood and prior. In a typical application the likelihood
    will be the likelihood of a measurement matching your current environment,
    and the prior comes from discrete_bayes.predict().

    Parameters
    ----------
    likelihood : ndarray, dtype=flaot
        array of likelihood values
    prior : ndarray, dtype=flaot
        prior pdf.

    Returns
    -------
    posterior : ndarray, dtype=float
        Returns array representing the posterior.

    Examples
    --------
    .. code-block:: Python

        likelihood = np.ones(len(road))
        likelihood[road==z] *= scale_factor
        prior = predict(posterior, velocity, kernel)
        posterior = update(likelihood, prior)
    """
    # element-wise Bayes rule, then renormalize in place so the
    # posterior sums to 1
    posterior = prior * likelihood
    posterior /= sum(np.asarray(posterior, dtype=float))
    return posterior
def predict(pdf, offset, kernel, mode='wrap', cval=0.):
    """ Performs the discrete Bayes filter prediction step, generating
    the prior.

    `pdf` is a discrete probability distribution expressing our initial
    belief. `offset` is an integer specifying how much we want to move to
    the right (negative values means move to the left). Noise in that
    motion is expressed by `kernel`: for example, offset=3 and
    kernel=[.1, .7, .2] means a 70% chance of moving right by 3, 10% by 2,
    and 20% by 4.

    If `mode='wrap'`, the distribution wraps around the array boundary.
    If `mode='constant'` (or any other value) the pdf is shifted, with
    `cval` filling in missing elements.

    Examples
    --------
    .. code-block:: Python

        belief = [.05, .05, .05, .05, .55, .05, .05, .05, .05, .05]
        prior = predict(belief, offset=2, kernel=[.1, .8, .1])
    """
    if mode == 'wrap':
        # circular shift, then spread by the motion-noise kernel
        shifted = np.roll(pdf, offset)
        return convolve(shifted, kernel, mode='wrap')

    # non-wrapping: elements shifted off the edge are lost, cval fills in
    shifted = shift(pdf, offset, cval=cval)
    return convolve(shifted, kernel, cval=cval, mode='constant')
| mit | 7454b396071748989fded0b7f00168ee | 25.71875 | 82 | 0.652924 | 4.076281 | false | false | false | false |
django-salesforce/django-salesforce | tests/test_mixin/models.py | 2 | 2469 | """Demonstrate that a Model can inherite from more abstract models."""
from django.conf import settings
import salesforce
from salesforce import models
from salesforce.models import SalesforceModel
# All demo models simplified for readability, except tested features
class User(SalesforceModel):
    # Minimal subset of the Salesforce User sobject used by these tests.
    username = models.CharField(max_length=80)
    email = models.CharField(max_length=100)
class DefaultMixin(SalesforceModel):
    """Common fields used in the most of SFDC models."""
    # Server-managed timestamp; read-only from the client side.
    last_modified_date = models.DateTimeField(sf_read_only=models.READ_ONLY, auto_now=True)
    # Defaulted by Salesforce at insert time when not supplied.
    owner = models.ForeignKey(User, on_delete=models.DO_NOTHING,
                              default=models.DEFAULTED_ON_CREATE)  # db_column='OwnerId'
    class Meta:
        abstract = True
class CommonAccount(DefaultMixin, SalesforceModel):
    """Common fields of Salesforce Account model."""
    description = models.TextField()
    phone = models.CharField(max_length=255)
    class Meta:
        abstract = True
class CoreAccount(SalesforceModel):
    """Fields specific to standard Account only."""
    name = models.CharField(max_length=255)
    class Meta:
        abstract = True
class PersonAccount(SalesforceModel):
    """Fields specific to Account after activating "Person Account"."""
    # NOTE: field names mirror the Salesforce API names (hence CamelCase).
    LastName = models.CharField(max_length=80)
    FirstName = models.CharField(max_length=40)
    # Compiled by Salesforce from First/LastName; never written by clients.
    Name = models.CharField(max_length=255, sf_read_only=models.READ_ONLY)
    IsPersonAccount = models.BooleanField(default=False, sf_read_only=models.READ_ONLY)
    PersonEmail = models.CharField(max_length=100)
    class Meta:
        abstract = True
# The concrete Account model depends on whether the connected Salesforce
# org has the "Person Account" feature activated (configured in settings).
if not getattr(settings, 'SF_EXAMPLE_PERSON_ACCOUNT_ACTIVATED', False):
    class Account(CommonAccount, CoreAccount):
        pass
else:
    class Account(CommonAccount, PersonAccount):  # type: ignore[no-redef] # noqa
        pass
class DummyMixin:
    # Plain (non-model) mixin placed before the model bases of Contact;
    # presumably exercises MRO handling with the model metaclass - confirm.
    def some_overridden_method(self):
        pass
class DummyMixin2:
    # Empty non-model mixin placed *after* SalesforceModel in Contact's bases.
    pass
class Contact(DummyMixin, DefaultMixin, SalesforceModel, DummyMixin2):
    # Non-model mixins appear on both sides of SalesforceModel in the bases.
    name = models.CharField(max_length=255, sf_read_only=models.READ_ONLY)
    last_name = models.CharField(max_length=80)
    first_name = models.CharField(max_length=40, blank=True)
    account = salesforce.fields.ForeignKey(Account, on_delete=salesforce.models.DO_NOTHING)
# Proxy model over Contact (no new table/fields).
class ProxyContact(Contact):
    class Meta:
        proxy = True
# Second-level proxy: a proxy of a proxy model.
class Proxy2Contact(ProxyContact):
    class Meta:
        proxy = True
| mit | d894bbd35b1ba6c3ffe560359d4fbcd4 | 27.709302 | 91 | 0.714459 | 3.833851 | false | false | false | false |
django-salesforce/django-salesforce | salesforce/backend/schema.py | 2 | 1395 | """
Minimal code to support ignored makemigrations (like django.db.backends.*.schema)
without interaction to SF (without migrate)
"""
from django.db import NotSupportedError
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from salesforce.backend import log
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor stub: lets makemigrations run while rejecting any
    real DDL, since Salesforce schema cannot be migrated this way."""
    # pylint:disable=abstract-method # undefined: prepare_default, quote_value
    def __init__(self, connection, collect_sql=False, atomic=True):
        self.connection_orig = connection
        self.collect_sql = collect_sql
        # if self.collect_sql:
        #     self.collected_sql = []
        super().__init__(connection, collect_sql=collect_sql, atomic=atomic)
    # State-managing methods
    def __enter__(self):
        # Fresh list per context; deferred statements are replayed on exit.
        self.deferred_sql = []  # pylint:disable=attribute-defined-outside-init
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Execute deferred SQL only when the block exited without error.
        if exc_type is None:
            for sql in self.deferred_sql:
                self.execute(sql)
    def execute(self, sql, params=()):
        # Silently accept the two statements Django issues for its own
        # migrations bookkeeping table; everything else is unsupported.
        if (sql == 'CREATE TABLE django_migrations ()'
                or sql.startswith('DROP TABLE ')) and not params:
            return
        raise NotSupportedError("Migration SchemaEditor: %r, %r" % (sql, params))
    def create_model(self, model):
        # Table creation is a no-op on Salesforce; just record it.
        log.info("Skipped in SchemaEditor: create_model %s", model)
| mit | 61f158e38813312b7169e7bc0ab4cc57 | 33.875 | 82 | 0.660215 | 4.067055 | false | false | false | false |
django-salesforce/django-salesforce | salesforce/tests/test_utils.py | 2 | 2731 | """
Tests for `salesforce.utils`
"""
# pylint:disable=protected-access
from unittest import skipUnless
from django.test import TestCase
from salesforce.dbapi.driver import beatbox
from salesforce.testrunner.example.models import Account, Lead
from salesforce.utils import convert_lead
class UtilitiesTest(TestCase):
databases = '__all__'
@skipUnless(beatbox, "Beatbox needs to be installed in order to run this test.")
def test_lead_conversion(self):
"""
Create a Lead object within Salesforce and try to
convert it, convert/merge it with the information from a duplicit Lead,
then clean all the generated objects.
"""
lead = Lead(FirstName="Foo", LastName="Bar", Company="django-salesforce",
Street='Test Avenue 45')
lead.save()
lead2 = Lead(FirstName="Foo", LastName="Bar", Company="django-salesforce",
Phone='123456789')
lead2.save()
ret = None
try:
# convert the first Lead
ret = convert_lead(lead, doNotCreateOpportunity=True)
# print("Response from convertLead: " +
# ', '.join('%s: %s' % (k, v) for k, v in sorted(ret.items())))
expected_names = set(('accountId', 'contactId', 'leadId', 'opportunityId', 'success'))
# The field 'relatedPersonAccountId' is present in the instances ver. 51.0+ Spring '21
self.assertEqual(set(ret).difference(['relatedPersonAccountId']), expected_names)
self.assertEqual(ret['success'], 'true')
# merge the new Account with the second Lead
ret2 = convert_lead(lead2, doNotCreateOpportunity=True, accountId=ret['accountId'])
account = Account.objects.get(pk=ret['accountId'])
# verify that account is merged
self.assertEqual(ret2['accountId'], account.pk)
self.assertEqual(account.BillingStreet, 'Test Avenue 45')
self.assertEqual(account.Phone, '123456789')
finally:
# Cleaning up...
if ret:
# Deleting the Account object will also delete the related Contact
# and Opportunity objects.
try:
account = Account.objects.get(pk=ret['accountId'])
except Exception: # pylint:disable=broad-except # pragma: no cover
# this allows to recycle the account even if the queryset code is broken
account = Account(pk=ret['accountId'])
account._state.db = lead._state.db
account.delete()
lead.delete() # FYI, ret['leadId'] == lead.pk
lead2.delete()
| mit | 6a400f37b6383e4e423827ace759b20a | 43.770492 | 98 | 0.600146 | 4.280564 | false | true | false | false |
django-salesforce/django-salesforce | tests/test_compatibility/models.py | 2 | 1930 | """Backward compatible behaviour with primary key 'Id' and upper-case field names"""
import datetime
from salesforce import models
from salesforce.models import SalesforceModel
class User(SalesforceModel):
Username = models.CharField(max_length=80)
Email = models.CharField(max_length=100)
class Lead(SalesforceModel):
Company = models.CharField(max_length=255)
LastName = models.CharField(max_length=80)
Owner = models.ForeignKey(User, on_delete=models.DO_NOTHING,
default=models.DEFAULTED_ON_CREATE, db_column='OwnerId')
# models for unit tests used without a connection only
class A(SalesforceModel):
email = models.EmailField(custom=True)
class Meta:
db_table = 'A__c'
class B(SalesforceModel):
class Meta:
db_table = 'B__c'
class AtoB(SalesforceModel):
a = models.ForeignKey(A, models.DO_NOTHING, custom=True)
b = models.ForeignKey(B, models.DO_NOTHING, custom=True)
class Meta:
db_table = 'AtoB__c'
class TryDefaults(SalesforceModel):
# this model doesn't exist in Salesforce, but it should be valid
# it is only for coverage of code by tests
example_str = models.CharField(max_length=50, default=models.DefaultedOnCreate('client'))
example_datetime = models.DateTimeField(default=models.DefaultedOnCreate(datetime.datetime(2021, 3, 31, 23, 59)))
# example_date = models.DateTimeField(default=models.DefaultedOnCreate(datetime.date(2021, 3, 31)))
example_time = models.DateTimeField(default=models.DefaultedOnCreate(datetime.time(23, 59)))
example_foreign_key = models.ForeignKey(User, on_delete=models.DO_NOTHING, default=models.DefaultedOnCreate())
# ,default=models.DefaultedOnCreate(User(pk='000000000000000')))
example_bool = models.BooleanField(default=models.DefaultedOnCreate(True))
example_bool_2 = models.BooleanField(default=models.DefaultedOnCreate(False))
| mit | bb95dab3715c6e8d9c1b3896aacf0f43 | 35.415094 | 117 | 0.727979 | 3.541284 | false | false | false | false |
django-salesforce/django-salesforce | salesforce/backend/base.py | 2 | 4465 | # django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
Salesforce database backend for Django. (like django,db.backends.*.base)
"""
from typing import Any, Dict, Optional, TYPE_CHECKING
from django.conf import settings
from django.db.backends.base.base import BaseDatabaseWrapper
from salesforce.backend.client import DatabaseClient
from salesforce.backend.creation import DatabaseCreation
from salesforce.backend.features import DatabaseFeatures
from salesforce.backend.validation import DatabaseValidation
from salesforce.backend.operations import DatabaseOperations
from salesforce.backend.introspection import DatabaseIntrospection
from salesforce.backend.schema import DatabaseSchemaEditor
# from django.db.backends.signals import connection_created
from salesforce.backend.utils import CursorWrapper, async_unsafe
from salesforce.dbapi import driver as Database
from salesforce.dbapi.driver import IntegrityError, DatabaseError, SalesforceError # NOQA pylint:disable=unused-import
if TYPE_CHECKING:
from django.db.backends.base.base import ProtoCursor # pylint:disable=ungrouped-imports,no-name-in-module
__all__ = ('DatabaseWrapper', 'DatabaseError', 'SalesforceError',)
class DatabaseWrapper(BaseDatabaseWrapper):
"""
Core class that provides all DB support.
"""
# pylint:disable=abstract-method,too-many-instance-attributes
# undefined abstract methods: _start_transaction_under_autocommit, is_usable
vendor = 'salesforce'
display_name = 'Salesforce'
# Operators [contains, startswithm, endswith] are incorrectly
# case insensitive like sqlite3.
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE %s',
'icontains': 'LIKE %s',
# 'regex': 'REGEXP %s', # unsupported
# 'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor # type: ignore[assignment] # noqa # this is normal in Django
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def __init__(self, settings_dict, alias=None):
if alias is None:
alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce')
super().__init__(settings_dict, alias)
self._is_sandbox = None # type: Optional[bool]
@property
def sf_session(self) -> Database.SfSession:
if self.connection is None:
self.connect()
assert self.connection
return self.connection.sf_session
def get_connection_params(self) -> Dict[str, Any]:
settings_dict = self.settings_dict
params = settings_dict.copy()
params.update(settings_dict['OPTIONS'])
return params
@async_unsafe
def get_new_connection(self, conn_params: Dict[str, Any]) -> Database.RawConnection:
# simulated only a connection interface without connecting really
return Database.connect(settings_dict=conn_params, alias=self.alias)
def init_connection_state(self):
pass # nothing to init
def _set_autocommit(self, autocommit):
# SF REST API uses autocommit, but until rollback it is not a
# serious problem to ignore autocommit off
pass
@async_unsafe
def cursor(self) -> Any:
"""
Return a fake cursor for accessing the Salesforce API with SOQL.
"""
return CursorWrapper(self)
def create_cursor(self, name: Optional[str] = None) -> 'ProtoCursor':
row_type = {'dict': dict, 'list': list, None: None}[name]
return self.connection.cursor(row_type=row_type)
@property
def is_sandbox(self) -> bool:
if self._is_sandbox is None:
cur = self.cursor()
cur.execute("SELECT IsSandbox FROM Organization")
self._is_sandbox = cur.fetchone()[0]
return self._is_sandbox
def close(self) -> None:
if self.connection:
self.connection.close()
| mit | 9ba861563e36d0c8187d421391dd021c | 33.346154 | 119 | 0.671221 | 4.055404 | false | false | false | false |
urllib3/urllib3 | src/urllib3/_base_connection.py | 1 | 5651 | from __future__ import annotations
import typing
from .util.connection import _TYPE_SOCKET_OPTIONS
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT
from .util.url import Url
_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]
class ProxyConfig(typing.NamedTuple):
ssl_context: ssl.SSLContext | None
use_forwarding_for_https: bool
assert_hostname: None | str | Literal[False]
assert_fingerprint: str | None
class _ResponseOptions(typing.NamedTuple):
# TODO: Remove this in favor of a better
# HTTP request/response lifecycle tracking.
request_method: str
request_url: str
preload_content: bool
decode_content: bool
enforce_content_length: bool
if typing.TYPE_CHECKING:
import ssl
from typing_extensions import Literal, Protocol
from .response import BaseHTTPResponse
class BaseHTTPConnection(Protocol):
default_port: typing.ClassVar[int]
default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]
host: str
port: int
timeout: None | (
float
) # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved.
blocksize: int
source_address: tuple[str, int] | None
socket_options: _TYPE_SOCKET_OPTIONS | None
proxy: Url | None
proxy_config: ProxyConfig | None
is_verified: bool
proxy_is_verified: bool | None
def __init__(
self,
host: str,
port: int | None = None,
*,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
source_address: tuple[str, int] | None = None,
blocksize: int = 8192,
socket_options: _TYPE_SOCKET_OPTIONS | None = ...,
proxy: Url | None = None,
proxy_config: ProxyConfig | None = None,
) -> None:
...
def set_tunnel(
self,
host: str,
port: int | None = None,
headers: typing.Mapping[str, str] | None = None,
scheme: str = "http",
) -> None:
...
def connect(self) -> None:
...
def request(
self,
method: str,
url: str,
body: _TYPE_BODY | None = None,
headers: typing.Mapping[str, str] | None = None,
# We know *at least* botocore is depending on the order of the
# first 3 parameters so to be safe we only mark the later ones
# as keyword-only to ensure we have space to extend.
*,
chunked: bool = False,
preload_content: bool = True,
decode_content: bool = True,
enforce_content_length: bool = True,
) -> None:
...
def getresponse(self) -> BaseHTTPResponse:
...
def close(self) -> None:
...
@property
def is_closed(self) -> bool:
"""Whether the connection either is brand new or has been previously closed.
If this property is True then both ``is_connected`` and ``has_connected_to_proxy``
properties must be False.
"""
@property
def is_connected(self) -> bool:
"""Whether the connection is actively connected to any origin (proxy or target)"""
@property
def has_connected_to_proxy(self) -> bool:
"""Whether the connection has successfully connected to its proxy.
This returns False if no proxy is in use. Used to determine whether
errors are coming from the proxy layer or from tunnelling to the target origin.
"""
class BaseHTTPSConnection(BaseHTTPConnection, Protocol):
default_port: typing.ClassVar[int]
default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]
# Certificate verification methods
cert_reqs: int | str | None
assert_hostname: None | str | Literal[False]
assert_fingerprint: str | None
ssl_context: ssl.SSLContext | None
# Trusted CAs
ca_certs: str | None
ca_cert_dir: str | None
ca_cert_data: None | str | bytes
# TLS version
ssl_minimum_version: int | None
ssl_maximum_version: int | None
ssl_version: int | str | None # Deprecated
# Client certificates
cert_file: str | None
key_file: str | None
key_password: str | None
def __init__(
self,
host: str,
port: int | None = None,
*,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
source_address: tuple[str, int] | None = None,
blocksize: int = 8192,
socket_options: _TYPE_SOCKET_OPTIONS | None = ...,
proxy: Url | None = None,
proxy_config: ProxyConfig | None = None,
cert_reqs: int | str | None = None,
assert_hostname: None | str | Literal[False] = None,
assert_fingerprint: str | None = None,
server_hostname: str | None = None,
ssl_context: ssl.SSLContext | None = None,
ca_certs: str | None = None,
ca_cert_dir: str | None = None,
ca_cert_data: None | str | bytes = None,
ssl_minimum_version: int | None = None,
ssl_maximum_version: int | None = None,
ssl_version: int | str | None = None, # Deprecated
cert_file: str | None = None,
key_file: str | None = None,
key_password: str | None = None,
) -> None:
...
| mit | f0d857789594c87d850e1f8aa554a27c | 31.66474 | 94 | 0.556716 | 4.31374 | false | false | false | false |
urllib3/urllib3 | src/urllib3/util/util.py | 1 | 1146 | from __future__ import annotations
import typing
from types import TracebackType
def to_bytes(
x: str | bytes, encoding: str | None = None, errors: str | None = None
) -> bytes:
if isinstance(x, bytes):
return x
elif not isinstance(x, str):
raise TypeError(f"not expecting type {type(x).__name__}")
if encoding or errors:
return x.encode(encoding or "utf-8", errors=errors or "strict")
return x.encode()
def to_str(
x: str | bytes, encoding: str | None = None, errors: str | None = None
) -> str:
if isinstance(x, str):
return x
elif not isinstance(x, bytes):
raise TypeError(f"not expecting type {type(x).__name__}")
if encoding or errors:
return x.decode(encoding or "utf-8", errors=errors or "strict")
return x.decode()
def reraise(
tp: type[BaseException] | None,
value: BaseException,
tb: TracebackType | None = None,
) -> typing.NoReturn:
try:
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None # type: ignore[assignment]
tb = None
| mit | afa72bff8f09c0dd0ed7cca5006b9cab | 26.285714 | 74 | 0.612565 | 3.745098 | false | false | false | false |
urllib3/urllib3 | src/urllib3/filepost.py | 1 | 2395 | from __future__ import annotations
import binascii
import codecs
import os
import typing
from io import BytesIO
from .fields import _TYPE_FIELD_VALUE_TUPLE, RequestField
writer = codecs.lookup("utf-8")[3]
_TYPE_FIELDS_SEQUENCE = typing.Sequence[
typing.Union[typing.Tuple[str, _TYPE_FIELD_VALUE_TUPLE], RequestField]
]
_TYPE_FIELDS = typing.Union[
_TYPE_FIELDS_SEQUENCE,
typing.Mapping[str, _TYPE_FIELD_VALUE_TUPLE],
]
def choose_boundary() -> str:
"""
Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return binascii.hexlify(os.urandom(16)).decode()
def iter_field_objects(fields: _TYPE_FIELDS) -> typing.Iterable[RequestField]:
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
iterable: typing.Iterable[RequestField | tuple[str, _TYPE_FIELD_VALUE_TUPLE]]
if isinstance(fields, typing.Mapping):
iterable = fields.items()
else:
iterable = fields
for field in iterable:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def encode_multipart_formdata(
fields: _TYPE_FIELDS, boundary: str | None = None
) -> tuple[bytes, str]:
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
Values are processed by :func:`urllib3.fields.RequestField.from_tuples`.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`urllib3.filepost.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(f"--{boundary}\r\n".encode("latin-1"))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, str):
writer(body).write(data)
else:
body.write(data)
body.write(b"\r\n")
body.write(f"--{boundary}--\r\n".encode("latin-1"))
content_type = f"multipart/form-data; boundary={boundary}"
return body.getvalue(), content_type
| mit | d7f68e40992951d591bd90cc88989c85 | 25.910112 | 85 | 0.650522 | 3.801587 | false | false | false | false |
phfaist/pylatexenc | tools/make_transcryptable_lib.py | 1 | 10464 | import re
import os
import os.path
source_dir = os.path.join(os.path.dirname(__file__), '..')
target_dir = os.path.join(os.path.dirname(__file__), 'transcryptable_output')
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
#
# Process our source files to make them usable with Transcrypt (-> to create a
# JavaScript library)
#
include_sources = [
(['pylatexenc'], '__init__.py'),
(['pylatexenc'], '_util.py'),
(['pylatexenc'], 'version.py'),
(['pylatexenc','macrospec'], '__init__.py'),
(['pylatexenc','macrospec'], '_parsedargs.py'),
(['pylatexenc','macrospec'], '_argparsers.py'),
(['pylatexenc','macrospec'], '_specclasses.py'),
(['pylatexenc','macrospec'], '_latexcontextdb.py'),
(['pylatexenc','latexwalker'], '__init__.py'),
(['pylatexenc','latexwalker'], '_types.py'),
(['pylatexenc','latexwalker'], '_walker.py'),
]
accept_list_modules = [
'pylatexenc._util',
'pylatexenc.version',
'pylatexenc.macrospec',
'pylatexenc.macrospec._parsedargs',
'pylatexenc.macrospec._argparsers',
'pylatexenc.macrospec._specclasses',
'pylatexenc.macrospec._latexcontextdb',
'pylatexenc.latexwalker',
'pylatexenc.latexwalker._types',
'pylatexenc.latexwalker._walker',
]
_rx_from_import = re.compile(
r"""
^ # beginning of a line
from
\s+
(?P<pkg_where>[a-zA-Z0-9_.]+) # package path
\s+
import
(?P<import_targets>
(?P<import_targets_no_parens>
\s+
(?P<import_name>[A-Za-z0-9_*]+) # main imported module
(?:
[ \t]+ as [ \t]+
(?P<import_as>[A-Za-z0-9_]+) # alias import name
)?
(?:
(?:[ \t]*$)
|
(?P<more_import_names> # end of import stmt, or more stuff
[,\s]+
(.*\\\n)* # contents with backslash at end of line
(.*[^\\]$) # line not ending with a backslash
)
)
)
|
(?P<import_targets_with_parens>
\s*\(
(?:[A-Za-z0-9_,\s\n]+ # names, commas, whitespace, newlines
| [#].*$ )+ # or comments
\)
[ \t]*$
)
)
""",
flags=re.MULTILINE | re.VERBOSE
)
_rx_py2supportcode = re.compile(
r"""
^import [ \t]+ sys \s*
^if [ ]+ sys[.]version_info[.]major [ ]* == [ ]* 2 [ ]* : \s*[\n]
( ^[ \t]+.*[\n] )+ # greedy capturing of lines with indentation
""",
flags=re.MULTILINE | re.VERBOSE
)
# remove arguments to super() calls
_rx_super = re.compile(
r"""
super\s*
\(
\s*
(?P<clsname> [A-Za-z0-9_]+ )
\s*
,
\s*
(?P<self> self )
\s*
\)
""",
flags=re.MULTILINE | re.VERBOSE
)
_rx_import = re.compile(
r"""
^
import
[ \t]+
(?P<pkg_name>[a-zA-Z0-9_.]+) # package name
[ \t]*
$
""",
flags=re.MULTILINE | re.VERBOSE
)
# logger = logging.getLogger(...) at module level
_rx_logger_defn = re.compile(
r"""
^
logger
[ \t]+
=
[ \t]+
.*
$
""",
re.MULTILINE | re.VERBOSE
)
# logger.debug|warn|...
_rx_logger_use = re.compile(
r"""
\b
logger
[ \t]*
[.]
[ \t]*
(?P<logger_method>
debug
|
info
|
warning
|
error
|
critical
)
""",
re.MULTILINE | re.VERBOSE
)
# 'bisect_right_nodupl = ..."
_rx_bisect_defn = re.compile(
r"""
^
bisect_right_nodupl
[ \t]+
=
[ \t]+
.*
$
""",
re.MULTILINE | re.VERBOSE
)
_rx_construct_dict_with_generator = re.compile(
r"""
\b
dict\b
\(
(?P<contents>
[^\[\n]+
\b for \b
.*
)
\)
""",
re.MULTILINE | re.VERBOSE
)
def process_source_file(relative_dir_items, filename_py):
# a bunch of hacks onto the source file to make it compatible with
# transcrypt to generate a simple JS "latex-markup" parser
# remove any 'from __future__ import ...' line
print(f"Processing {relative_dir_items=} {filename_py=}")
def _comment_out_text(text):
return '###> ' + text.replace('\n', '\n###> ') + '\n'
def _comment_out_match(m):
return '###> ' + m.group().replace('\n', '\n###> ') + '\n'
def _repl_from_import(m):
pkg_where = m.group('pkg_where')
import_targets = m.group('import_targets')
import_name = m.group('import_name')
import_as = m.group('import_as')
more_import_names = m.group('more_import_names')
group = m.group()
if group[-1] == '\n':
group = group[:-1]
def _comment_out():
return _comment_out_text(group)
if pkg_where == '__future__':
# special '__future__' import, leave it out
return _comment_out()
# translate into module name that is imported
mod_path = pkg_where.split('.')
if mod_path[0]: # and os.path.isdir(os.join(source_dir, *mod_path)):
# found absolute import, all fine
pass
else:
# mod_path[0] is empty
if not mod_path[-1]: # 'from ".." import zz' -> three empty sections in mod_path
mod_path = mod_path[:-1]
r = list(relative_dir_items)
print(f"relative import: {r=}, {mod_path=}")
mod_path = mod_path[1:]
while mod_path and not mod_path[0]:
r = r[:-1]
mod_path = mod_path[1:]
print(f"resolved up one level: {r=}, {mod_path=}")
mod_path = r + mod_path
# mod_path is relative path of the package that is referenced
imported_sub_module = False
if os.path.isdir( os.path.join(source_dir, *mod_path) ) \
and os.path.isfile( os.path.join(source_dir, *mod_path, '__init__.py') ) :
# it's a directory -> a pythonb package, so we need to look at
# import_name ("from pylatexenc import latexwalker" -> mod_path
# should resolve to ['pylatexenc', 'latexwalker'])
if not import_name:
raise ValueError(f"Could not parse import statement, I need a single "
f"module to import please: ‘{group}’ ({mod_path=!r})")
mod_path = mod_path + [ import_name ]
imported_sub_module = True
mod_dotname = '.'.join(mod_path)
mod_fname = os.path.join(source_dir, *mod_path)
if not os.path.isfile( mod_fname + '.py' ) \
and not os.path.isfile( os.path.join(mod_fname, '__init__.py') ):
raise ValueError(f"Could not find module: ‘{group}’ "
f"({mod_fname=!r} {mod_dotname=!r})")
# check if the module is accept-listed
if mod_dotname not in accept_list_modules:
print(f"Removing import ‘{group}’, not in accept-list")
return _comment_out()
# tweak in different form
if imported_sub_module:
if more_import_names:
raise ValueError(
f"More names specified, can only handle one sub-module: ‘{group}’"
)
if import_as:
return f'import {mod_dotname} as {import_as}\n'
return f'import {mod_dotname} as {import_name}\n'
# transform into simple absolute from X import Y statement
return f'from {mod_dotname} import {import_targets}'
def _repl_import(m):
pkg_name = m.group('pkg_name')
group = m.group()
if pkg_name in ('logging', 'bisect',):
return _comment_out_text(group)
return group
def _repl_dict_generator(m):
contents = m.group('contents')
group = m.group()
repl = 'dict([ ' + contents + ' ])'
print(
"*** replacing suspected dict construction from a generator "
"comprehension by explicit list: ***\n"
f" ‘{group}’ -->\n"
f" ‘{repl}’\n"
)
return repl
with open(os.path.join(source_dir, *relative_dir_items, filename_py)) as f:
source_content = f.read()
final_source_content = source_content
# remove Py2 support code
final_source_content = _rx_py2supportcode.sub(
lambda m: _comment_out_text(m.group()),
final_source_content)
# fix "from ... import ..." imports
final_source_content = _rx_from_import.sub(_repl_from_import, final_source_content)
# find imports and selectively keep them --- e.g., remove 'logging'
final_source_content = _rx_import.sub(_repl_import, final_source_content)
# replace logger = logging.getLogger(...) as well as logger.XXX(...) -> console.log
final_source_content = _rx_logger_defn.sub(_comment_out_match, final_source_content)
final_source_content = _rx_logger_use.sub('console.log', final_source_content)
# super(SuperClass, self) --> super()
final_source_content = _rx_super.sub(
'super()',
final_source_content
)
# dict( x for x in zzz ) --> dict([ x for x in zzz ])
final_source_content = _rx_construct_dict_with_generator.sub(
_repl_dict_generator,
final_source_content
)
# custom implementation for bisect
custom_bisect = r"""
def bisect_right_nodupl(a, x):
# find the first index of a that is > pos
lo = 0
hi = len(a)
mid = None
while True:
if a[lo] > x:
return lo
if a[hi-1] <= x:
return hi
if hi - lo <= 2:
if a[lo+1] > x:
return lo
else: #if a[lo+2] > x:
return lo+1
mid = (hi + lo) // 2
if a[mid] > x:
hi = mid+1
elif a[mid] <= x:
lo = mid
"""
final_source_content = _rx_bisect_defn.sub(
lambda m: custom_bisect,
final_source_content
)
if not os.path.isdir(os.path.join(target_dir, *relative_dir_items)):
os.makedirs(os.path.join(target_dir, *relative_dir_items))
with open(os.path.join(target_dir, *relative_dir_items, filename_py), 'w') as fw:
fw.write(final_source_content)
for relative_dir_items, fname in include_sources:
process_source_file(relative_dir_items, fname)
| mit | 180261f1ba4fbeb44b570c0b744d44af | 25.43038 | 92 | 0.519349 | 3.429698 | false | false | false | false |
phfaist/pylatexenc | pylatexenc/_util.py | 1 | 3932 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Philippe Faist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Internal module. Internal API may move, disappear or otherwise change at any
# time and without notice.
import bisect
bisect_right_nodupl = bisect.bisect_right
# ------------------------------------------------------------------------------
class LineNumbersCalculator(object):
r"""
Utility to calculate line numbers.
"""
def __init__(self, s):
super(LineNumbersCalculator, self).__init__()
def find_all_new_lines(x):
# first line starts at the beginning of the string
yield 0
k = 0
while k < len(x):
k = x.find('\n', k)
if k == -1:
return
k += 1
# s[k] is the character after the newline, i.e., the 0-th column
# of the new line
yield k
self._pos_new_lines = list(find_all_new_lines(s))
def pos_to_lineno_colno(self, pos, as_dict=False):
r"""
Return the line and column number corresponding to the given `pos`.
Return a tuple `(lineno, colno)` giving line number and column number.
Line numbers start at 1 and column number start at zero, i.e., the
beginning of the document (`pos=0`) has line and column number `(1,0)`.
If `as_dict=True`, then a dictionary with keys 'lineno', 'colno' is
returned instead of a tuple.
"""
# find line number in list
# line_no is the index of the last item in self._pos_new_lines that is <= pos.
line_no = bisect_right_nodupl(self._pos_new_lines, pos)-1
assert line_no >= 0 and line_no < len(self._pos_new_lines)
col_no = pos - self._pos_new_lines[line_no]
# 1+... so that line and column numbers start at 1
if as_dict:
return {'lineno': 1 + line_no, 'colno': col_no}
return (1 + line_no, col_no)
# ------------------------------------------------------------------------------
class PushPropOverride(object):
def __init__(self, obj, propname, new_value):
super(PushPropOverride, self).__init__()
self.obj = obj
self.propname = propname
self.new_value = new_value
def __enter__(self):
if self.new_value is not None:
self.initval = getattr(self.obj, self.propname)
setattr(self.obj, self.propname, self.new_value)
return self
def __exit__(self, type, value, traceback):
# clean-up
if self.new_value is not None:
setattr(self.obj, self.propname, self.initval)
# ------------------------------------------------------------------------------
from ._util_support import (
pylatexenc_deprecated_ver,
pylatexenc_deprecated_2,
#
LazyDict
)
| mit | 11112026dc6046c8f8d188733b2d4a87 | 32.322034 | 86 | 0.592319 | 4.130252 | false | false | false | false |
phfaist/pylatexenc | tools/gen_xml_dic.py | 1 | 2236 | #
# mini-script to generate the pylatexenc.latexencode._uni2latexmap_xml dict mapping
#
import re
import sys
if sys.version_info.major > 2:
# python 3
unichr = chr
from xml.etree import ElementTree as ET
e = ET.parse('unicode.xml')
d = {}
dnames = {}
for chxml in e.find('charlist').iter('character'):
Uid = chxml.attrib['id']
if '-' in Uid:
# composite/multiple characters not supported
continue
charord = int(Uid.lstrip('U'), 16)
latexxml = chxml.find('latex')
if latexxml is None:
continue
latexval = latexxml.text
if latexval == unichr(charord):
# "latex" representation is the same char directly
continue
if charord == 0x20:
# skip space char
continue
if latexval.startswith(r'\ElsevierGlyph') or latexval.startswith(r'\El') \
or latexval.startswith(r'\ensuremath{\El'):
continue
if re.search(r'\\[a-zA-Z]+\s+$', latexval):
# ends with named macro+space, remove space because
# latexencode.UnicodeToLatexEncoder will handle that with
# replacement_latex_protection
latexval = latexval.rstrip()
d[charord] = latexval
dnames[charord] = chxml.find('description').text
# dump dictionary into new module file in current working directory
outputfile = '_uni2latexmap_xml.py'
HEADER = """\
# -*- coding: utf-8 -*-
#
# Automatically generated from unicode.xml by gen_xml_dic.py
#
"""
with open(outputfile, 'w') as f:
f.write(HEADER)
f.write("uni2latex = {\n")
for k,v in d.items():
f.write("0x%04X: %r,\n"%(k, v))
f.write("}\n")
print("Successfully generated file %s"%(outputfile))
# Now see which characters we don't have in our default set of symbols
from pylatexenc.latexencode._uni2latexmap import uni2latex as uni2latex_defaults
missing_keys = set(d.keys()).difference(set(uni2latex_defaults.keys()))
if missing_keys:
print("#\n# Missing keys added from unicode.xml\n#\n")
for k in sorted(missing_keys):
if "'" not in d[k]:
therepr = "r'"+d[k]+"'"
else:
therepr = repr(d[k])
thedef = "0x%04X: %s,"%(k, therepr)
print("%-50s# %s [%s]"%(thedef, dnames[k], unichr(k)))
| mit | 5f41f33cff36699ee5a138d966580c58 | 26.268293 | 83 | 0.630143 | 3.347305 | false | false | false | false |
tobgu/pyrsistent | tests/hypothesis_vector_test.py | 1 | 8835 | """
Hypothesis-based tests for pvector.
"""
import gc
from collections.abc import Iterable
from functools import wraps
from pyrsistent import PClass, field
from pytest import fixture
from pyrsistent import pvector, discard
from hypothesis import strategies as st, assume
from hypothesis.stateful import RuleBasedStateMachine, Bundle, rule
class RefCountTracker:
    """
    Sentinel object whose identity is recorded at construction time.

    If a reference-counting bug ever hands back a stale/dangling object,
    the stored ``id`` will no longer match ``id(self)`` and ``__del__``
    raises, surfacing the problem during garbage collection.
    """
    def __init__(self):
        self.id = id(self)

    def __repr__(self):
        return f"<{self.id}>"

    def __del__(self):
        # A mismatch here means ``self`` is (or points at) freed memory.
        # If self is a dangling reference this check might fail — or
        # segfault :)
        if id(self) != self.id:
            raise RuntimeError()
@fixture(scope="module")
def gc_when_done(request):
    """Module-scoped fixture: schedule a full garbage collection after all
    tests in this module finish, so every ``RefCountTracker.__del__`` runs
    while a failure can still be attributed to this module's tests."""
    request.addfinalizer(gc.collect)
def test_setup(gc_when_done):
    """
    Ensure we GC when tests finish.

    A deliberate no-op: its only purpose is to request the ``gc_when_done``
    fixture, which registers the module-level garbage-collection finalizer.
    """
# Pairs of a list and corresponding pvector:
# A hypothesis strategy producing (list, pvector) pairs that hold the same
# RefCountTracker objects; used as the seed values for the state machine.
PVectorAndLists = st.lists(st.builds(RefCountTracker)).map(
    lambda l: (l, pvector(l)))
def verify_inputs_unmodified(original):
    """
    Decorator asserting that the wrapped rule does not modify its inputs.

    Every iterable keyword argument is expected to be a ``(list, pvector)``
    pair; both halves are snapshotted as tuples before the call and compared
    against fresh snapshots afterwards, even if the call raises.
    """
    def snapshot(pairs):
        return [(tuple(lst), tuple(vec)) for lst, vec in pairs]

    @wraps(original)
    def wrapper(self, **kwargs):
        iterable_args = [v for v in kwargs.values() if isinstance(v, Iterable)]
        before = snapshot(iterable_args)
        try:
            return original(self, **kwargs)
        finally:
            # Ensure inputs were unmodified:
            assert snapshot(iterable_args) == before
    return wrapper
def assert_equal(l, pv):
    """
    Exhaustively check that list ``l`` and pvector ``pv`` agree: equality,
    length, every index, every slice, and full iteration order.
    """
    assert l == pv
    assert len(l) == len(pv)
    n = len(l)
    for idx in range(n):
        assert l[idx] == pv[idx]
    # Quadratic slice comparison -- callers keep the sequences short.
    for start in range(n):
        for stop in range(start, n):
            assert l[start:stop] == pv[start:stop]
    assert l == list(iter(pv))
class PVectorBuilder(RuleBasedStateMachine):
    """
    Build a list and matching pvector step-by-step.

    In each step in the state machine we do same operation on a list and
    on a pvector, and then when we're done we compare the two.
    """
    # Bundle of (list, pvector) pairs produced by earlier rules.
    sequences = Bundle("sequences")

    @rule(target=sequences, start=PVectorAndLists)
    def initial_value(self, start):
        """
        Some initial values generated by a hypothesis strategy.
        """
        return start

    @rule(target=sequences, former=sequences)
    @verify_inputs_unmodified
    def append(self, former):
        """
        Append an item to the pair of sequences.
        """
        l, pv = former
        obj = RefCountTracker()
        # Copy the list first: the originals must stay untouched
        # (verified by the decorator).
        l2 = l[:]
        l2.append(obj)
        return l2, pv.append(obj)

    @rule(target=sequences, start=sequences, end=sequences)
    @verify_inputs_unmodified
    def extend(self, start, end):
        """
        Extend a pair of sequences with another pair of sequences.
        """
        l, pv = start
        l2, pv2 = end
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(l) + len(l2) < 50)
        l3 = l[:]
        l3.extend(l2)
        return l3, pv.extend(pv2)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def remove(self, former, data):
        """
        Remove an item from the sequences.
        """
        l, pv = former
        assume(l)  # need at least one element to delete
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        del l2[i]
        return l2, pv.delete(i)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def set(self, former, data):
        """
        Overwrite an item in the sequence.
        """
        l, pv = former
        assume(l)
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        obj = RefCountTracker()
        l2[i] = obj
        return l2, pv.set(i, obj)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def transform_set(self, former, data):
        """
        Transform the sequence by setting value.
        """
        l, pv = former
        assume(l)
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        obj = RefCountTracker()
        l2[i] = obj
        # Same effect as set(), but exercised through the transform() API.
        return l2, pv.transform([i], obj)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def transform_discard(self, former, data):
        """
        Transform the sequence by discarding a value.
        """
        l, pv = former
        assume(l)
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        del l2[i]
        return l2, pv.transform([i], discard)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def subset(self, former, data):
        """
        A subset of the previous sequence.
        """
        l, pv = former
        assume(l)
        # i > j is allowed on purpose: that yields an empty slice.
        i = data.draw(st.sampled_from(range(len(l))))
        j = data.draw(st.sampled_from(range(len(l))))
        return l[i:j], pv[i:j]

    @rule(pair=sequences)
    @verify_inputs_unmodified
    def compare(self, pair):
        """
        The list and pvector must match.
        """
        l, pv = pair
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(l) < 50)
        assert_equal(l, pv)


# unittest-compatible entry point so the state machine is collected by pytest.
PVectorBuilderTests = PVectorBuilder.TestCase
class EvolverItem(PClass):
    # Immutable record tying together the original data and the pair of
    # evolving structures (plain list + pvector evolver) mutated by the rules.
    original_list = field()
    original_pvector = field()
    current_list = field()
    current_evolver = field()
class PVectorEvolverBuilder(RuleBasedStateMachine):
    """
    Build a list and matching pvector evolver step-by-step.

    In each step in the state machine we do same operation on a list and
    on a pvector evolver, and then when we're done we compare the two.
    """
    # Bundle of EvolverItem records produced by earlier rules.
    sequences = Bundle("evolver_sequences")

    @rule(target=sequences, start=PVectorAndLists)
    def initial_value(self, start):
        """
        Some initial values generated by a hypothesis strategy.
        """
        l, pv = start
        return EvolverItem(original_list=l,
                           original_pvector=pv,
                           current_list=l[:],
                           current_evolver=pv.evolver())

    @rule(item=sequences)
    def append(self, item):
        """
        Append an item to the pair of sequences.
        """
        obj = RefCountTracker()
        # Unlike PVectorBuilder, both sides are mutated in place here.
        item.current_list.append(obj)
        item.current_evolver.append(obj)

    @rule(start=sequences, end=sequences)
    def extend(self, start, end):
        """
        Extend a pair of sequences with another pair of sequences.
        """
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(start.current_list) + len(end.current_list) < 50)
        start.current_evolver.extend(end.current_list)
        start.current_list.extend(end.current_list)

    @rule(item=sequences, data=st.data())
    def delete(self, item, data):
        """
        Remove an item from the sequences.
        """
        assume(item.current_list)  # need at least one element
        i = data.draw(st.sampled_from(range(len(item.current_list))))
        del item.current_list[i]
        del item.current_evolver[i]

    @rule(item=sequences, data=st.data())
    def setitem(self, item, data):
        """
        Overwrite an item in the sequence using ``__setitem__``.
        """
        assume(item.current_list)
        i = data.draw(st.sampled_from(range(len(item.current_list))))
        obj = RefCountTracker()
        item.current_list[i] = obj
        item.current_evolver[i] = obj

    @rule(item=sequences, data=st.data())
    def set(self, item, data):
        """
        Overwrite an item in the sequence using ``set``.
        """
        assume(item.current_list)
        i = data.draw(st.sampled_from(range(len(item.current_list))))
        obj = RefCountTracker()
        item.current_list[i] = obj
        item.current_evolver.set(i, obj)

    @rule(item=sequences)
    def compare(self, item):
        """
        The list and pvector evolver must match.
        """
        # Smoke-call is_dirty(); its return value is intentionally unused.
        item.current_evolver.is_dirty()
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(item.current_list) < 50)
        # original object unmodified
        assert item.original_list == item.original_pvector
        # evolver matches:
        for i in range(len(item.current_evolver)):
            assert item.current_list[i] == item.current_evolver[i]
        # persistent version matches
        assert_equal(item.current_list, item.current_evolver.persistent())
        # original object still unmodified
        assert item.original_list == item.original_pvector


# unittest-compatible entry point so the state machine is collected by pytest.
PVectorEvolverBuilderTests = PVectorEvolverBuilder.TestCase
| mit | 1c9c533c9131d8d954937b101172fefc | 28.0625 | 74 | 0.59253 | 3.746819 | false | false | false | false |
tobgu/pyrsistent | benchmarks/pmap.py | 1 | 6371 | from pyperform import BenchmarkedFunction
from pyrsistent import pmap #!
class Benchmarked(BenchmarkedFunction):
    # Thin wrapper around pyperform's BenchmarkedFunction that lets each
    # benchmark choose a relative `scale`; the underlying timeit iteration
    # count becomes scale * 1000.
    def __init__(self, scale=1, *args, **kwargs):
        super(Benchmarked, self).__init__(*args, timeit_number=scale*1000, **kwargs)
################# Create ###################

@Benchmarked()
def create_empty_pmap():
    for x in range(1000):
        _ = pmap()

@Benchmarked()
def reference_create_empty_dict():
    for x in range(1000):
        _ = dict()

# Setup helpers passed as ``setup=`` to @Benchmarked. NOTE(review): pyperform
# appears to inline the *source* of the setup function ahead of the benchmark
# body -- that is how locals such as ``small_dict`` become visible inside the
# benchmarks below; confirm against the pyperform documentation.
def _small_dict():
    small_dict = dict((i, i) for i in range(10))

def _large_dict():
    large_dict = dict((i, i) for i in range(2000))

@Benchmarked(setup=_small_dict)
def create_small_pmap():
    for x in range(100):
        _ = pmap(small_dict)

@Benchmarked(setup=_small_dict)
def reference_create_small_dict():
    for x in range(100):
        _ = dict(small_dict)

@Benchmarked(setup=_large_dict)
def create_large_pmap():
    for x in range(1):
        _ = pmap(large_dict)

@Benchmarked(setup=_large_dict)
def reference_create_large_dict():
    for x in range(1):
        _ = dict(large_dict)
# ######################### Insert ######################

# Setup helpers binding the pmaps used by the benchmarks below
# (see the note on pyperform setup inlining above).
def _small_pmap():
    small_pmap = pmap(dict((i, i) for i in range(10)))

# Keys below are chosen to hit existing entries (replace) of the 10/2000
# element structures.
@Benchmarked(setup=_small_pmap)
def random_replace_small_pmap():
    for x in (9, 1, 4, 5, 7, 7, 3, 2):
        small_pmap.set(x, x)

@Benchmarked(setup=_small_dict)
def reference_random_replace_small_dict():
    for x in (9, 1, 4, 5, 7, 7, 3, 2):
        small_dict[x] = x

def _large_pmap():
    large_pmap = pmap(dict((i, i) for i in range(2000)))

@Benchmarked(setup=_large_pmap)
def random_replace_large_pmap():
    for x in (999, 111, 74, 1233, 6, 1997, 400, 1000):
        large_pmap.set(x, x)

@Benchmarked(setup=_large_dict)
def reference_random_replace_large_dict():
    for x in (999, 111, 74, 1233, 6, 1997, 400, 1000):
        large_dict[x] = x

# Same replacements, but batched through an evolver and made persistent once.
@Benchmarked(setup=_small_pmap)
def random_replace_small_pmap_evolver():
    e = small_pmap.evolver()
    for x in (9, 1, 4, 5, 7, 7, 3, 2):
        e[x] = x
    m = e.persistent()

@Benchmarked(setup=_large_pmap)
def random_replace_large_pmap_evolver():
    e = large_pmap.evolver()
    for x in (999, 111, 74, 1233, 6, 1997, 400, 1000):
        e[x] = x
    m = e.persistent()

# Keys below do NOT exist yet, so these measure insertion rather than replace.
@Benchmarked(setup=_small_pmap)
def random_insert_new_small_pmap():
    for x in (19, 11, 14, 15, 17, 117, 13, 12):
        small_pmap.set(x, x)

@Benchmarked(setup=_small_dict)
def reference_random_insert_new_small_dict():
    for x in (19, 11, 14, 15, 17, 117, 13, 12):
        small_dict[x] = x

@Benchmarked(setup=_large_pmap)
def random_insert_new_large_pmap():
    for x in (100999, 100111, 10074, 1001233, 1006, 1001997, 100400, 1001000):
        large_pmap.set(x, x)

@Benchmarked(setup=_large_dict)
def reference_random_insert_new_large_dict():
    for x in (100999, 100111, 10074, 1001233, 1006, 1001997, 100400, 1001000):
        large_dict[x] = x
# ################## Read ########################

@Benchmarked(setup=_small_pmap)
def random_read_small_pmap():
    for x in (9, 1, 4, 5, 7, 7, 3, 2):
        y = small_pmap[x]

@Benchmarked(setup=_small_dict)
def reference_random_read_small_dict():
    for x in (9, 1, 4, 5, 7, 7, 3, 2):
        y = small_dict[x]

# NOTE(review): name says "pvector" but this reads the large pmap -- looks
# like a copy-paste from the vector benchmarks; kept for result continuity.
@Benchmarked(setup=_large_pmap)
def random_read_large_native_pvector():
    for x in (999, 111, 74, 1233, 6, 1997, 400, 1000):
        y = large_pmap[x]

# NOTE(review): name says "list" but this reads the large dict (see above).
@Benchmarked(setup=_large_dict)
def reference_random_read_large_list():
    for x in (999, 111, 74, 1233, 6, 1997, 400, 1000):
        y = large_dict[x]

# #################### Iteration #########################

@Benchmarked(setup=_large_pmap)
def iteration_large_pmap():
    for k in large_pmap:
        pass

@Benchmarked(setup=_large_dict)
def reference_iteration_large_dict():
    for k in large_dict:
        pass
# #################### Comparison ########################

# Setup helpers building the operand pairs for the equality benchmarks below.
def _different_pmaps_same_size():
    pmap1 = pmap(dict((i, i) for i in range(2000)))
    pmap2 = pmap(dict((i, i + 1) for i in range(2000)))

def _different_pmaps_different_size():
    pmap1 = pmap(dict((i, i) for i in range(2000)))
    pmap2 = pmap(dict((i, i + 1) for i in range(1500)))

def _equal_pmaps():
    pmap1 = pmap(dict((i, i) for i in range(2000)))
    pmap2 = pmap(dict((i, i) for i in range(2000)))

def _equal_pmap_and_dict():
    dict1 = dict((i, i) for i in range(2000))
    pmap1 = pmap(dict((i, i) for i in range(2000)))

def _equal_dicts():
    dict1 = dict((i, i) for i in range(2000))
    dict2 = dict((i, i) for i in range(2000))

def _different_dicts_same_size():
    dict1 = dict((i, i) for i in range(2000))
    dict2 = dict((i, i + 1) for i in range(2000))
def _different_dicts_different_size():
dict1 = dict((i, i) for i in range(2000))
dict2 = dict((i, i + 1) for i in range(2000))
# Equal pmaps stored with different internal bucket counts -- equality must
# not depend on the bucket layout.
def _equal_pmaps_different_bucket_size():
    pmap1 = pmap(dict((i, i) for i in range(2000)), 1999)
    pmap2 = pmap(dict((i, i) for i in range(2000)), 2000)

def _equal_pmaps_same_bucket_size_different_insertion_order():
    pmap1 = pmap([(i, i) for i in range(2000)], 1999)
    pmap2 = pmap([(i, i) for i in range(1999, -1, -1)], 1999)

@Benchmarked(setup=_large_pmap)
def compare_same_pmap():
    large_pmap == large_pmap

@Benchmarked(setup=_large_dict)
def reference_compare_same_dict():
    large_dict == large_dict

@Benchmarked(setup=_equal_pmaps)
def compare_equal_pmaps():
    pmap1 == pmap2

@Benchmarked(setup=_equal_dicts)
def reference_compare_equal_dicts():
    dict1 == dict2

@Benchmarked(setup=_equal_pmap_and_dict)
def compare_equal_pmap_and_dict():
    pmap1 == dict1

@Benchmarked(setup=_equal_pmaps_same_bucket_size_different_insertion_order)
def compare_equal_pmaps_different_insertion_order():
    pmap1 == pmap2

@Benchmarked(setup=_equal_pmaps_different_bucket_size)
def compare_equal_pmaps_different_bucket_size():
    pmap1 == pmap2

@Benchmarked(setup=_different_pmaps_same_size)
def compare_different_pmaps_same_size():
    pmap1 == pmap2

@Benchmarked(setup=_different_dicts_same_size)
def reference_compare_different_dicts_same_size():
    dict1 == dict2

@Benchmarked(setup=_different_pmaps_different_size)
def compare_different_pmaps_different_size():
    pmap1 == pmap2

@Benchmarked(setup=_different_dicts_different_size)
def reference_compare_different_dicts_different_size():
    dict1 == dict2
| mit | 3b19fcf509a7744f7c6684d52f08cdf5 | 22.772388 | 84 | 0.616858 | 2.862084 | false | false | false | false |
phfaist/pylatexenc | pylatexenc/latexwalker/_defaultspecs.py | 1 | 17330 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Philippe Faist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Internal module. May change without notice.
from ..macrospec import std_macro, std_environment, std_specials, \
MacroSpec, EnvironmentSpec, MacroStandardArgsParser, \
VerbatimArgsParser, LstListingArgsParser
# Default macro/environment/specials definitions, grouped into named
# categories. Each entry is a ``(category_name, {'macros': [...],
# 'environments': [...], 'specials': [...]})`` pair; the categories are merged
# in the order listed here to build the default latex context database.
specs = [
    #
    # CATEGORY: latex-base
    #
    ('latex-base', {
        'macros': [
            std_macro('documentclass', True, 1),
            std_macro('usepackage', True, 1),
            std_macro('RequirePackage', True, 1),
            std_macro('selectlanguage', True, 1),
            std_macro('setlength', True, 2),
            # NOTE(review): standard LaTeX spells these \addtolength and
            # \addtocounter; confirm the short names here are intentional.
            std_macro('addlength', True, 2),
            std_macro('setcounter', True, 2),
            std_macro('addcounter', True, 2),
            std_macro('newcommand', "*{[[{"),
            std_macro('renewcommand', "*{[[{"),
            std_macro('providecommand', "*{[[{"),
            std_macro('newenvironment', "*{[[{{"),
            std_macro('renewenvironment', "*{[[{{"),
            std_macro('provideenvironment', "*{[[{{"),

            std_macro('DeclareMathOperator', '*{{'),

            std_macro('hspace', '*{'),
            std_macro('vspace', '*{'),

            MacroSpec('mbox',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),

            # \title, \author, \date
            MacroSpec('title', '{'),
            MacroSpec('author', '{'),
            MacroSpec('date', '{'),

            # (Note: single backslash) end of line with optional no-break ('*') and
            # additional vertical spacing, e.g. \\*[2mm]
            #
            # Special for this command: don't allow an optional spacing argument
            # [2mm] to be separated by spaces from the rest of the macro.  This
            # emulates the behavior in AMS environments, and avoids some errors;
            # e.g. in "\begin{align} A=0 \\ [C,D]=0 \end{align}" the "[C,D]"
            # does not get captured as an optional macro argument.
            MacroSpec('\\',
                      args_parser=MacroStandardArgsParser('*[', optional_arg_no_space=True)),

            std_macro('item', True, 0),

            # \input{someotherfile}
            std_macro('input', False, 1),
            std_macro('include', False, 1),

            std_macro('includegraphics', True, 1),

            std_macro('chapter', '*[{'),
            std_macro('section', '*[{'),
            std_macro('subsection', '*[{'),
            std_macro('subsubsection', '*[{'),
            std_macro('paragraph', '*[{'),
            # Kept for backwards compatibility: earlier versions (mis)spelled
            # the \paragraph sectioning macro this way.
            std_macro('pagagraph', '*[{'),
            std_macro('subparagraph', '*[{'),

            std_macro('bibliography', '{'),

            std_macro('emph', False, 1),
            MacroSpec('textrm',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('textit',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('textbf',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('textmd',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('textsc',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('textsf',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('textsl',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('texttt',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('textup',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),
            MacroSpec('text',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[False])),

            std_macro('mathrm', False, 1), # only allowed in math mode anyway
            std_macro('mathbb', False, 1), # only allowed in math mode anyway
            std_macro('mathbf', False, 1),
            std_macro('mathit', False, 1),
            std_macro('mathsf', False, 1),
            std_macro('mathtt', False, 1),
            std_macro('mathcal', False, 1),
            std_macro('mathscr', False, 1),
            std_macro('mathfrak', False, 1),

            std_macro('label', False, 1),
            std_macro('ref', False, 1),
            std_macro('autoref', False, 1),
            std_macro('cref', False, 1),
            std_macro('Cref', False, 1),
            std_macro('eqref', False, 1),
            std_macro('url', False, 1),
            std_macro('hypersetup', False, 1),
            std_macro('footnote', True, 1),

            std_macro('keywords', False, 1),

            std_macro('hphantom', True, 1),
            std_macro('vphantom', True, 1),

            # Accent macros taking one argument.
            std_macro("'", False, 1),
            std_macro("`", False, 1),
            std_macro('"', False, 1),
            std_macro("c", False, 1),
            std_macro("^", False, 1),
            std_macro("~", False, 1),
            std_macro("H", False, 1),
            std_macro("k", False, 1),
            std_macro("=", False, 1),
            std_macro("b", False, 1),
            std_macro(".", False, 1),
            std_macro("d", False, 1),
            std_macro("r", False, 1),
            std_macro("u", False, 1),
            std_macro("v", False, 1),

            MacroSpec('ensuremath',
                      args_parser=MacroStandardArgsParser('{', args_math_mode=[True])),

            std_macro("not", False, 1),
            std_macro("vec", False, 1),
            std_macro("dot", False, 1),
            std_macro("hat", False, 1),
            std_macro("check", False, 1),
            std_macro("breve", False, 1),
            std_macro("acute", False, 1),
            std_macro("grave", False, 1),
            std_macro("tilde", False, 1),
            std_macro("bar", False, 1),
            std_macro("ddot", False, 1),

            std_macro('frac', False, 2),
            std_macro('nicefrac', False, 2),

            std_macro('sqrt', True, 1),

            MacroSpec('overline', '{'),
            MacroSpec('underline', '{'),
            MacroSpec('widehat', '{'),
            MacroSpec('widetilde', '{'),
            MacroSpec('wideparen', '{'),
            MacroSpec('overleftarrow', '{'),
            MacroSpec('overrightarrow', '{'),
            MacroSpec('overleftrightarrow', '{'),
            MacroSpec('underleftarrow', '{'),
            MacroSpec('underrightarrow', '{'),
            MacroSpec('underleftrightarrow', '{'),
            MacroSpec('overbrace', '{'),
            MacroSpec('underbrace', '{'),
            MacroSpec('overgroup', '{'),
            MacroSpec('undergroup', '{'),
            MacroSpec('overbracket', '{'),
            MacroSpec('underbracket', '{'),
            MacroSpec('overlinesegment', '{'),
            MacroSpec('underlinesegment', '{'),
            MacroSpec('overleftharpoon', '{'),
            MacroSpec('overrightharpoon', '{'),

            MacroSpec('xleftarrow', '[{'),
            MacroSpec('xrightarrow', '[{'),

            std_macro('ket', False, 1),
            std_macro('bra', False, 1),
            std_macro('braket', False, 2),
            std_macro('ketbra', False, 2),

            std_macro('texorpdfstring', False, 2),

            # xcolor commands
            MacroSpec('definecolor', '[{{{'),
            MacroSpec('providecolor', '[{{{'),
            MacroSpec('colorlet', '[{[{'),
            MacroSpec('color', '[{'),
            MacroSpec('textcolor', '[{{'),
            MacroSpec('pagecolor', '[{'),
            MacroSpec('nopagecolor', ''),
            MacroSpec('colorbox', '[{{'),
            MacroSpec('fcolorbox', '[{[{{'),
            MacroSpec('boxframe', '{{{'),
            MacroSpec('rowcolors', '*[{{{'),
        ],
        'environments': [
            # NOTE: Starred variants (as in \begin{equation*}) are not specified as
            # for macros with an argspec='*'.  Rather, we need to define a separate
            # spec for the starred variant as the star really is part of the
            # environment name.  If you specify argspec='*', the parser will try to
            # look for an expression of the form '\begin{equation}*'
            std_environment('figure', '['),
            std_environment('figure*', '['),
            std_environment('table', '['),
            std_environment('table*', '['),

            std_environment('abstract', None),

            std_environment('tabular', '{'),
            std_environment('tabular*', '{{'),
            std_environment('tabularx', '{[{'),

            std_environment('array', '[{'),

            std_environment('equation', None, is_math_mode=True),
            std_environment('equation*', None, is_math_mode=True),
            std_environment('eqnarray', None, is_math_mode=True),
            std_environment('eqnarray*', None, is_math_mode=True),

            # AMS environments
            std_environment('align', None, is_math_mode=True),
            std_environment('align*', None, is_math_mode=True),
            std_environment('gather', None, is_math_mode=True),
            std_environment('gather*', None, is_math_mode=True),
            std_environment('flalign', None, is_math_mode=True),
            std_environment('flalign*', None, is_math_mode=True),
            std_environment('multline', None, is_math_mode=True),
            std_environment('multline*', None, is_math_mode=True),
            std_environment('alignat', '{', is_math_mode=True),
            std_environment('alignat*', '{', is_math_mode=True),
            std_environment('split', None, is_math_mode=True),
        ],
        'specials': [
            std_specials('&'),

            # TODO --- for this, we need to parse their argument but don't use
            #          the standard args parser because we need to be able to
            #          accept arguments like "x_\mathrm{initial}"
            #
            #std_specials('^'),
            #std_specials('_'),
        ]}),

    #
    # CATEGORY: nonascii-specials
    #
    ('nonascii-specials', {
        'macros': [],
        'environments': [],
        'specials': [
            std_specials("~"),

            # cf. https://tex.stackexchange.com/a/439652/32188 "fake ligatures":
            std_specials('``'),
            std_specials("''"),
            std_specials("--"),
            std_specials("---"),
            std_specials("!`"),
            std_specials("?`"),
        ]}),

    #
    # CATEGORY: verbatim
    #
    ('verbatim', {
        'macros': [
            MacroSpec('verb',
                      args_parser=VerbatimArgsParser(verbatim_arg_type='verb-macro')),
        ],
        'environments': [
            EnvironmentSpec('verbatim',
                            args_parser=VerbatimArgsParser(verbatim_arg_type='verbatim-environment')),
        ],
        'specials': [
            # optionally users could include the specials "|" like in latex-doc
            # for verbatim |\like \this|...
        ]}),

    ('lstlisting', {
        'macros': [],
        'environments': [
            EnvironmentSpec('lstlisting', args_parser=LstListingArgsParser()),
        ],
        'specials': [
            # optionally users could include the specials "|" like in latex-doc
            # for lstlisting |\like \this|...
        ]}),

    #
    # CATEGORY: theorems
    #
    ('theorems', {
        'macros': [],
        'environments': [
            std_environment('theorem', '['),
            std_environment('proposition', '['),
            std_environment('lemma', '['),
            std_environment('corollary', '['),
            std_environment('definition', '['),
            std_environment('conjecture', '['),
            std_environment('remark', '['),
            #
            std_environment('proof', '['),
            # short names
            std_environment('thm', '['),
            std_environment('prop', '['),
            std_environment('lem', '['),
            std_environment('cor', '['),
            std_environment('conj', '['),
            std_environment('rem', '['),
            std_environment('defn', '['),
        ],
        'specials': [
        ]}),

    #
    # CATEGORY: enumitem
    #
    ('enumitem', {
        'macros': [],
        'environments': [
            std_environment('enumerate', '['),
            std_environment('itemize', '['),
            std_environment('description', '['),
        ],
        'specials': [
        ]}),

    #
    # CATEGORY: natbib
    #
    ('natbib', {
        'macros': [
            std_macro('cite', '*[[{'),
            std_macro('citet', '*[[{'),
            std_macro('citep', '*[[{'),
            std_macro('citealt', '*[[{'),
            std_macro('citealp', '*[[{'),
            std_macro('citeauthor', '*[[{'),
            std_macro('citefullauthor', '[[{'),
            std_macro('citeyear', '[[{'),
            std_macro('citeyearpar', '[[{'),
            std_macro('Citet', '*[[{'),
            std_macro('Citep', '*[[{'),
            std_macro('Citealt', '*[[{'),
            std_macro('Citealp', '*[[{'),
            std_macro('Citeauthor', '*[[{'),
            std_macro('citetext', '{'),
            std_macro('citenum', '{'),
            std_macro('defcitealias', '{{'),
            std_macro('citetalias', '[[{'),
            std_macro('citepalias', '[[{'),
        ],
        'environments': [
        ],
        'specials': [
        ]}),

    #
    # CATEGORY: latex-ethuebung
    #
    ('latex-ethuebung', {
        'macros': [
            # ethuebung
            std_macro('UebungLoesungFont', False, 1),
            std_macro('UebungHinweisFont', False, 1),
            std_macro('UebungExTitleFont', False, 1),
            std_macro('UebungSubExTitleFont', False, 1),
            std_macro('UebungTipsFont', False, 1),
            std_macro('UebungLabel', False, 1),
            std_macro('UebungSubLabel', False, 1),
            std_macro('UebungLabelEnum', False, 1),
            std_macro('UebungLabelEnumSub', False, 1),
            std_macro('UebungSolLabel', False, 1),
            std_macro('UebungHinweisLabel', False, 1),
            std_macro('UebungHinweiseLabel', False, 1),
            std_macro('UebungSolEquationLabel', False, 1),
            std_macro('UebungTipsLabel', False, 1),
            std_macro('UebungTipsEquationLabel', False, 1),
            std_macro('UebungsblattTitleSeries', False, 1),
            std_macro('UebungsblattTitleSolutions', False, 1),
            std_macro('UebungsblattTitleTips', False, 1),
            std_macro('UebungsblattNumber', False, 1),
            std_macro('UebungsblattTitleFont', False, 1),
            std_macro('UebungTitleCenterVSpacing', False, 1),
            std_macro('UebungAttachedSolutionTitleTop', False, 1),
            std_macro('UebungAttachedSolutionTitleFont', False, 1),
            std_macro('UebungAttachedSolutionTitle', False, 1),
            std_macro('UebungTextAttachedSolution', False, 1),
            std_macro('UebungDueByLabel', False, 1),
            std_macro('UebungDueBy', False, 1),
            std_macro('UebungLecture', False, 1),
            std_macro('UebungProf', False, 1),
            std_macro('UebungLecturer', False, 1),
            std_macro('UebungSemester', False, 1),
            std_macro('UebungLogoFile', False, 1),
            std_macro('UebungLanguage', False, 1),
            std_macro('UebungStyle', False, 1),
            #
            std_macro('uebung', '{['),
            std_macro('exercise', '{['),
            std_macro('keywords', False, 1),
            std_macro('subuebung', False, 1),
            std_macro('subexercise', False, 1),
            std_macro('pdfloesung', True, 1),
            std_macro('pdfsolution', True, 1),
            std_macro('exenumfulllabel', False, 1),
            std_macro('hint', False, 1),
            std_macro('hints', False, 1),
            std_macro('hinweis', False, 1),
            std_macro('hinweise', False, 1),
        ],
        'environments': [
        ],
        'specials': [
        ]
    }),
]
| mit | 0141f2b3bc94ec469df61bb3f7adb066 | 36.755991 | 102 | 0.505309 | 3.935059 | false | false | false | false |
tobgu/pyrsistent | pyrsistent/_precord.py | 3 | 7032 | from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants
from pyrsistent._field_common import (
set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
)
from pyrsistent._pmap import PMap, pmap
class _PRecordMeta(type):
    """
    Metaclass that collects field and invariant declarations for PRecord
    subclasses and pre-computes the derived class attributes used at runtime.
    """
    def __new__(mcs, name, bases, dct):
        # Gather Field() declarations (own + inherited) into _precord_fields
        # and class/field invariants into _precord_invariants.
        set_fields(dct, bases, name='_precord_fields')
        store_invariants(dct, bases, '_precord_invariants', '__invariant__')

        # Fields that must be present when the record is made persistent.
        dct['_precord_mandatory_fields'] = \
            set(name for name, field in dct['_precord_fields'].items() if field.mandatory)

        # Declared initial values (only for fields that have one).
        dct['_precord_initial_values'] = \
            dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL)

        # Record data lives in the underlying PMap; no per-instance __dict__.
        dct['__slots__'] = ()

        return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct)
class PRecord(PMap, CheckedType, metaclass=_PRecordMeta):
    """
    A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting
    from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
    access using subscript notation.

    More documentation and examples of PRecord usage is available at https://github.com/tobgu/pyrsistent
    """
    def __new__(cls, **kwargs):
        # Hack total! If these two special attributes exist that means we can create
        # ourselves. Otherwise we need to go through the Evolver to create the structures
        # for us.
        if '_precord_size' in kwargs and '_precord_buckets' in kwargs:
            return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets'])

        # Internal keyword arguments consumed here, never stored as fields.
        factory_fields = kwargs.pop('_factory_fields', None)
        ignore_extra = kwargs.pop('_ignore_extra', False)

        initial_values = kwargs
        if cls._precord_initial_values:
            # Start from the declared initial values (callables are invoked to
            # produce a fresh value per instance), then let explicit kwargs win.
            initial_values = dict((k, v() if callable(v) else v)
                                  for k, v in cls._precord_initial_values.items())
            initial_values.update(kwargs)

        # Build through the evolver so that field factories, type checks and
        # invariants are all applied.
        e = _PRecordEvolver(cls, pmap(pre_size=len(cls._precord_fields)), _factory_fields=factory_fields, _ignore_extra=ignore_extra)
        for k, v in initial_values.items():
            e[k] = v

        return e.persistent()

    def set(self, *args, **kwargs):
        """
        Set a field in the record. This set function differs slightly from that in the PMap
        class. First of all it accepts key-value pairs. Second it accepts multiple key-value
        pairs to perform one, atomic, update of multiple fields.
        """
        # The PRecord set() can accept kwargs since all fields that have been declared are
        # valid python identifiers. Also allow multiple fields to be set in one operation.
        if args:
            return super(PRecord, self).set(args[0], args[1])

        return self.update(kwargs)

    def evolver(self):
        """
        Returns an evolver of this object.
        """
        return _PRecordEvolver(self.__class__, self)

    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__,
                                 ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items()))

    @classmethod
    def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
        """
        Factory method. Will create a new PRecord of the current type and assign the values
        specified in kwargs.

        :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
                             in the set of fields on the PRecord.
        """
        if isinstance(kwargs, cls):
            # Already an instance of this record type; return it unchanged.
            return kwargs

        if ignore_extra:
            kwargs = {k: kwargs[k] for k in cls._precord_fields if k in kwargs}

        return cls(_factory_fields=_factory_fields, _ignore_extra=ignore_extra, **kwargs)

    def __reduce__(self):
        # Pickling support
        return _restore_pickle, (self.__class__, dict(self),)

    def serialize(self, format=None):
        """
        Serialize the current PRecord using custom serializer functions for fields where
        such have been supplied.
        """
        return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items())
class _PRecordEvolver(PMap._Evolver):
    """
    PMap evolver specialized for PRecord: applies field factories, type checks
    and invariants on every set(), and validates mandatory fields/invariants
    when made persistent.
    """
    __slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra')

    def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False):
        super(_PRecordEvolver, self).__init__(original_pmap)
        self._destination_cls = cls
        # Errors are accumulated here and only raised in persistent(), so a
        # single InvariantException can report everything at once.
        self._invariant_error_codes = []
        self._missing_fields = []
        self._factory_fields = _factory_fields
        self._ignore_extra = _ignore_extra

    def __setitem__(self, key, original_value):
        self.set(key, original_value)

    def set(self, key, original_value):
        field = self._destination_cls._precord_fields.get(key)
        if field:
            if self._factory_fields is None or field in self._factory_fields:
                try:
                    if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra):
                        value = field.factory(original_value, ignore_extra=self._ignore_extra)
                    else:
                        value = field.factory(original_value)
                except InvariantException as e:
                    # Defer: collect errors now, raise them in persistent().
                    self._invariant_error_codes += e.invariant_errors
                    self._missing_fields += e.missing_fields
                    return self
            else:
                value = original_value

            check_type(self._destination_cls, field, key, value)

            is_ok, error_code = field.invariant(value)
            if not is_ok:
                self._invariant_error_codes.append(error_code)

            return super(_PRecordEvolver, self).set(key, value)
        else:
            raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__))

    def persistent(self):
        cls = self._destination_cls
        is_dirty = self.is_dirty()
        pm = super(_PRecordEvolver, self).persistent()
        if is_dirty or not isinstance(pm, cls):
            # Re-wrap the plain PMap buckets in the record class (this is the
            # fast path used by PRecord.__new__).
            result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size)
        else:
            result = pm

        if cls._precord_mandatory_fields:
            self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f
                                          in (cls._precord_mandatory_fields - set(result.keys())))

        if self._invariant_error_codes or self._missing_fields:
            raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields),
                                     'Field invariant failed')

        check_global_invariants(result, cls._precord_invariants)

        return result
| mit | 3bac5bb94542f23954e8f52c1180ca00 | 41.107784 | 133 | 0.607651 | 4.07416 | false | false | false | false |
gwu-libraries/social-feed-manager | sfm/ui/migrations/0009_auto__add_rule.py | 2 | 3722 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding model 'Rule': create the backing table with its columns and
        # notify South so post-create signals fire for the new model.
        db.create_table('ui_rule', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('people', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('words', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('locations', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('ui', ['Rule'])
def backwards(self, orm):
# Deleting model 'Rule'
db.delete_table('ui_rule')
models = {
'ui.rule': {
'Meta': {'object_name': 'Rule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'people': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'words': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'ui.status': {
'Meta': {'ordering': "['-date_published']", 'object_name': 'Status'},
'avatar_url': ('django.db.models.fields.TextField', [], {}),
'content': ('django.db.models.fields.TextField', [], {}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rule_match': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'rule_tag': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'status_id': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'user_id': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'ui.trenddaily': {
'Meta': {'ordering': "['-date', 'name']", 'object_name': 'TrendDaily'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'events': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'promoted_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'query': ('django.db.models.fields.TextField', [], {})
},
'ui.trendweekly': {
'Meta': {'ordering': "['-date', 'name']", 'object_name': 'TrendWeekly'},
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'events': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'promoted_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'query': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['ui'] | mit | 6b6ded71c810d3f852de6a5857b5f652 | 53.75 | 103 | 0.537346 | 3.674235 | false | false | false | false |
gwu-libraries/social-feed-manager | sfm/ui/migrations/0017_auto__del_trendweekly__del_trenddaily.py | 2 | 7557 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'TrendWeekly'
db.delete_table(u'ui_trendweekly')
# Deleting model 'TrendDaily'
db.delete_table(u'ui_trenddaily')
def backwards(self, orm):
# Adding model 'TrendWeekly'
db.create_table(u'ui_trendweekly', (
('name', self.gf('django.db.models.fields.TextField')(db_index=True)),
('promoted_content', self.gf('django.db.models.fields.TextField')(blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateField')(db_index=True)),
('query', self.gf('django.db.models.fields.TextField')()),
('events', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('ui', ['TrendWeekly'])
# Adding model 'TrendDaily'
db.create_table(u'ui_trenddaily', (
('name', self.gf('django.db.models.fields.TextField')(db_index=True)),
('promoted_content', self.gf('django.db.models.fields.TextField')(blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('query', self.gf('django.db.models.fields.TextField')()),
('events', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('ui', ['TrendDaily'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ui.dailytwitteruseritemcount': {
'Meta': {'ordering': "['date']", 'object_name': 'DailyTwitterUserItemCount'},
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_tweets': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'twitter_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'daily_counts'", 'to': u"orm['ui.TwitterUser']"})
},
u'ui.rule': {
'Meta': {'object_name': 'Rule'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'people': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'words': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'ui.twitteruser': {
'Meta': {'object_name': 'TwitterUser'},
'date_last_checked': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
u'ui.twitteruseritem': {
'Meta': {'object_name': 'TwitterUserItem'},
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_json': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'item_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'place': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'twitter_id': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'unique': 'True'}),
'twitter_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['ui.TwitterUser']"})
}
}
complete_apps = ['ui'] | mit | 36d97e0c5ef91c05a0ef22152414b945 | 64.155172 | 187 | 0.55485 | 3.620987 | false | false | false | false |
choderalab/perses | perses/analysis/extract_trajectory.py | 1 | 1648 | """
Functions to extract trajectory from a perses relative calculation
"""
import numpy as np
import mdtraj as md
from perses.analysis.utils import open_netcdf
def get_hybrid_topology(file):
""" Extracts hybrid_topology object from the .npy file generated by relative calculations
Parameters
----------
file : string
name of, or path to .npy file
Returns
-------
phases : list
list of phases found in .npy file
topologies : list
list of hybrid_topology objects
"""
hybrid_factory = np.load(file)
hybrid_factory = hybrid_factory.flatten()[0]
phases = []
topologies = []
for phase in hybrid_factory.keys():
topologies.append(hybrid_factory[phase].hybrid_topology)
return phases, topologies
def get_positions(file):
ncfile = open_netcdf(file)
all_positions = ncfile.variables['positions']
results = []
for i,pos in enumerate(all_positions):
coords = []
pos = pos.tolist()
results.append(pos[0])
return results
def write_trajectory(positions, topology, outputfile='trajectory.pdb',center=True,offline=None):
if offline != None:
traj = md.Trajectory(positions[0::offline],topology)
else:
traj = md.Trajectory(positions, topology)
if center == True:
traj.center_coordinates()
traj.save_pdb(outputfile)
return
if __name__ == '__main__':
import sys
ncfilename = sys.argv[1]
factoryfilename = sys.argv[2]
positions = get_positions(ncfilename)
_, topology = get_hybrid_topology(factoryfilename)
write_trajectory(positions,topology[0])
| mit | e811eafb5d4046e0271808a74c101dd2 | 22.542857 | 96 | 0.651699 | 3.933174 | false | false | false | false |
choderalab/perses | perses/rjmc/geometry.py | 1 | 142485 | """
This contains the base class for the geometry engine, which proposes new positions
for each additional atom that must be added.
"""
from simtk import unit
import numpy as np
import networkx as nx
from perses.storage import NetCDFStorageView
################################################################################
# Initialize logging
################################################################################
import logging
logging.basicConfig(level=logging.NOTSET)
_logger = logging.getLogger("geometry")
_logger.setLevel(logging.INFO)
################################################################################
# Constants
################################################################################
LOG_ZERO = -1.0e+6
ENERGY_MISMATCH_RATIO_THRESHOLD = 1e-3
ENERGY_THRESHOLD = 1e-6
################################################################################
# Utility methods
################################################################################
def check_dimensionality(quantity, compatible_units):
    """
    Verify that ``quantity`` is dimensionally compatible with ``compatible_units``.

    Parameters
    ----------
    quantity : simtk.unit.Quantity or float
        The quantity to be checked
    compatible_units : simtk.unit.Quantity or simtk.unit.Unit or float
        Ensure ``quantity`` is either float or numpy array (if ``float`` specified) or is compatible with the specified units

    Raises
    ------
    ValueError if the specified quantity does not have the appropriate dimensionality or type

    Returns
    -------
    is_compatible : bool
        Returns True if dimensionality is as requested
    """
    # Unit-bearing target: dividing out the expected units must leave a dimensionless result.
    if unit.is_quantity(compatible_units) or unit.is_unit(compatible_units):
        # Import lazily so either the legacy simtk namespace or the modern openmm one works.
        try:
            from simtk.unit.quantity import is_dimensionless
        except ModuleNotFoundError:
            from openmm.unit.quantity import is_dimensionless
        if not is_dimensionless(quantity / compatible_units):
            raise ValueError('{} does not have units compatible with expected {}'.format(quantity, compatible_units))
        return True

    # Plain-float target: accept bare floats and numpy arrays only.
    if compatible_units == float:
        if not (isinstance(quantity, float) or isinstance(quantity, np.ndarray)):
            raise ValueError("'{}' expected to be a float, but was instead {}".format(quantity, type(quantity)))
        return True

    # Any other target type is unsupported.
    raise ValueError("Don't know how to handle compatible_units of {}".format(compatible_units))
class GeometryEngine(object):
    """
    Abstract base class for geometry proposal engines.

    Subclasses propose positions for atoms that must be added during a topology
    change, and evaluate the log-probability of such proposals. The base-class
    methods are inert stubs.

    Parameters
    ----------
    metadata : dict
        GeometryEngine-related metadata as a dict
    """

    def __init__(self, metadata=None, storage=None):
        # TODO: Either this base constructor should be called by subclasses, or we should remove its arguments.
        pass

    def propose(self, top_proposal, current_positions, beta):
        """
        Make a geometry proposal for the appropriate atoms.

        Parameters
        ----------
        top_proposal : TopologyProposal object
            Object containing the relevant results of a topology proposal
        current_positions : [n, 3] ndarray
            The current positions of the system
        beta : float
            The inverse temperature

        Returns
        -------
        new_positions : [n, 3] ndarray
            The new positions of the system
        """
        # Stub implementation: returns an all-zero coordinate triple.
        return np.array([0.0, 0.0, 0.0])

    def logp_reverse(self, top_proposal, new_coordinates, old_coordinates, beta):
        """
        Calculate the logp for the given geometry proposal

        Parameters
        ----------
        top_proposal : TopologyProposal object
            Object containing the relevant results of a topology proposal
        new_coordinates : [n, 3] np.ndarray
            The coordinates of the system after the proposal
        old_coordinates : [n, 3] np.ndarray
            The coordinates of the system before the proposal
        beta : float
            The inverse temperature

        Returns
        -------
        logp : float
            The log probability of the proposal for the given transformation
        """
        # Stub implementation: a zero log-probability.
        return 0.0
class FFAllAngleGeometryEngine(GeometryEngine):
"""
This is an implementation of GeometryEngine which uses all valence terms and OpenMM
Parameters
----------
use_sterics : bool, optional, default=False
If True, sterics will be used in proposals to minimize clashes.
This may significantly slow down the simulation, however.
n_bond_divisions : int, default 1000
number of bond divisions in choosing the r for added/deleted atoms
n_angle_divisions : int, default 180
number of bond angle divisions in choosing theta for added/deleted atoms
n_torsion_divisions : int, default 360
        number of torsion angle divisions in choosing phi for added/deleted atoms
verbose: bool, default True
whether to be verbose in output
storage: bool (or None), default None
whether to use NetCDFStorage
bond_softening_constant : float (between 0, 1), default 1.0
how much to soften bonds
angle_softening_constant : float (between 0, 1), default 1.0
how much to soften angles
neglect_angles : bool, optional, default True
whether to ignore and report on theta angle potentials that add variance to the work
use_14_nonbondeds : bool, default True
whether to consider 1,4 exception interactions in the geometry proposal
        NOTE: if this is set to true, then in the HybridTopologyFactory, the argument 'interpolate_old_and_new_14s' must be set to False; and vice versa
TODO : remove Context objects and checks since they are clunky and no longer used for troubleshooting
"""
def __init__(self,
metadata=None,
use_sterics=False,
n_bond_divisions=1000,
n_angle_divisions=180,
n_torsion_divisions=360,
verbose=True,
storage=None,
bond_softening_constant=1.0,
angle_softening_constant=1.0,
neglect_angles = False,
use_14_nonbondeds = True):
self._metadata = metadata
self.write_proposal_pdb = False # if True, will write PDB for sequential atom placements
self.pdb_filename_prefix = 'geometry-proposal' # PDB file prefix for writing sequential atom placements
self.nproposed = 0 # number of times self.propose() has been called
self.verbose = verbose
self.use_sterics = use_sterics
self._use_14_nonbondeds = use_14_nonbondeds
# if self.use_sterics: #not currently supported
# raise Exception("steric contributions are not currently supported.")
self._n_bond_divisions = n_bond_divisions
self._n_angle_divisions = n_angle_divisions
self._n_torsion_divisions = n_torsion_divisions
self._bond_softening_constant = bond_softening_constant
self._angle_softening_constant = angle_softening_constant
if storage:
self._storage = NetCDFStorageView(modname="GeometryEngine", storage=storage)
else:
self._storage = None
self.neglect_angles = neglect_angles
def propose(self, top_proposal, current_positions, beta, validate_energy_bookkeeping = True):
"""
Make a geometry proposal for the appropriate atoms.
Parameters
----------
top_proposal : TopologyProposal object
Object containing the relevant results of a topology proposal
current_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The current positions
beta : simtk.unit.Quantity with units compatible with 1/(kilojoules_per_mole)
The inverse thermal energy
validate_energy_bookkeeping : bool
whether to validate the energy mismatch ratio; this is no longer strictly necessary, and will certainly fail if ring closure or non-conservative perturbations are conducted
(non-conservative transformations are defined as transformations wherein not _all_ of the valence energies are used to make topology proposals...)
Returns
-------
new_positions : [n, 3] ndarray
The new positions of the system
logp_proposal : float
The log probability of the forward-only proposal
"""
_logger.info("propose: performing forward proposal")
# Ensure positions have units compatible with nanometers
check_dimensionality(current_positions, unit.nanometers)
check_dimensionality(beta, unit.kilojoules_per_mole**(-1))
# TODO: Change this to use md_unit_system instead of hard-coding nanometers
if not top_proposal.unique_new_atoms:
_logger.info("propose: there are no unique new atoms; logp_proposal = 0.0.")
# If there are no unique new atoms, return new positions in correct order for new topology object and log probability of zero
# TODO: Carefully check this
import parmed
structure = parmed.openmm.load_topology(top_proposal.old_topology, top_proposal._old_system)
atoms_with_positions = [ structure.atoms[atom_idx] for atom_idx in top_proposal.new_to_old_atom_map.keys() ]
new_positions = self._copy_positions(atoms_with_positions, top_proposal, current_positions)
logp_proposal, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms = 0.0, None, None, None, None
self.forward_final_growth_system = None
else:
_logger.info("propose: unique new atoms detected; proceeding to _logp_propose...")
logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_terms = self._logp_propose(top_proposal, current_positions, beta, direction='forward', validate_energy_bookkeeping = validate_energy_bookkeeping)
self.nproposed += 1
check_dimensionality(new_positions, unit.nanometers)
check_dimensionality(logp_proposal, float)
#define forward attributes
self.forward_rjmc_info = rjmc_info
self.forward_atoms_with_positions_reduced_potential, self.forward_final_context_reduced_potential = atoms_with_positions_reduced_potential, final_context_reduced_potential
self.forward_neglected_angle_terms = neglected_angle_terms
return new_positions, logp_proposal
def logp_reverse(self, top_proposal, new_coordinates, old_coordinates, beta, validate_energy_bookkeeping = True):
"""
Calculate the logp for the given geometry proposal
Parameters
----------
top_proposal : TopologyProposal object
Object containing the relevant results of a topology proposal
new_coordinates : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The coordinates of the system after the proposal
old_coordiantes : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The coordinates of the system before the proposal
beta : simtk.unit.Quantity with units compatible with 1/(kilojoules_per_mole)
The inverse thermal energy
validate_energy_bookkeeping : bool
whether to validate the energy mismatch ratio; this is no longer strictly necessary, and will certainly fail if ring closure or non-conservative perturbations are conducted
(non-conservative transformations are defined as transformations wherein not _all_ of the valence energies are used to make topology proposals...)
Returns
-------
logp : float
The log probability of the proposal for the given transformation
"""
_logger.info("logp_reverse: performing reverse proposal")
check_dimensionality(new_coordinates, unit.nanometers)
check_dimensionality(old_coordinates, unit.nanometers)
check_dimensionality(beta, unit.kilojoules_per_mole**(-1))
# If there are no unique old atoms, the log probability is zero.
if not top_proposal.unique_old_atoms:
_logger.info("logp_reverse: there are no unique old atoms; logp_proposal = 0.0.")
#define reverse attributes
self.reverse_new_positions, self.reverse_rjmc_info, self.reverse_atoms_with_positions_reduced_potential, self.reverse_final_context_reduced_potential, self.reverse_neglected_angle_terms = None, None, None, None, None
self.reverse_final_growth_system = None
return 0.0
# Compute log proposal probability for reverse direction
_logger.info("logp_reverse: unique new atoms detected; proceeding to _logp_propose...")
logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_terms = self._logp_propose(top_proposal, old_coordinates, beta, new_positions=new_coordinates, direction='reverse', validate_energy_bookkeeping = validate_energy_bookkeeping)
self.reverse_new_positions, self.reverse_rjmc_info = new_positions, rjmc_info
self.reverse_atoms_with_positions_reduced_potential, self.reverse_final_context_reduced_potential = atoms_with_positions_reduced_potential, final_context_reduced_potential
self.reverse_neglected_angle_terms = neglected_angle_terms
check_dimensionality(logp_proposal, float)
return logp_proposal
def _write_partial_pdb(self, pdbfile, topology, positions, atoms_with_positions, model_index):
"""
Write the subset of the molecule for which positions are defined.
Parameters
----------
pdbfile : file-like object
The open file-like object for the PDB file being written
topology : simtk.openmm.Topology
The OpenMM Topology object
positions : simtk.unit.Quantity of shape (n_atoms, 3) with units compatible with nanometers
The positions
atoms_with_positions : list of parmed.Atom
parmed Atom objects for which positions have been defined
model_index : int
The MODEL index for the PDB file to use
"""
check_dimensionality(positions, unit.nanometers)
from simtk.openmm.app import Modeller
modeller = Modeller(topology, positions)
atom_indices_with_positions = [ atom.idx for atom in atoms_with_positions ]
atoms_to_delete = [ atom for atom in modeller.topology.atoms() if (atom.index not in atom_indices_with_positions) ]
modeller.delete(atoms_to_delete)
pdbfile.write('MODEL %5d\n' % model_index)
from simtk.openmm.app import PDBFile
PDBFile.writeFile(modeller.topology, modeller.positions, file=pdbfile)
pdbfile.flush()
pdbfile.write('ENDMDL\n')
def _logp_propose(self, top_proposal, old_positions, beta, new_positions=None, direction='forward',
validate_energy_bookkeeping=True, platform_name='CPU'):
"""
This is an INTERNAL function that handles both the proposal and the logp calculation,
to reduce code duplication. Whether it proposes or just calculates a logp is based on
the direction option. Note that with respect to "new" and "old" terms, "new" will always
mean the direction we are proposing (even in the reverse case), so that for a reverse proposal,
this function will still take the new coordinates as new_coordinates
Parameters
----------
top_proposal : topology_proposal.TopologyProposal object
topology proposal containing the relevant information
old_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The coordinates of the system before the proposal
beta : simtk.unit.Quantity with units compatible with 1/(kilojoules_per_mole)
The inverse thermal energy
new_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers, optional, default=None
The coordinates of the system after the proposal, or None for forward proposals
direction : str
Whether to make a proposal ('forward') or just calculate logp ('reverse')
validate_energy_bookkeeping : bool
whether to validate the energy mismatch ratio; this is no longer strictly necessary, and will certainly fail if ring closure or non-conservative perturbations are conducted
(non-conservative transformations are defined as transformations wherein not _all_ of the valence energies are used to make topology proposals...)
Returns
-------
logp_proposal : float
the logp of the proposal
new_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The new positions (same as input if direction='reverse')
rjmc_info: list
List of proposal information, of form [atom.idx, u_r, u_theta, r, theta, phi, logp_r, logp_theta, logp_phi, np.log(detJ), added_energy, proposal_prob]
atoms_with_positions_reduced_potential : float
energy of core atom configuration (i.e. before any proposal is made).
final_context_reduced_potential : float
            energy of final system (corrected for valence-only and whether angles are neglected). In reverse regime, this is the old system.
neglected_angle_terms : list of ints
list of indices corresponding to the angle terms in the corresponding system that are neglected (i.e. which are to be
placed into the lambda perturbation scheme)
omitted_growth_terms : dict
dictionary of terms that have been omitted in the proposal
the dictionary carries indices corresponding to the new or old topology, depending on whether the proposal is forward, or reverse (respectively)
"""
_logger.info("Conducting forward proposal...")
import copy
from perses.dispersed.utils import compute_potential_components
# Ensure all parameters have the expected units
check_dimensionality(old_positions, unit.angstroms)
if new_positions is not None:
check_dimensionality(new_positions, unit.angstroms)
# Determine order in which atoms (and the torsions they are involved in) will be proposed
_logger.info("Computing proposal order with NetworkX...")
proposal_order_tool = NetworkXProposalOrder(top_proposal, direction=direction)
torsion_proposal_order, logp_choice, omitted_bonds = proposal_order_tool.determine_proposal_order()
atom_proposal_order = [ torsion[0] for torsion in torsion_proposal_order ]
# some logs for clarity
_logger.info(f"number of atoms to be placed: {len(atom_proposal_order)}")
_logger.info(f"Atom index proposal order is {atom_proposal_order}")
_logger.info(f"omitted_bonds: {omitted_bonds}")
growth_parameter_name = 'growth_stage'
if direction=="forward":
_logger.info("direction of proposal is forward; creating atoms_with_positions and new positions from old system/topology...")
# Find and copy known positions to match new topology
import parmed
structure = parmed.openmm.load_topology(top_proposal.new_topology, top_proposal.new_system)
atoms_with_positions = [structure.atoms[atom_idx] for atom_idx in top_proposal.new_to_old_atom_map.keys()]
new_positions = self._copy_positions(atoms_with_positions, top_proposal, old_positions)
self._new_posits = copy.deepcopy(new_positions)
# Create modified System object
_logger.info("creating growth system...")
growth_system_generator = GeometrySystemGenerator(top_proposal.new_system,
torsion_proposal_order,
omitted_bonds = omitted_bonds,
reference_topology = top_proposal._new_topology,
global_parameter_name=growth_parameter_name,
use_sterics=self.use_sterics,
neglect_angles = self.neglect_angles,
use_14_nonbondeds = self._use_14_nonbondeds)
growth_system = growth_system_generator.get_modified_system()
elif direction=='reverse':
_logger.info("direction of proposal is reverse; creating atoms_with_positions from old system/topology")
if new_positions is None:
raise ValueError("For reverse proposals, new_positions must not be none.")
# Find and copy known positions to match old topology
import parmed
structure = parmed.openmm.load_topology(top_proposal.old_topology, top_proposal.old_system)
atoms_with_positions = [structure.atoms[atom_idx] for atom_idx in top_proposal.old_to_new_atom_map.keys()]
# Create modified System object
_logger.info("creating growth system...")
growth_system_generator = GeometrySystemGenerator(top_proposal.old_system,
torsion_proposal_order,
omitted_bonds = omitted_bonds,
reference_topology = top_proposal._old_topology,
global_parameter_name=growth_parameter_name,
use_sterics=self.use_sterics,
neglect_angles = self.neglect_angles,
use_14_nonbondeds = self._use_14_nonbondeds)
growth_system = growth_system_generator.get_modified_system()
else:
raise ValueError("Parameter 'direction' must be forward or reverse")
# Define a system for the core atoms before new atoms are placed
self.atoms_with_positions_system = growth_system_generator._atoms_with_positions_system
self.growth_system = growth_system
# Get the angle terms that are neglected from the growth system
neglected_angle_terms = growth_system_generator.neglected_angle_terms
_logger.info(f"neglected angle terms include {neglected_angle_terms}")
# Rename the logp_choice from the NetworkXProposalOrder for the purpose of adding logPs in the growth stage
logp_proposal = np.sum(np.array(logp_choice))
_logger.info(f"log probability choice of torsions and atom order: {logp_proposal}")
if self._storage:
self._storage.write_object("{}_proposal_order".format(direction), proposal_order_tool, iteration=self.nproposed)
# Create an OpenMM context
from simtk import openmm
from perses.dispersed.utils import configure_platform
_logger.info("creating platform, integrators, and contexts; setting growth parameter")
platform = configure_platform(platform_name, fallback_platform_name='Reference', precision='double')
integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
atoms_with_positions_system_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
final_system_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
context = openmm.Context(growth_system, integrator, platform)
growth_system_generator.set_growth_parameter_index(len(atom_proposal_order)+1, context)
#create final growth contexts for nonalchemical perturbations...
if direction == 'forward':
self.forward_final_growth_system = copy.deepcopy(context.getSystem())
elif direction == 'reverse':
self.reverse_final_growth_system = copy.deepcopy(context.getSystem())
growth_parameter_value = 1 # Initialize the growth_parameter value before the atom placement loop
# In the forward direction, atoms_with_positions_system considers the atoms_with_positions
# In the reverse direction, atoms_with_positions_system considers the old_positions of atoms in the
atoms_with_positions_context = openmm.Context(self.atoms_with_positions_system, atoms_with_positions_system_integrator, platform)
if direction == 'forward':
_logger.info("setting atoms_with_positions context new positions")
atoms_with_positions_context.setPositions(new_positions)
else:
_logger.info("setting atoms_with_positions context old positions")
atoms_with_positions_context.setPositions(old_positions)
#Print the energy of the system before unique_new/old atoms are placed...
state = atoms_with_positions_context.getState(getEnergy=True)
atoms_with_positions_reduced_potential = beta*state.getPotentialEnergy()
atoms_with_positions_reduced_potential_components = compute_potential_components(atoms_with_positions_context,
platform=platform)
_logger.debug(f'atoms_with_positions_reduced_potential_components:')
for f, e in atoms_with_positions_reduced_potential_components.items():
_logger.debug(f'\t{f} : {e}')
atoms_with_positions_methods_differences = abs(atoms_with_positions_reduced_potential -
sum(atoms_with_positions_reduced_potential_components.values()))
_logger.debug(f'Diffence in energy on adding unique atoms: {atoms_with_positions_methods_differences}')
assert atoms_with_positions_methods_differences < \
ENERGY_THRESHOLD, f"the difference between the atoms_with_positions_reduced_potential and the sum of " \
f"atoms_with_positions_reduced_potential_components is" \
f" {atoms_with_positions_methods_differences}"
# Place each atom in predetermined order
_logger.info("There are {} new atoms".format(len(atom_proposal_order)))
rjmc_info = list()
energy_logger = [] #for bookkeeping per_atom energy reduced potentials
for torsion_atom_indices, proposal_prob in zip(torsion_proposal_order, logp_choice):
_logger.debug(f"Proposing torsion {torsion_atom_indices} with proposal probability {proposal_prob}")
# Get parmed Structure Atom objects associated with torsion
atom, bond_atom, angle_atom, torsion_atom = [ structure.atoms[index] for index in torsion_atom_indices ]
# Activate the new atom interactions
growth_system_generator.set_growth_parameter_index(growth_parameter_value, context=context)
# Get internal coordinates if direction is reverse
if direction == 'reverse':
atom_coords, bond_coords, angle_coords, torsion_coords = [ old_positions[index] for index in torsion_atom_indices ]
internal_coordinates, detJ = self._cartesian_to_internal(atom_coords, bond_coords, angle_coords, torsion_coords)
# Extract dimensionless internal coordinates
r, theta, phi = internal_coordinates[0], internal_coordinates[1], internal_coordinates[2] # dimensionless
_logger.debug(f"\treverse proposal: r = {r}; theta = {theta}; phi = {phi}")
bond = self._get_relevant_bond(atom, bond_atom)
if bond is not None:
if direction == 'forward':
r = self._propose_bond(bond, beta, self._n_bond_divisions)
_logger.debug(f"\tproposing forward bond of {r}.")
logp_r = self._bond_logp(r, bond, beta, self._n_bond_divisions)
_logger.debug(f"\tlogp_r = {logp_r}.")
# Retrieve relevant quantities for valence bond and compute u_r
r0, k = bond.type.req, bond.type.k * self._bond_softening_constant
sigma_r = unit.sqrt((1.0/(beta*k)))
r0, k, sigma_r = r0.value_in_unit_system(unit.md_unit_system), k.value_in_unit_system(unit.md_unit_system), sigma_r.value_in_unit_system(unit.md_unit_system)
u_r = 0.5*((r - r0)/sigma_r)**2
_logger.debug(f"\treduced r potential = {u_r}.")
else:
if direction == 'forward':
constraint = self._get_bond_constraint(atom, bond_atom, top_proposal.new_system)
if constraint is None:
raise ValueError("Structure contains a topological bond [%s - %s] with no constraint or bond information." % (str(atom), str(bond_atom)))
r = constraint.value_in_unit_system(unit.md_unit_system) #set bond length to exactly constraint
_logger.debug(f"\tproposing forward constrained bond of {r} with log probability of 0.0 and implied u_r of 0.0.")
logp_r = 0.0
u_r = 0.0
# Propose an angle and calculate its log probability
angle = self._get_relevant_angle(atom, bond_atom, angle_atom)
if direction=='forward':
theta = self._propose_angle(angle, beta, self._n_angle_divisions)
_logger.debug(f"\tproposing forward angle of {theta}.")
logp_theta = self._angle_logp(theta, angle, beta, self._n_angle_divisions)
_logger.debug(f"\t logp_theta = {logp_theta}.")
# Retrieve relevant quantities for valence angle and compute u_theta
theta0, k = angle.type.theteq, angle.type.k * self._angle_softening_constant
sigma_theta = unit.sqrt(1.0/(beta * k))
theta0, k, sigma_theta = theta0.value_in_unit_system(unit.md_unit_system), k.value_in_unit_system(unit.md_unit_system), sigma_theta.value_in_unit_system(unit.md_unit_system)
u_theta = 0.5*((theta - theta0)/sigma_theta)**2
_logger.info(f"\treduced angle potential = {u_theta}.")
# Propose a torsion angle and calcualate its log probability
if direction=='forward':
# Note that (r, theta) are dimensionless here
phi, logp_phi = self._propose_torsion(context, torsion_atom_indices, new_positions, r, theta, beta, self._n_torsion_divisions)
xyz, detJ = self._internal_to_cartesian(new_positions[bond_atom.idx], new_positions[angle_atom.idx], new_positions[torsion_atom.idx], r, theta, phi)
new_positions[atom.idx] = xyz
_logger.debug(f"\tproposing forward torsion of {phi}.")
_logger.debug(f"\tsetting new_positions[{atom.idx}] to {xyz}. ")
else:
old_positions_for_torsion = copy.deepcopy(old_positions)
# Note that (r, theta, phi) are dimensionless here
logp_phi = self._torsion_logp(context, torsion_atom_indices, old_positions_for_torsion, r, theta, phi, beta, self._n_torsion_divisions)
_logger.debug(f"\tlogp_phi = {logp_phi}")
# Compute potential energy
if direction == 'forward':
context.setPositions(new_positions)
else:
context.setPositions(old_positions)
state = context.getState(getEnergy=True)
reduced_potential_energy = beta*state.getPotentialEnergy()
_logger.debug(f"\taccumulated growth context reduced energy = {reduced_potential_energy}")
#Compute change in energy from previous reduced potential
if growth_parameter_value == 1: # then there is no previous reduced potential so u_phi is simply reduced_potential_energy - u_r - u_theta
added_energy = reduced_potential_energy
else:
previous_reduced_potential_energy = energy_logger[-1]
added_energy = reduced_potential_energy - previous_reduced_potential_energy
_logger.debug(f"growth index {growth_parameter_value} added reduced energy = {added_energy}.")
atom_placement_dict = {'atom_index': atom.idx,
'u_r': u_r,
'u_theta' : u_theta,
'r': r,
'theta': theta,
'phi': phi,
'logp_r': logp_r,
'logp_theta': logp_theta,
'logp_phi': logp_phi,
'log_detJ': np.log(detJ),
'added_energy': added_energy,
'proposal_prob': proposal_prob}
rjmc_info.append(atom_placement_dict)
logp_proposal += logp_r + logp_theta + logp_phi - np.log(detJ) # TODO: Check sign of detJ
growth_parameter_value += 1
energy_logger.append(reduced_potential_energy)
# DEBUG: Write PDB file for placed atoms
atoms_with_positions.append(atom)
_logger.debug(f"\tatom placed, rjmc_info list updated, and growth_parameter_value incremented.")
# assert that the energy of the new positions is ~= atoms_with_positions_reduced_potential + reduced_potential_energy
# The final context is treated in the same way as the atoms_with_positions_context
if direction == 'forward': #if the direction is forward, the final system for comparison is top_proposal's new system
_system, _positions = top_proposal._new_system, new_positions
else:
_system, _positions = top_proposal._old_system, old_positions
if not self.use_sterics:
final_system = self._define_no_nb_system(_system, neglected_angle_terms, atom_proposal_order)
_logger.info(f"{direction} final system defined with {len(neglected_angle_terms)} neglected angles.")
else:
final_system = copy.deepcopy(_system)
force_names = {force.__class__.__name__ : index for index, force in enumerate(final_system.getForces())}
if 'NonbondedForce' in force_names.keys():
final_system.getForce(force_names['NonbondedForce']).setUseDispersionCorrection(False)
_logger.info(f"{direction} final system defined with nonbonded interactions.")
final_context = openmm.Context(final_system, final_system_integrator, platform)
final_context.setPositions(_positions)
state = final_context.getState(getEnergy=True)
final_context_reduced_potential = beta*state.getPotentialEnergy()
final_context_components = [(force, energy*beta) for force, energy in
compute_potential_components(final_context, platform=platform).items()]
atoms_with_positions_reduced_potential_components = [
(force, energy*beta) for force, energy in compute_potential_components(atoms_with_positions_context,
platform=platform).items()
]
_logger.debug(f"reduced potential components before atom placement:")
for item in atoms_with_positions_reduced_potential_components:
_logger.debug(f"\t\t{item[0]}: {item[1]}")
_logger.info(f"total reduced potential before atom placement: {atoms_with_positions_reduced_potential}")
_logger.debug(f"potential components added from growth system:")
added_energy_components = [(force, energy*beta) for force, energy in
compute_potential_components(context, platform=platform).items()]
for item in added_energy_components:
_logger.debug(f"\t\t{item[0]}: {item[1]}")
# now for the corrected reduced_potential_energy
if direction == 'forward':
positions = new_positions
else:
positions = old_positions
reduced_potential_energy = self._corrected_reduced_potential(growth_system_generator, positions, platform_name, atom_proposal_order, beta)
_logger.info(f"total reduced energy added from growth system: {reduced_potential_energy}")
_logger.debug(f"reduced potential of final system:")
for item in final_context_components:
_logger.debug(f"\t\t{item[0]}: {item[1]}")
_logger.info(f"final reduced energy {final_context_reduced_potential}")
_logger.info(f"sum of energies: {atoms_with_positions_reduced_potential + reduced_potential_energy}")
_logger.info(f"magnitude of difference in the energies: {abs(final_context_reduced_potential - atoms_with_positions_reduced_potential - reduced_potential_energy)}")
energy_mismatch_ratio = (atoms_with_positions_reduced_potential + reduced_potential_energy) / (final_context_reduced_potential)
if validate_energy_bookkeeping:
assert (energy_mismatch_ratio < ENERGY_MISMATCH_RATIO_THRESHOLD + 1) and (energy_mismatch_ratio > 1 - ENERGY_MISMATCH_RATIO_THRESHOLD) , f"The ratio of the calculated final energy to the true final energy is {energy_mismatch_ratio}"
# Final log proposal:
_logger.info("Final logp_proposal: {}".format(logp_proposal))
# Clean up OpenMM Context since garbage collector is sometimes slow
del context; del atoms_with_positions_context; del final_context
del integrator; del atoms_with_positions_system_integrator; del final_system_integrator
check_dimensionality(logp_proposal, float)
check_dimensionality(new_positions, unit.nanometers)
omitted_growth_terms = growth_system_generator.omitted_growth_terms
if self.use_sterics:
return logp_proposal, new_positions, rjmc_info, 0.0, reduced_potential_energy, [], omitted_growth_terms
return logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_growth_terms
def _corrected_reduced_potential(self, growth_system_generator, positions, platform_name, atom_proposal_order, beta):
    """
    Compute the reduced potential of the growth system with the auxiliary biasing
    torsions (ring-closing/chirality restraints) removed.

    In order to compute the properly-bookkept energy mismatch, we must define a growth
    system without the biasing torsions.

    Parameters
    ----------
    growth_system_generator : GeometrySystemGenerator
        generator holding the modified growth system and the growth-parameter setter
    positions : simtk.unit.Quantity with shape (n_atoms, 3) compatible with nanometers
        positions at which to evaluate the potential
    platform_name : str
        name of the OpenMM platform to use (falls back to 'Reference')
    atom_proposal_order : list of int
        proposal order of the new atoms; used to set the growth parameter so all
        proposed atoms are fully coupled
    beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
        inverse thermal energy

    Returns
    -------
    modified_reduced_potential_energy : float
        reduced potential (beta * potential energy) of the de-biased growth system
    """
    import copy
    from simtk import openmm
    from perses.dispersed.utils import compute_potential_components
    from perses.dispersed.utils import configure_platform

    _integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
    growth_system = copy.deepcopy(growth_system_generator.get_modified_system())

    # The last bookkeeping step is to delete the torsion force associated with the extra
    # ring-closing and chirality restraints, if present.  When there are two
    # CustomTorsionForce objects, the first is the normal growth torsion force and the
    # second is the added restraint force; remove the second (higher index).
    custom_torsion_forces = [force_index for force_index in range(growth_system.getNumForces()) if growth_system.getForce(force_index).__class__.__name__ == 'CustomTorsionForce']
    if len(custom_torsion_forces) == 2:
        _logger.debug(f"\tfound 2 custom torsion forces")
        growth_system.removeForce(max(custom_torsion_forces))

    platform = configure_platform(platform_name, fallback_platform_name='Reference', precision='double')
    mod_context = openmm.Context(growth_system, _integrator, platform)
    growth_system_generator.set_growth_parameter_index(len(atom_proposal_order)+1, mod_context)
    mod_context.setPositions(positions)
    mod_state = mod_context.getState(getEnergy=True)
    modified_reduced_potential_energy = beta * mod_state.getPotentialEnergy()
    added_energy_components = compute_potential_components(mod_context, platform=platform)
    # Route diagnostics through the module logger (was a bare print to stdout),
    # consistent with logging elsewhere in this file.
    _logger.debug(f"added energy components: {added_energy_components}")

    # Explicitly clean up OpenMM Context and integrator since garbage collection is sometimes slow
    del mod_context
    del _integrator
    return modified_reduced_potential_energy
def _define_no_nb_system(self, system, neglected_angle_terms, atom_proposal_order):
    """
    Generate a final system for an energy-bookkeeping comparison with the energy added in
    the geometry proposal.  The returned system removes (or zeroes) nonbonded
    interactions and zeroes the specified neglected valence angle terms.

    Parameters
    ----------
    system : openmm.app.System object
        system of the target (from the topology proposal), which should include all
        valence, steric, and electrostatic terms
    neglected_angle_terms : list of ints
        list of HarmonicAngleForce indices corresponding to the neglected terms; their
        force constants are set to zero (the angle entries are kept so indices stay stable)
    atom_proposal_order : list of ints
        indices of the newly proposed atoms; when ``self._use_14_nonbondeds`` is set,
        nonbonded exceptions involving at least one of these atoms are retained

    Returns
    -------
    no_nb_system : openmm.app.System object
        final system for energy comparison
    """
    import copy
    from simtk import unit

    no_nb_system = copy.deepcopy(system)
    _logger.info("\tbeginning construction of no_nonbonded final system...")
    _logger.info(f"\tinitial no-nonbonded final system forces {[force.__class__.__name__ for force in list(no_nb_system.getForces())]}")

    # Iterate in reverse so removeForce(index) does not shift indices not yet visited.
    num_forces = no_nb_system.getNumForces()
    for index in reversed(range(num_forces)):
        force = no_nb_system.getForce(index)
        if force.__class__.__name__ == 'NonbondedForce' or force.__class__.__name__ == 'MonteCarloBarostat':
            if self._use_14_nonbondeds and force.__class__.__name__ == 'NonbondedForce':
                # Zero all direct-space particle terms, but keep 1,4 exceptions that
                # involve at least one newly proposed (growth) atom.
                for particle_index in range(force.getNumParticles()):
                    [charge, sigma, epsilon] = force.getParticleParameters(particle_index)
                    force.setParticleParameters(particle_index, charge*0.0, sigma, epsilon*0.0)
                for exception_index in range(force.getNumExceptions()):
                    p1, p2, chargeprod, sigma, epsilon = force.getExceptionParameters(exception_index)
                    if len(set(atom_proposal_order).intersection(set([p1, p2]))) == 0: #there is no growth index in this exception, so we zero it
                        force.setExceptionParameters(exception_index, p1, p2, chargeProd = chargeprod * 0.0, sigma = sigma, epsilon = epsilon * 0.0)
            else:
                no_nb_system.removeForce(index)
        elif force.__class__.__name__ == 'HarmonicAngleForce':
            # Zero the force constant of each neglected angle term.
            for angle_idx in neglected_angle_terms:
                p1, p2, p3, theta0, _K = force.getAngleParameters(angle_idx)
                force.setAngleParameters(angle_idx, p1, p2, p3, theta0, unit.Quantity(value=0.0, unit=unit.kilojoule/(unit.mole*unit.radian**2)))

    # NOTE: removal of the auxiliary ring-closing/chirality CustomTorsionForce is handled
    # in _corrected_reduced_potential, not here.

    forces = { no_nb_system.getForce(index).__class__.__name__ : no_nb_system.getForce(index) for index in range(no_nb_system.getNumForces()) }
    _logger.info(f"\tfinal no-nonbonded final system forces {forces.keys()}")

    # Log the remaining valence term counts for debugging.
    bond_forces = forces['HarmonicBondForce']
    _logger.info(f"\tthere are {bond_forces.getNumBonds()} bond forces in the no-nonbonded final system")
    angle_forces = forces['HarmonicAngleForce']
    _logger.info(f"\tthere are {angle_forces.getNumAngles()} angle forces in the no-nonbonded final system")
    torsion_forces = forces['PeriodicTorsionForce']
    _logger.info(f"\tthere are {torsion_forces.getNumTorsions()} torsion forces in the no-nonbonded final system")

    return no_nb_system
def _copy_positions(self, atoms_with_positions, top_proposal, current_positions):
    """
    Build the position array for the new topology, carrying over coordinates for atoms
    that already have them.

    Parameters
    ----------
    atoms_with_positions : list of parmed.Atom
        atoms (indexed in the new topology) whose positions are already known
    top_proposal : topology_proposal.TopologyProposal
        topology proposal providing ``n_atoms_new`` and the new->old atom map
    current_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
        positions of the current (old) system

    Returns
    -------
    new_positions : simtk.unit.Quantity with shape (n_atoms_new, 3) with units compatible with nanometers
        positions for the new topology; atoms without known positions receive random
        nonzero placeholders
    """
    check_dimensionality(current_positions, unit.nanometers)
    # Random nonzero placeholder coordinates for yet-to-be-placed atoms: works around
    # CustomAngleForce NaNs when new atoms would otherwise start at the origin.
    placeholder = np.random.random((top_proposal.n_atoms_new, 3))
    new_positions = unit.Quantity(placeholder, unit=unit.nanometers)
    # Carry over every known coordinate via the new->old index map.
    for mapped_atom in atoms_with_positions:
        source_index = top_proposal.new_to_old_atom_map[mapped_atom.idx]
        new_positions[mapped_atom.idx] = current_positions[source_index]
    check_dimensionality(new_positions, unit.nanometers)
    return new_positions
def _get_relevant_bond(self, atom1, atom2):
    """
    Look up the parmed bond connecting two atoms, promoted to unit-bearing parameters.

    Parameters
    ----------
    atom1 : parmed.Atom
        One of the atoms in the bond
    atom2 : parmed.Atom
        The other atom in the bond

    Returns
    -------
    bond : parmed.Bond with units modified to simtk.unit.Quantity, or None
        The shared bond with ``req``/``k`` promoted to simtk Quantities, or None when
        the bond carries no type information (e.g. it is handled by a constraint).
    """
    # The bond shared by both atoms is the single element of the intersection of
    # their bond sets.
    shared_bonds = set(atom1.bonds) & set(atom2.bonds)
    bond = shared_bonds.pop()
    if bond.type is None:
        # No valence bond term available; caller falls back to a constraint lookup.
        return None
    bond_with_units = self._add_bond_units(bond)
    check_dimensionality(bond_with_units.type.req, unit.nanometers)
    check_dimensionality(bond_with_units.type.k, unit.kilojoules_per_mole/unit.nanometers**2)
    return bond_with_units
def _get_bond_constraint(self, atom1, atom2, system):
    """
    Get the constraint length for the bond between two atoms, if any.

    Parameters
    ----------
    atom1 : parmed.Atom
        The first atom of the constrained bond
    atom2 : parmed.Atom
        The second atom of the constrained bond
    system : openmm.System
        The system containing the constraint

    Returns
    -------
    constraint : simtk.unit.Quantity or None
        If a constraint is defined between the two atoms, the length is returned;
        otherwise None
    """
    # TODO: This linear scan over all constraints is inefficient; a dictionary of
    # constrained pairs built once per system would give O(1) lookups.
    atom_indices = set([atom1.idx, atom2.idx])
    constraint = None
    for constraint_index in range(system.getNumConstraints()):
        p1, p2, length = system.getConstraintParameters(constraint_index)
        constraint_atoms = set([p1, p2])
        if len(constraint_atoms.intersection(atom_indices)) == 2:
            constraint = length
            # Stop at the first match (assumes at most one constraint per atom pair).
            break
    if constraint is not None:
        check_dimensionality(constraint, unit.nanometers)
    return constraint
def _get_relevant_angle(self, atom1, atom2, atom3):
    """
    Get the parmed angle term spanning the three given atoms, with unit-bearing parameters.

    Parameters
    ----------
    atom1 : parmed.Atom
        The first atom defining the angle
    atom2 : parmed.Atom
        The second atom defining the angle
    atom3 : parmed.Atom
        The third atom in the angle

    Returns
    -------
    relevant_angle_with_units : parmed.Angle with parameters modified to be simtk.unit.Quantity
        Angle connecting the three atoms, with ``theteq``/``k`` promoted to simtk Quantities

    Raises
    ------
    Exception
        If the three atoms do not share a parmed Angle term.
    """
    atom1_angles = set(atom1.angles)
    atom2_angles = set(atom2.angles)
    atom3_angles = set(atom3.angles)
    relevant_angle_set = atom1_angles.intersection(atom2_angles, atom3_angles)
    if len(relevant_angle_set) == 0:
        # Emit diagnostics through the module logger (previously bare print calls),
        # consistent with logging elsewhere in this file, before raising.
        _logger.debug('atom1_angles:')
        _logger.debug(atom1_angles)
        _logger.debug('atom2_angles:')
        _logger.debug(atom2_angles)
        _logger.debug('atom3_angles:')
        _logger.debug(atom3_angles)
        raise Exception('Atoms %s-%s-%s do not share a parmed Angle term' % (atom1, atom2, atom3))
    relevant_angle = relevant_angle_set.pop()
    if type(relevant_angle.type.k) != unit.Quantity:
        relevant_angle_with_units = self._add_angle_units(relevant_angle)
    else:
        relevant_angle_with_units = relevant_angle
    # _add_angle_units mutates and returns the same object, so checking the
    # unit-bearing reference is equivalent to checking the original.
    check_dimensionality(relevant_angle_with_units.type.theteq, unit.radians)
    check_dimensionality(relevant_angle_with_units.type.k, unit.kilojoules_per_mole/unit.radians**2)
    return relevant_angle_with_units
def _add_bond_units(self, bond):
    """
    Promote a parmed harmonic bond's parameters to unit-bearing quantities (in place).

    Parameters
    ----------
    bond : parmed.Bond
        The bond object whose parameters will be converted to unit-bearing quantities

    Returns
    -------
    bond : parmed.Bond with units modified to simtk.unit.Quantity
        The same (mutated) Bond object, with ``req`` in angstroms and ``k`` in
        kilocalories/mole/angstrom**2
    """
    # TODO: Shouldn't we be making a deep copy?
    # Already unit-bearing: nothing to do.
    if type(bond.type.k) == unit.Quantity:
        return bond
    # TODO: Get rid of this, and just operate on the OpenMM System instead
    # NOTE: the 2.0 factor converts parmed's force-constant convention — confirm.
    bond.type.req = bond.type.req * unit.angstrom
    bond.type.k = (2.0 * bond.type.k) * unit.kilocalorie_per_mole / unit.angstrom**2
    return bond
def _add_angle_units(self, angle):
    """
    Promote a parmed harmonic angle's parameters to unit-bearing quantities (in place).

    Parameters
    ----------
    angle : parmed.Angle
        The angle object whose parameters will be converted to unit-bearing quantities

    Returns
    -------
    angle : parmed.Angle with units modified to simtk.unit.Quantity
        The same (mutated) Angle object, with ``theteq`` in degrees and ``k`` in
        kilocalories/mole/radian**2
    """
    # TODO: Shouldn't we be making a deep copy?
    # Already unit-bearing: nothing to do.
    if type(angle.type.k) == unit.Quantity:
        return angle
    # TODO: Get rid of this, and just operate on the OpenMM System instead
    # NOTE: the 2.0 factor converts parmed's force-constant convention — confirm.
    angle.type.theteq = angle.type.theteq * unit.degree
    angle.type.k = (2.0 * angle.type.k) * unit.kilocalorie_per_mole / unit.radian**2
    return angle
def _add_torsion_units(self, torsion):
    """
    Promote a parmed torsion's parameters to unit-bearing quantities (in place).

    Parameters
    ----------
    torsion : parmed.Torsion
        The torsion object whose parameters will be converted to unit-bearing quantities

    Returns
    -------
    torsion : parmed.Torsion with units modified to simtk.unit.Quantity
        The same (mutated) Torsion object, with ``phi_k`` in kilocalories/mole and
        ``phase`` in degrees
    """
    # TODO: Shouldn't we be making a deep copy?
    # Already unit-bearing: nothing to do.
    if type(torsion.type.phi_k) == unit.Quantity:
        return torsion
    # TODO: Get rid of this, and just operate on the OpenMM System instead
    torsion.type.phi_k = torsion.type.phi_k * unit.kilocalorie_per_mole
    torsion.type.phase = torsion.type.phase * unit.degree
    return torsion
def _rotation_matrix(self, axis, angle):
"""
Compute a rotation matrix about the origin given a coordinate axis and an angle.
Parameters
----------
axis : ndarray of shape (3,) without units
The axis about which rotation should occur
angle : float (implicitly in radians)
The angle of rotation about the axis
Returns
-------
rotation_matrix : ndarray of shape (3,3) without units
The 3x3 rotation matrix
"""
axis = axis/np.linalg.norm(axis)
axis_squared = np.square(axis)
cos_angle = np.cos(angle)
sin_angle = np.sin(angle)
rot_matrix_row_one = np.array([cos_angle+axis_squared[0]*(1-cos_angle),
axis[0]*axis[1]*(1-cos_angle) - axis[2]*sin_angle,
axis[0]*axis[2]*(1-cos_angle)+axis[1]*sin_angle])
rot_matrix_row_two = np.array([axis[1]*axis[0]*(1-cos_angle)+axis[2]*sin_angle,
cos_angle+axis_squared[1]*(1-cos_angle),
axis[1]*axis[2]*(1-cos_angle) - axis[0]*sin_angle])
rot_matrix_row_three = np.array([axis[2]*axis[0]*(1-cos_angle)-axis[1]*sin_angle,
axis[2]*axis[1]*(1-cos_angle)+axis[0]*sin_angle,
cos_angle+axis_squared[2]*(1-cos_angle)])
rotation_matrix = np.array([rot_matrix_row_one, rot_matrix_row_two, rot_matrix_row_three])
return rotation_matrix
def _cartesian_to_internal(self, atom_position, bond_position, angle_position, torsion_position):
    """
    Convert the Cartesian position of an atom to internal coordinates (r, theta, phi)
    relative to three reference atoms, together with |det J| of the transformation.

    Parameters
    ----------
    atom_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        Position of atom whose internal coordinates are to be computed
    bond_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        Position of atom separated from the placed atom by bond length ``r``
    angle_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        Position of atom separated from the placed atom by angle ``theta``
    torsion_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        Position of atom separated from the placed atom by torsion ``phi``

    Returns
    -------
    internal_coords : tuple of (float, float, float)
        (r, theta, phi): bond length (implicitly nm), angle (radians, [0, pi]),
        torsion (radians, [-pi, +pi))
    detJ : float
        Absolute value of the determinant of the Jacobian of (r, theta, phi) -> (x, y, z)

    .. todo :: Clarify the direction of the Jacobian; accept/return unit-bearing quantities
    """
    # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs
    for reference_position in (atom_position, bond_position, angle_position, torsion_position):
        check_dimensionality(reference_position, unit.nanometers)
    # The numba kernel requires dimensionless float64 arrays implicitly in nanometers.
    from perses.rjmc import coordinate_numba
    def _as_md_array(quantity):
        return quantity.value_in_unit(unit.nanometers).astype(np.float64)
    internal_coords = coordinate_numba.cartesian_to_internal(
        _as_md_array(atom_position),
        _as_md_array(bond_position),
        _as_md_array(angle_position),
        _as_md_array(torsion_position))
    # Return values are floats implicitly in nanometers and radians.
    r, theta, phi = internal_coords
    # |det J| of the internal->Cartesian map: r^2 sin(theta)
    detJ = np.abs(r**2 * np.sin(theta))
    for scalar in (r, theta, phi, detJ):
        check_dimensionality(scalar, float)
    return internal_coords, detJ
def _internal_to_cartesian(self, bond_position, angle_position, torsion_position, r, theta, phi):
    """
    Compute the Cartesian position of a newly placed atom from its internal coordinates,
    along with |det J| of the transformation.

    Parameters
    ----------
    bond_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        Position of atom separated from the placed atom by bond length ``r``
    angle_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        Position of atom separated from the placed atom by angle ``theta``
    torsion_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        Position of atom separated from the placed atom by torsion ``phi``
    r : float (implicitly in nanometers)
        Bond length distance from ``bond_position`` to the placed atom
    theta : float (implicitly in radians)
        Angle formed by ``(angle_position, bond_position, new_atom)``
    phi : float (implicitly in radians)
        Torsion formed by ``(torsion_position, angle_position, bond_position, new_atom)``

    Returns
    -------
    xyz : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
        The position of the newly placed atom
    detJ : float
        Absolute value of the determinant of the Jacobian of (r, theta, phi) -> (x, y, z)

    .. todo :: Clarify the direction of the Jacobian; accept/return unit-bearing quantities
    """
    # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs
    for reference_position in (bond_position, angle_position, torsion_position):
        check_dimensionality(reference_position, unit.nanometers)
    for scalar in (r, theta, phi):
        check_dimensionality(scalar, float)
    # The numba kernel requires dimensionless float64 arrays implicitly in nanometers/radians.
    from perses.rjmc import coordinate_numba
    def _as_md_array(quantity):
        return quantity.value_in_unit(unit.nanometers).astype(np.float64)
    raw_xyz = coordinate_numba.internal_to_cartesian(
        _as_md_array(bond_position),
        _as_md_array(angle_position),
        _as_md_array(torsion_position),
        np.array([r, theta, phi], np.float64))
    # Promote the new position back to a unit-bearing Quantity.
    xyz = unit.Quantity(raw_xyz, unit=unit.nanometers)
    # |det J| of the internal->Cartesian map: r^2 sin(theta)
    detJ = np.abs(r**2 * np.sin(theta))
    check_dimensionality(xyz, unit.nanometers)
    check_dimensionality(detJ, float)
    return xyz, detJ
def _bond_log_pmf(self, bond, beta, n_divisions):
    r"""
    Compute the discretized log probability mass function (PMF) for drawing a bond length.

    .. math ::
        p(r; \beta, K_r, r_0) \propto r^2 e^{-\frac{\beta K_r}{2} (r - r_0)^2 }

    Parameters
    ----------
    bond : parmed.Structure.Bond modified to use simtk.unit.Quantity
        Valence bond parameters
    beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
        Inverse thermal energy
    n_divisions : int
        Number of quadrature points for drawing the bond length

    Returns
    -------
    r_i : np.ndarray of shape (n_divisions,) implicitly in units of nanometers
        r_i[i] is the leftmost bin edge of bin i
    log_p_i : np.ndarray of shape (n_divisions,)
        log_p_i[i] is the log probability mass of bin i
    bin_width : float implicitly in units of nanometers
        The width of each PMF bin

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # TODO: We end up computing the discretized PMF over and over again; we can speed this up by caching
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    from scipy.special import logsumexp

    # Validate input dimensions.
    assert check_dimensionality(bond.type.req, unit.angstroms)
    assert check_dimensionality(bond.type.k, unit.kilojoules_per_mole/unit.nanometers**2)
    assert check_dimensionality(beta, unit.kilojoules_per_mole**(-1))

    # Equilibrium length, softened force constant, and stddev as unit-bearing quantities.
    r0 = bond.type.req
    k = bond.type.k * self._bond_softening_constant
    sigma_r = unit.sqrt((1.0/(beta*k)))

    # Strip units into the MD unit system (nm, kJ/mol).
    r0 = r0.value_in_unit_system(unit.md_unit_system)
    k = k.value_in_unit_system(unit.md_unit_system)
    sigma_r = sigma_r.value_in_unit_system(unit.md_unit_system)

    # Discretize [max(0, r0 - 6 sigma), r0 + 6 sigma) into n_divisions left bin edges.
    lower_bound = max(0., r0 - 6*sigma_r)
    upper_bound = r0 + 6*sigma_r
    r_i, bin_width = np.linspace(lower_bound, upper_bound, num=n_divisions, retstep=True, endpoint=False)

    # Log probability at bin centers: 2 log r - (1/2)((r - r0)/sigma)^2, then normalize.
    bin_centers = r_i + (bin_width/2.0)
    log_p_i = 2*np.log(bin_centers) - 0.5*((bin_centers - r0)/sigma_r)**2
    log_p_i -= logsumexp(log_p_i)

    check_dimensionality(r_i, float)
    check_dimensionality(log_p_i, float)
    check_dimensionality(bin_width, float)
    return r_i, log_p_i, bin_width
def _bond_logp(self, r, bond, beta, n_divisions):
    r"""
    Calculate the log-probability of a given bond length at a given inverse temperature.

    The bond length is assumed drawn from the discretized distribution

    .. math ::
        r \sim p(r; \beta, K_r, r_0) \propto r^2 e^{-\frac{\beta K_r}{2} (r - r_0)^2 }

    Parameters
    ----------
    r : float
        Bond length, implicitly in nanometers
    bond : parmed.Structure.Bond modified to use simtk.unit.Quantity
        Valence bond parameters
    beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
        Inverse thermal energy
    n_divisions : int
        Number of quadrature points used to discretize the bond length

    Returns
    -------
    logp : float
        Normalized log probability density of the bond length

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    check_dimensionality(r, float)
    check_dimensionality(beta, 1/unit.kilojoules_per_mole)
    # Discretized PMF over left bin edges, all implicitly in nanometers
    edges, log_masses, width = self._bond_log_pmf(bond, beta, n_divisions)
    # Anything outside the discretized support has zero probability
    if r < edges[0] or r >= edges[-1] + width:
        return LOG_ZERO
    # Locate the bin containing r
    bin_index = int((r - edges[0]) / width)
    assert 0 <= bin_index < n_divisions
    # Convert probability mass to probability density by dividing by the bin width
    return log_masses[bin_index] - np.log(width)
def _propose_bond(self, bond, beta, n_divisions):
    r"""
    Propose a dimensionless bond length r from the discretized distribution

    .. math ::
        r \sim p(r; \beta, K_r, r_0) \propto r^2 e^{-\frac{\beta K_r}{2} (r - r_0)^2 }

    Parameters
    ----------
    bond : parmed.Structure.Bond modified to use simtk.unit.Quantity
        Valence bond parameters
    beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
        Inverse thermal energy
    n_divisions : int
        Number of quadrature points used to discretize the bond length

    Returns
    -------
    r : float
        Dimensionless bond length, implicitly in nanometers

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    check_dimensionality(beta, 1/unit.kilojoules_per_mole)
    edges, log_masses, width = self._bond_log_pmf(bond, beta, n_divisions)
    # Select a bin according to its probability mass...
    chosen_bin = np.random.choice(range(n_divisions), p=np.exp(log_masses))
    # ...then sample uniformly within that bin
    proposed_r = np.random.uniform(edges[chosen_bin], edges[chosen_bin] + width)
    # The result is dimensionless, implicitly in nanometers
    assert check_dimensionality(proposed_r, float)
    assert proposed_r > 0
    return proposed_r
def _angle_log_pmf(self, angle, beta, n_divisions):
    r"""
    Calculate the log probability mass function (PMF) of drawing an angle.

    .. math ::
        p(\theta; \beta, K_\theta, \theta_0) \propto \sin(\theta) e^{-\frac{\beta K_\theta}{2} (\theta - \theta_0)^2 }

    Parameters
    ----------
    angle : parmed.Structure.Angle modified to use simtk.unit.Quantity
        Valence angle parameters
    beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
        Inverse thermal energy
    n_divisions : int
        Number of quadrature points used to discretize the angle

    Returns
    -------
    theta_i : np.ndarray of shape (n_divisions,) implicitly in units of radians
        theta_i[i] is the angle left bin edge with corresponding log probability mass log_p_i[i]
    log_p_i : np.ndarray of shape (n_divisions,)
        log_p_i[i] is the corresponding log probability mass of angle theta_i[i]
    bin_width : float implicitly in units of radians
        The bin width for individual PMF bins

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: Overhaul this method to accept unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    # TODO: We end up computing the discretized PMF over and over again; we can speed this up by caching
    from scipy.special import logsumexp
    # Validate input dimensionality
    assert check_dimensionality(angle.type.theteq, unit.radians)
    assert check_dimensionality(angle.type.k, unit.kilojoules_per_mole/unit.radians**2)
    assert check_dimensionality(beta, unit.kilojoules_per_mole**(-1))
    # Gather unit-bearing valence angle parameters (force constant is softened)
    equilibrium_angle = angle.type.theteq
    force_constant = angle.type.k * self._angle_softening_constant
    stddev = unit.sqrt(1.0/(beta * force_constant))  # standard deviation, unit-bearing quantity
    # Strip units in the MD unit system (radians, kJ/mol)
    theta0 = equilibrium_angle.value_in_unit_system(unit.md_unit_system)
    k = force_constant.value_in_unit_system(unit.md_unit_system)
    sigma_theta = stddev.value_in_unit_system(unit.md_unit_system)
    # Keep away from theta = {0, pi} where sin(theta) = 0 and log would diverge
    EPSILON = 1.0e-3
    lower_bound, upper_bound = EPSILON, np.pi - EPSILON
    # Left bin edges and common bin width
    theta_i, bin_width = np.linspace(lower_bound, upper_bound, num=n_divisions, retstep=True, endpoint=False)
    # Evaluate the unnormalized log density at bin midpoints, then normalize the mass function
    midpoints = theta_i + (bin_width/2.0)
    log_p_i = np.log(np.sin(midpoints)) - 0.5*((midpoints - theta0)/sigma_theta)**2
    log_p_i -= logsumexp(log_p_i)
    check_dimensionality(theta_i, float)
    check_dimensionality(log_p_i, float)
    check_dimensionality(bin_width, float)
    return theta_i, log_p_i, bin_width
def _angle_logp(self, theta, angle, beta, n_divisions):
    r"""
    Calculate the log-probability of a given valence angle at a given inverse temperature.

    The angle is assumed drawn from the discretized distribution

    .. math ::
        p(\theta; \beta, K_\theta, \theta_0) \propto \sin(\theta) e^{-\frac{\beta K_\theta}{2} (\theta - \theta_0)^2 }

    Parameters
    ----------
    theta : float
        Valence angle, implicitly in radians
    angle : parmed.Structure.Angle modified to use simtk.unit.Quantity
        Valence angle parameters
    beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
        Inverse thermal energy
    n_divisions : int
        Number of quadrature points used to discretize the angle

    Returns
    -------
    logp : float
        Normalized log probability density of the angle

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: Overhaul this method to accept unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    check_dimensionality(theta, float)
    check_dimensionality(beta, 1/unit.kilojoules_per_mole)
    # Discretized PMF over left bin edges, all implicitly in radians
    edges, log_masses, width = self._angle_log_pmf(angle, beta, n_divisions)
    # Outside the discretized support, the probability is zero
    if theta < edges[0] or theta >= edges[-1] + width:
        return LOG_ZERO
    # Locate the bin containing theta
    bin_index = int((theta - edges[0]) / width)
    assert 0 <= bin_index < n_divisions
    # Convert probability mass to probability density by dividing by the bin width
    return log_masses[bin_index] - np.log(width)
def _propose_angle(self, angle, beta, n_divisions):
    r"""
    Propose a dimensionless valence angle from the discretized distribution

    .. math ::
        \theta \sim p(\theta; \beta, K_\theta, \theta_0) \propto \sin(\theta) e^{-\frac{\beta K_\theta}{2} (\theta - \theta_0)^2 }

    Parameters
    ----------
    angle : parmed.Structure.Angle modified to use simtk.unit.Quantity
        Valence angle parameters
    beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
        Inverse thermal energy
    n_divisions : int
        Number of quadrature points used to discretize the angle

    Returns
    -------
    theta : float
        Dimensionless valence angle, implicitly in radians

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    check_dimensionality(beta, 1/unit.kilojoules_per_mole)
    edges, log_masses, width = self._angle_log_pmf(angle, beta, n_divisions)
    # Select a bin according to its probability mass...
    chosen_bin = np.random.choice(range(n_divisions), p=np.exp(log_masses))
    # ...then sample uniformly within that bin
    proposed_theta = np.random.uniform(edges[chosen_bin], edges[chosen_bin] + width)
    # The result is dimensionless, implicitly in radians
    assert check_dimensionality(proposed_theta, float)
    return proposed_theta
def _torsion_scan(self, torsion_atom_indices, positions, r, theta, n_divisions):
    """
    Compute unit-bearing Cartesian positions and dimensionless torsions (in md_unit_system) for a torsion scan.

    Parameters
    ----------
    torsion_atom_indices : int tuple of shape (4,)
        Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
    positions : simtk.unit.Quantity of shape (natoms,3) with units compatible with nanometers
        Positions of the atoms in the system
    r : float (implicitly in md_unit_system)
        Dimensionless bond length (must be in nanometers)
    theta : float (implicitly in md_unit_system)
        Dimensionless valence angle (must be in radians)
    n_divisions : int
        The number of divisions for the torsion scan

    Returns
    -------
    xyzs : simtk.unit.Quantity wrapped np.ndarray of shape (n_divisions,3) with dimensions length
        The Cartesian coordinates of the driven atom, one per torsion grid point
    phis : np.ndarray of shape (n_divisions,), implicitly in radians
        The torsion angles representing the left bin edge at which a potential will be calculated
    bin_width : float, implicitly in radians
        The bin width of torsion scan increment
    """
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    import copy
    from perses.rjmc import coordinate_numba
    assert check_dimensionality(positions, unit.angstroms)
    assert check_dimensionality(r, float)
    assert check_dimensionality(theta, float)
    # Work on a numba-friendly float64 copy, dimensionless in nanometers (md_unit_system length)
    dimensionless_positions = copy.deepcopy(positions).value_in_unit(unit.nanometers).astype(np.float64)
    # Positions of the four torsion atoms; the first entry (the driven atom itself) is not needed here
    _, bond_xyz, angle_xyz, torsion_xyz = [dimensionless_positions[index] for index in torsion_atom_indices]
    # Dimensionless torsion grid: left bin edges covering [-pi, +pi)
    phis, bin_width = np.linspace(-np.pi, +np.pi, num=n_divisions, retstep=True, endpoint=False)
    # Drive the torsion through the grid at fixed bond length r and angle theta
    internals = np.array([r, theta, 0.0], np.float64)
    xyzs = coordinate_numba.torsion_scan(bond_xyz, angle_xyz, torsion_xyz, internals, phis)
    # Re-attach standard md_unit_system length units (nanometers)
    xyzs_quantity = unit.Quantity(xyzs, unit=unit.nanometers)
    # Return unit-bearing positions and dimensionless torsions (implicitly in md_unit_system)
    check_dimensionality(xyzs_quantity, unit.nanometers)
    check_dimensionality(phis, float)
    return xyzs_quantity, phis, bin_width
def _torsion_log_pmf(self, growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions):
    """
    Calculate the torsion log probability using OpenMM, including all energetic contributions for the atom being driven.

    This includes all contributions from bonds, angles, and torsions for the atom being placed
    (and, optionally, sterics if added to the growth system when it was created).

    Parameters
    ----------
    growth_context : simtk.openmm.Context
        Context containing the modified system
    torsion_atom_indices : int tuple of shape (4,)
        Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
    positions : simtk.unit.Quantity with shape (natoms,3) with units compatible with nanometers
        Positions of the atoms in the system
    r : float (implicitly in nanometers)
        Dimensionless bond length (must be in nanometers)
    theta : float (implicitly in radians on domain [0,+pi])
        Dimensionless valence angle (must be in radians)
    beta : simtk.unit.Quantity with units compatible with 1/(kJ/mol)
        Inverse thermal energy
    n_divisions : int
        Number of divisions for the torsion scan

    Returns
    -------
    logp_torsions : np.ndarray of float with shape (n_divisions,)
        logp_torsions[i] is the normalized probability density at phis[i]
    phis : np.ndarray of float with shape (n_divisions,), implicitly in radians
        phis[i] is the torsion angle left bin edge at which the log probability logp_torsions[i] was calculated
    bin_width : float implicitly in radians
        The bin width for torsions

    Raises
    ------
    Exception
        If all n_divisions torsion energies evaluate to NaN

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: This method could benefit from memoization to speed up tests and particle filtering
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    # Check input argument dimensions
    check_dimensionality(positions, unit.angstroms)
    check_dimensionality(r, float)
    check_dimensionality(theta, float)
    check_dimensionality(beta, 1.0 / unit.kilojoules_per_mole)
    # Compute energies for all torsions
    logq = np.zeros(n_divisions) # logq[i] is the log unnormalized torsion probability density
    atom_idx = torsion_atom_indices[0]
    # Candidate positions of the driven atom, one per torsion grid point (left bin edges)
    xyzs, phis, bin_width = self._torsion_scan(torsion_atom_indices, positions, r, theta, n_divisions)
    xyzs = xyzs.value_in_unit_system(unit.md_unit_system) # make positions dimensionless again
    # NOTE(review): value_in_unit_system may return a view of the underlying array rather than a
    # copy; if so, the in-place write to positions[atom_idx,:] below would mutate the caller's
    # positions Quantity — TODO confirm
    positions = positions.value_in_unit_system(unit.md_unit_system)
    for i, xyz in enumerate(xyzs):
        # Set positions
        positions[atom_idx,:] = xyz
        growth_context.setPositions(positions)
        # Compute potential energy
        state = growth_context.getState(getEnergy=True)
        potential_energy = state.getPotentialEnergy()
        # Store unnormalized log probabilities (the unit-bearing product -beta*U is dimensionless)
        logq_i = -beta*potential_energy
        logq[i] = logq_i
    # It's OK to have a few torsions with NaN energies,
    # but we need at least _some_ torsions to have finite energies
    if np.sum(np.isnan(logq)) == n_divisions:
        raise Exception("All %d torsion energies in torsion PMF are NaN." % n_divisions)
    # Suppress the contribution from any torsions with NaN energies
    logq[np.isnan(logq)] = -np.inf
    # Compute the normalized log probability
    from scipy.special import logsumexp
    logp_torsions = logq - logsumexp(logq)
    # Write proposed torsion energies to a PDB file for visualization or debugging, if desired
    if hasattr(self, '_proposal_pdbfile'):
        # Write proposal probabilities to PDB file as B-factors for inert atoms
        f_i = -logp_torsions
        f_i -= f_i.min() # minimum free energy is zero
        # Cap free energies so they fit the fixed-width PDB B-factor column
        f_i[f_i > 999.99] = 999.99
        self._proposal_pdbfile.write('MODEL\n')
        for i, xyz in enumerate(xyzs):
            # Each candidate position is written as an argon atom; occupancy carries the probability, B-factor the free energy
            self._proposal_pdbfile.write('ATOM %5d %4s %3s %c%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n' % (i+1, ' Ar ', 'Ar ', ' ', atom_idx+1, 10*xyz[0], 10*xyz[1], 10*xyz[2], np.exp(logp_torsions[i]), f_i[i]))
        self._proposal_pdbfile.write('TER\n')
        self._proposal_pdbfile.write('ENDMDL\n')
    # TODO: Write proposal PMFs to storage
    # atom_proposal_indices[order]
    # atom_positions[order,k]
    # torsion_pmf[order, division_index]
    assert check_dimensionality(logp_torsions, float)
    assert check_dimensionality(phis, float)
    assert check_dimensionality(bin_width, float)
    return logp_torsions, phis, bin_width
def _propose_torsion(self, growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions):
    """
    Propose a torsion angle using OpenMM.

    Parameters
    ----------
    growth_context : simtk.openmm.Context
        Context containing the modified system
    torsion_atom_indices : int tuple of shape (4,)
        Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
    positions : simtk.unit.Quantity with shape (natoms,3) with units compatible with nanometers
        Positions of the atoms in the system
    r : float (implicitly in nanometers)
        Dimensionless bond length (must be in nanometers)
    theta : float (implicitly in radians on domain [0,+pi])
        Dimensionless valence angle (must be in radians)
    beta : simtk.unit.Quantity with units compatible with 1/(kJ/mol)
        Inverse thermal energy
    n_divisions : int
        Number of divisions for the torsion scan

    Returns
    -------
    phi : float, implicitly in radians
        The proposed torsion angle
    logp : float
        The log probability of the proposal

    .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
    """
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
    check_dimensionality(positions, unit.angstroms)
    check_dimensionality(r, float)
    check_dimensionality(theta, float)
    check_dimensionality(beta, 1.0 / unit.kilojoules_per_mole)
    # Discretized torsion probability mass function over left bin edges
    log_masses, edges, width = self._torsion_log_pmf(growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions)
    # Select a bin according to its probability mass...
    chosen_bin = np.random.choice(range(len(edges)), p=np.exp(log_masses))
    # ...then sample uniformly within that bin
    phi = np.random.uniform(edges[chosen_bin], edges[chosen_bin] + width)
    # Convert the bin's probability mass to a density for the log probability of this draw
    logp = log_masses[chosen_bin] - np.log(width)
    assert check_dimensionality(phi, float)
    assert check_dimensionality(logp, float)
    return phi, logp
def _torsion_logp(self, growth_context, torsion_atom_indices, positions, r, theta, phi, beta, n_divisions):
    """
    Calculate the logp of a torsion using OpenMM.

    Parameters
    ----------
    growth_context : simtk.openmm.Context
        Context containing the modified system
    torsion_atom_indices : int tuple of shape (4,)
        Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
    positions : simtk.unit.Quantity with shape (natoms,3) with units compatible with nanometers
        Positions of the atoms in the system
    r : float (implicitly in nanometers)
        Dimensionless bond length (must be in nanometers)
    theta : float (implicitly in radians on domain [0,+pi])
        Dimensionless valence angle (must be in radians)
    phi : float (implicitly in radians on domain [-pi,+pi))
        Dimensionless torsion angle (must be in radians)
    beta : simtk.unit.Quantity with units compatible with 1/(kJ/mol)
        Inverse thermal energy
    n_divisions : int
        Number of divisions for the torsion scan

    Returns
    -------
    torsion_logp : float
        The log probability this torsion would be drawn
    """
    # TODO: Overhaul this method to accept and return unit-bearing quantities
    # Check that dimensionless arguments really are dimensionless
    check_dimensionality(positions, unit.angstroms)
    check_dimensionality(r, float)
    check_dimensionality(theta, float)
    check_dimensionality(phi, float)
    check_dimensionality(beta, 1.0 / unit.kilojoules_per_mole)
    # Discretized torsion probability mass function over left bin edges
    log_masses, edges, width = self._torsion_log_pmf(growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions)
    # Find the bin edge closest to phi
    # WARNING: This assumes both phi and the bin edges have domain of [-pi,+pi)
    nearest_bin = np.argmin(np.abs(phi - edges))
    # Convert from probability mass function to probability density function so that sum(dphi*p) = 1, with dphi = (2*pi)/n_divisions
    torsion_logp = log_masses[nearest_bin] - np.log(width)
    assert check_dimensionality(torsion_logp, float)
    return torsion_logp
class GeometrySystemGenerator(object):
"""
Internal utility class to generate OpenMM systems with only valence terms and special parameters for newly placed atoms to assist in geometry proposals.
The resulting system will have the specified global context parameter (controlled by ``parameter_name``)
that selects which proposed atom will have all its valence terms activated. When this parameter is set to the
index of the atom being added within ``growth_indices``, all valence terms associated with that atom will be computed.
Only valence terms involving newly placed atoms will be computed; valence terms between fixed atoms will be omitted.
"""
def __init__(self,
reference_system,
torsion_proposal_order,
omitted_bonds,
reference_topology,
global_parameter_name='growth_index',
add_extra_torsions = True,
add_extra_angles = False,
use_sterics=False,
force_names=None,
force_parameters=None,
verbose=True,
neglect_angles = True,
use_14_nonbondeds = True):
"""
Parameters
----------
reference_system : simtk.openmm.System object
The system containing the relevant forces and particles
torsion_proposal_order : list of list of 4-int
The order in which the torsion indices will be proposed
omitted_bonds : list of tuple of int
list of atom index tuples (corresponding to reference_topology atoms) which have been omitted in the atom proposal
reference_topology : simtk.openmm.topology.Topology (augmented)
used to probe the topology for rotamers, chiral centers, etc.
global_parameter_name : str, optional, default='growth_index'
The name of the global context parameter
add_extra_torsions : bool, optional
Whether to add additional torsions to keep rings flat. Default true.
force_names : list of str
A list of the names of forces that will be included in this system
force_parameters : dict
Options for the forces (e.g., NonbondedMethod : 'CutffNonPeriodic')
neglect_angles : bool
whether to ignore and report on theta angle potentials that add variance to the work
verbose : bool, optional, default=False
If True, will print verbose output.
neglect_angles : bool
whether to neglect (coupled) angle terms that would make the variance non-zero (within numerical tolerance threshold)
use_14_nonbondeds : bool, default True
whether to consider 1,4 exception interactions in the geometry proposal
Attributes
----------
growth_system : simtk.openmm.System object
The system containing all of the valence forces to be added (with the exception of neglected angle forces if neglect_angles == False) with respect
to the reference_system Parameter.
atoms_with_positions_system : simtk.openmm.System object
The system containing all of the core atom valence forces. This is to be used in the proposal to assert that the final growth_system energy plus
the atoms_with_positions_system energy is equal to the final_system energy (for the purpose of energy bookkeeping).
neglected_angle_terms : list of ints
The indices of the HarmonicAngleForce parameters which are neglected for the purpose of minimizing work variance. This will be empty if neglect_angles == False.
"""
import copy
# TODO: Rename `growth_indices` (which is really a list of Atom objects) to `atom_growth_order` or `atom_addition_order`
#create an 'omitted_terms'
self.omitted_growth_terms = {'bonds': [], 'angles': [], 'torsions': [], '1,4s': []}
self.omitted_bonds = omitted_bonds
self.extra_torsion_terms = {}
self.extra_angle_terms = {}
self.reference_topology = reference_topology
# Check that we're not using the reserved name
if global_parameter_name == 'growth_idx':
raise ValueError('global_parameter_name cannot be "growth_idx" due to naming collisions')
growth_indices = [ torsion[0] for torsion in torsion_proposal_order ]
default_growth_index = len(growth_indices) # default value of growth index to use in System that is returned
self.current_growth_index = default_growth_index
# Bonds, angles, and torsions
self._HarmonicBondForceEnergy = "select(step({}+0.1 - growth_idx), (K/2)*(r-r0)^2, 0);"
self._HarmonicAngleForceEnergy = "select(step({}+0.1 - growth_idx), (K/2)*(theta-theta0)^2, 0);"
self._PeriodicTorsionForceEnergy = "select(step({}+0.1 - growth_idx), k*(1+cos(periodicity*theta-phase)), 0);"
# Nonbonded sterics and electrostatics.
# TODO: Allow user to select whether electrostatics or sterics components are included in the nonbonded interaction energy.
self._nonbondedEnergy = "select(step({}+0.1 - growth_idx), U_sterics + U_electrostatics, 0);"
self._nonbondedEnergy += "growth_idx = max(growth_idx1, growth_idx2);"
# Sterics
from openmmtools.constants import ONE_4PI_EPS0 # OpenMM constant for Coulomb interactions (implicitly in md_unit_system units)
# TODO: Auto-detect combining rules to allow this to work with other force fields?
# TODO: Enable more flexible handling / metaprogramming of CustomForce objects?
self._nonbondedEnergy += "U_sterics = 4*epsilon*x*(x-1.0); x = (sigma/r)^6;"
self._nonbondedEnergy += "epsilon = sqrt(epsilon1*epsilon2); sigma = 0.5*(sigma1 + sigma2);"
# Electrostatics
self._nonbondedEnergy += "U_electrostatics = ONE_4PI_EPS0*charge1*charge2/r;"
self._nonbondedEnergy += "ONE_4PI_EPS0 = %f;" % ONE_4PI_EPS0
# Exceptions
self._nonbondedExceptionEnergy = "select(step({}+0.1 - growth_idx), U_exception, 0);"
self._nonbondedExceptionEnergy += "U_exception = ONE_4PI_EPS0*chargeprod/r + 4*epsilon*x*(x-1.0); x = (sigma/r)^6;"
self._nonbondedExceptionEnergy += "ONE_4PI_EPS0 = %f;" % ONE_4PI_EPS0
self.sterics_cutoff_distance = 9.0 * unit.angstroms # cutoff for steric interactions with added/deleted atoms
self.verbose = verbose
# Get list of particle indices for new and old atoms.
new_particle_indices = growth_indices
old_particle_indices = [idx for idx in range(reference_system.getNumParticles()) if idx not in new_particle_indices]
# Compile index of reference forces
reference_forces = dict()
reference_forces_indices = dict()
for (index, force) in enumerate(reference_system.getForces()):
force_name = force.__class__.__name__
if force_name in reference_forces:
raise ValueError('reference_system has two {} objects. This is currently unsupported.'.format(force_name))
else:
reference_forces_indices[force_name] = index
reference_forces[force_name] = force
# Create new System
from simtk import openmm
growth_system = openmm.System()
atoms_with_positions_system = copy.deepcopy(reference_system)
# Copy particles
for i in range(reference_system.getNumParticles()):
growth_system.addParticle(reference_system.getParticleMass(i))
# Virtual sites are, in principle, automatically supported
# Create bond force
_logger.info("\tcreating bond force...")
modified_bond_force = openmm.CustomBondForce(self._HarmonicBondForceEnergy.format(global_parameter_name))
modified_bond_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['r0', 'K', 'growth_idx']:
modified_bond_force.addPerBondParameter(parameter_name)
growth_system.addForce(modified_bond_force)
reference_bond_force = reference_forces['HarmonicBondForce']
_logger.info(f"\tthere are {reference_bond_force.getNumBonds()} bonds in reference force.")
for bond_index in range(reference_bond_force.getNumBonds()):
p1, p2, r0, K = reference_bond_force.getBondParameters(bond_index)
growth_idx = self._calculate_growth_idx([p1, p2], growth_indices)
_logger.debug(f"\t\tfor bond {bond_index} (i.e. partices {p1} and {p2}), the growth_index is {growth_idx}")
if growth_idx > 0:
if (p1, p2) not in omitted_bonds and (p2, p1) not in omitted_bonds:
modified_bond_force.addBond(p1, p2, [r0, K, growth_idx])
_logger.debug(f"\t\t\tadding to the growth system")
else:
_logger.debug(f"\t\t\tomitted bond")
self.omitted_growth_terms['bonds'].append((p1,p2))
atoms_with_positions_system.getForce(reference_forces_indices['HarmonicBondForce']).setBondParameters(bond_index,p1, p2, r0, K*0.0)
else:
_logger.debug(f"\t\t\tadding to the the atoms with positions system.")
# Create angle force
# NOTE: here, we are implementing an angle exclusion scheme for angle terms that are coupled to lnZ_phi
_logger.info("\tcreating angle force...")
modified_angle_force = openmm.CustomAngleForce(self._HarmonicAngleForceEnergy.format(global_parameter_name))
modified_angle_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['theta0', 'K', 'growth_idx']:
modified_angle_force.addPerAngleParameter(parameter_name)
growth_system.addForce(modified_angle_force)
reference_angle_force = reference_forces['HarmonicAngleForce']
neglected_angle_term_indices = [] #initialize the index list of neglected angle forces
_logger.info(f"\tthere are {reference_angle_force.getNumAngles()} angles in reference force.")
for angle in range(reference_angle_force.getNumAngles()):
p1, p2, p3, theta0, K = reference_angle_force.getAngleParameters(angle)
growth_idx = self._calculate_growth_idx([p1, p2, p3], growth_indices)
_logger.debug(f"\t\tfor angle {angle} (i.e. partices {p1}, {p2}, and {p3}), the growth_index is {growth_idx}")
if growth_idx > 0:
if neglect_angles and (not use_sterics):
if any( [p1, p2, p3] == torsion[:3] or [p3, p2, p1] == torsion[:3] for torsion in torsion_proposal_order):
#then there is a new atom in the angle term and the angle is part of a torsion and is necessary
_logger.debug(f"\t\t\tadding to the growth system since it is part of a torsion")
modified_angle_force.addAngle(p1, p2, p3, [theta0, K, growth_idx])
else:
#then it is a neglected angle force, so it must be tallied
_logger.debug(f"\t\t\ttallying to neglected term indices")
neglected_angle_term_indices.append(angle)
else:
possible_omissions = [(p1,p2), (p2, p3), (p2,p1), (p3,p2)]
if any(angle_pair in omitted_bonds for angle_pair in possible_omissions):
_logger.debug(f"\t\t\tomitted angle")
self.omitted_growth_terms['angles'].append((p1,p2,p3))
else:
_logger.debug(f"\t\t\tadding to the growth system")
modified_angle_force.addAngle(p1, p2, p3, [theta0, K, growth_idx])
atoms_with_positions_system.getForce(reference_forces_indices['HarmonicAngleForce']).setAngleParameters(angle, p1, p2, p3, theta0, K*0.0)
else:
#then it is an angle term of core atoms and should be added to the atoms_with_positions_angle_force
_logger.debug(f"\t\t\tadding to the the atoms with positions system.")
# Create torsion force
_logger.info("\tcreating torsion force...")
modified_torsion_force = openmm.CustomTorsionForce(self._PeriodicTorsionForceEnergy.format(global_parameter_name))
modified_torsion_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['periodicity', 'phase', 'k', 'growth_idx']:
modified_torsion_force.addPerTorsionParameter(parameter_name)
_logger.info(f"\tcreating extra torsions force...")
extra_modified_torsion_force = copy.deepcopy(modified_torsion_force) #we will add this if we _do_ call the extra modified torsions force
growth_system.addForce(modified_torsion_force) #but we add this, regardlesss
reference_torsion_force = reference_forces['PeriodicTorsionForce']
_logger.info(f"\tthere are {reference_torsion_force.getNumTorsions()} torsions in reference force.")
for torsion in range(reference_torsion_force.getNumTorsions()):
p1, p2, p3, p4, periodicity, phase, k = reference_torsion_force.getTorsionParameters(torsion)
growth_idx = self._calculate_growth_idx([p1, p2, p3, p4], growth_indices)
_logger.debug(f"\t\tfor torsion {torsion} (i.e. partices {p1}, {p2}, {p3}, and {p4}), the growth_index is {growth_idx}")
if growth_idx > 0:
possible_omissions = [(p1,p2), (p2,p3), (p3,p4), (p2,p1), (p3,p2), (p4,p3)]
if any(torsion_pair in omitted_bonds for torsion_pair in possible_omissions):
_logger.debug(f"\t\t\tomitted torsion")
self.omitted_growth_terms['torsions'].append((p1,p2,p3,p4))
else:
modified_torsion_force.addTorsion(p1, p2, p3, p4, [periodicity, phase, k, growth_idx])
_logger.debug(f"\t\t\tadding to the growth system")
atoms_with_positions_system.getForce(reference_forces_indices['PeriodicTorsionForce']).setTorsionParameters(torsion, p1, p2, p3, p4, periodicity, phase, k*0.0)
else:
_logger.debug(f"\t\t\tadding to the the atoms with positions system.")
# TODO: check this for bugs by turning on sterics
if (use_sterics or use_14_nonbondeds) and 'NonbondedForce' in reference_forces.keys():
_logger.info("\tcreating nonbonded force...")
# Copy parameters for local sterics parameters in nonbonded force
reference_nonbonded_force = reference_forces['NonbondedForce']
atoms_with_positions_system.getForce(reference_forces_indices['NonbondedForce']).setUseDispersionCorrection(False)
_logger.info("\t\tgrabbing reference nonbonded method, cutoff, switching function, switching distance...")
reference_nonbonded_force_method = reference_nonbonded_force.getNonbondedMethod()
_logger.debug(f"\t\t\tnonbonded method: {reference_nonbonded_force_method}")
reference_nonbonded_force_cutoff = reference_nonbonded_force.getCutoffDistance()
_logger.debug(f"\t\t\tnonbonded cutoff distance: {reference_nonbonded_force_cutoff}")
reference_nonbonded_force_switching_function = reference_nonbonded_force.getUseSwitchingFunction()
_logger.debug(f"\t\t\tnonbonded switching function (boolean): {reference_nonbonded_force_switching_function}")
reference_nonbonded_force_switching_distance = reference_nonbonded_force.getSwitchingDistance()
_logger.debug(f"\t\t\tnonbonded switching distance: {reference_nonbonded_force_switching_distance}")
#now we add the 1,4 interaction force
if reference_nonbonded_force.getNumExceptions() > 0:
_logger.info("\t\tcreating nonbonded exception force (i.e. custom bond for 1,4s)...")
custom_bond_force = openmm.CustomBondForce(self._nonbondedExceptionEnergy.format(global_parameter_name))
custom_bond_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['chargeprod', 'sigma', 'epsilon', 'growth_idx']:
custom_bond_force.addPerBondParameter(parameter_name)
growth_system.addForce(custom_bond_force)
#Now we iterate through the exceptions and add custom bond forces if the growth intex for that bond > 0
_logger.info("\t\tlooping through exceptions calculating growth indices, and adding appropriate interactions to custom bond force.")
_logger.info(f"\t\tthere are {reference_nonbonded_force.getNumExceptions()} in the reference Nonbonded force")
possible_omissions = [[(p1,p2), (p2,p3), (p3,p4), (p2,p1), (p3,p2), (p4,p3)]]
for exception_index in range(reference_nonbonded_force.getNumExceptions()):
p1, p2, chargeprod, sigma, epsilon = reference_nonbonded_force.getExceptionParameters(exception_index)
growth_idx = self._calculate_growth_idx([p1, p2], growth_indices)
_logger.debug(f"\t\t\t{p1} and {p2} with charge {chargeprod} and epsilon {epsilon} have a growth index of {growth_idx}")
# Only need to add terms that are nonzero and involve newly added atoms.
if (growth_idx > 0) and ((chargeprod.value_in_unit_system(unit.md_unit_system) != 0.0) or (epsilon.value_in_unit_system(unit.md_unit_system) != 0.0)):
fails = 0
for tor in self.omitted_growth_terms['torsions']:
tor_set = set(tor)
if set((p1,p2)).issubset(tor_set):
fails += 1
if fails > 0:
self.omitted_growth_terms['1,4s'].append((p1,p2))
else:
custom_bond_force.addBond(p1, p2, [chargeprod, sigma, epsilon, growth_idx])
else:
_logger.info("\t\tthere are no Exceptions in the reference system.")
if use_sterics:
#now we define a custom nonbonded force for the growth system
_logger.info("\t\tadding custom nonbonded force...")
modified_sterics_force = openmm.CustomNonbondedForce(self._nonbondedEnergy.format(global_parameter_name))
modified_sterics_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['charge', 'sigma', 'epsilon', 'growth_idx']:
modified_sterics_force.addPerParticleParameter(parameter_name)
growth_system.addForce(modified_sterics_force)
# Translate nonbonded method to the custom nonbonded force
_logger.info("\t\tsetting nonbonded method, cutoff, switching function, and switching distance to custom nonbonded force...")
if reference_nonbonded_force_method in [0,1]: #if Nonbonded method is NoCutoff or CutoffNonPeriodic
modified_sterics_force.setNonbondedMethod(reference_nonbonded_force_method)
modified_sterics_force.setCutoffDistance(reference_nonbonded_force_cutoff)
elif reference_nonbonded_force_method in [2,3,4]:
modified_sterics_force.setNonbondedMethod(2)
modified_sterics_force.setCutoffDistance(self.sterics_cutoff_distance)
modified_sterics_force.setUseSwitchingFunction(reference_nonbonded_force_switching_function)
modified_sterics_force.setSwitchingDistance(reference_nonbonded_force_switching_distance)
else:
raise Exception(f"reference force nonbonded method {reference_nonbonded_force_method} is NOT supported for custom nonbonded force!")
# define atoms_with_positions_Nonbonded_Force
#atoms_with_positions_nonbonded_force.setUseDispersionCorrection(False)
# Add particle parameters to the custom nonbonded force...and add interactions to the atoms_with_positions_nonbonded_force if growth_index == 0
_logger.info("\t\tlooping through reference nonbonded force to add particle params to custom nonbonded force")
for particle_index in range(reference_nonbonded_force.getNumParticles()):
[charge, sigma, epsilon] = reference_nonbonded_force.getParticleParameters(particle_index)
growth_idx = self._calculate_growth_idx([particle_index], growth_indices)
modified_sterics_force.addParticle([charge, sigma, epsilon, growth_idx])
if particle_index in growth_indices:
atoms_with_positions_system.getForce(reference_forces_indices['NonbondedForce']).setParticleParameters(particle_index, charge*0.0, sigma, epsilon*0.0)
# Add exclusions, which are active at all times.
# (1,4) exceptions are always included, since they are part of the valence terms.
_logger.info("\t\tlooping through reference nonbonded force exceptions to add exclusions to custom nonbonded force")
for exception_index in range(reference_nonbonded_force.getNumExceptions()):
[p1, p2, chargeprod, sigma, epsilon] = reference_nonbonded_force.getExceptionParameters(exception_index)
modified_sterics_force.addExclusion(p1, p2)
#we also have to add the exceptions to the atoms_with_positions_nonbonded_force
#if len(set([p1, p2]).intersection(set(old_particle_indices))) == 2:
if len(set([p1,p2]).intersection(set(growth_indices))) > 0:
_logger.debug(f"\t\t\tparticle {p1} and/or {p2} are new indices and have an exception of {chargeprod} and {epsilon}. setting to zero.")
#then both particles are old, so we can add the exception to the atoms_with_positions_nonbonded_force
atoms_with_positions_system.getForce(reference_forces_indices['NonbondedForce']).setExceptionParameters(exception_index, p1, p2, chargeprod * 0.0, sigma, epsilon * 0.0)
# Only compute interactions of new particles with all other particles
# TODO: Allow inteactions to be resticted to only the residue being grown.
modified_sterics_force.addInteractionGroup(set(new_particle_indices), set(old_particle_indices))
modified_sterics_force.addInteractionGroup(set(new_particle_indices), set(new_particle_indices))
if reference_nonbonded_force_method in [0,1]:
if 'MonteCarloBarostat' in reference_forces_indices.keys():
atoms_with_positions_system.removeForce(reference_forces_indices['MonteCarloBarostat'])
else:
if 'MonteCarloBarostat' in reference_forces_indices.keys():
atoms_with_positions_system.removeForce(reference_forces_indices['MonteCarloBarostat'])
if 'NonbondedForce' in reference_forces_indices.keys(): #if we aren't using 14 interactions, we simply delete the nonbonded force object
atoms_with_positions_system.removeForce(reference_forces_indices['NonbondedForce'])
elif 'NonbondedForce' in reference_forces.keys():
if 'MonteCarloBarostat' in reference_forces_indices.keys():
atoms_with_positions_system.removeForce(reference_forces_indices['MonteCarloBarostat'])
if 'NonbondedForce' in reference_forces_indices.keys(): #if we aren't using 14 interactions, we simply delete the nonbonded force object
atoms_with_positions_system.removeForce(reference_forces_indices['NonbondedForce'])
# Add extra ring-closing torsions, if requested.
if add_extra_torsions:
_logger.debug(f"\t\tattempting to add extra torsions...")
if reference_topology == None:
raise ValueError("Need to specify topology in order to add extra torsions.")
self._determine_extra_torsions(extra_modified_torsion_force, reference_topology, growth_indices)
if extra_modified_torsion_force.getNumTorsions() > 0:
#then we should add it to the growth system...
growth_system.addForce(extra_modified_torsion_force)
# if add_extra_angles:
# if reference_topology==None:
# raise ValueError("Need to specify topology in order to add extra angles")
# self._determine_extra_angles(modified_angle_force, reference_topology, growth_indices)
# Store growth system
self._growth_parameter_name = global_parameter_name
self._growth_system = growth_system
self._atoms_with_positions_system = atoms_with_positions_system #note this is only bond, angle, and torsion forces
self.neglected_angle_terms = neglected_angle_term_indices #these are angle terms that are neglected because of coupling to lnZ_phi
_logger.info("Neglected angle terms : {}".format(neglected_angle_term_indices))
_logger.info(f"omitted_growth_terms: {self.omitted_growth_terms}")
_logger.info(f"extra torsions: {self.extra_torsion_terms}")
def set_growth_parameter_index(self, growth_parameter_index, context=None):
"""
Set the growth parameter index
"""
# TODO: Set default force global parameters if context is not None.
if context is not None:
context.setParameter(self._growth_parameter_name, growth_parameter_index)
self.current_growth_index = growth_parameter_index
def get_modified_system(self):
"""
Create a modified system with parameter_name parameter. When 0, only core atoms are interacting;
for each integer above 0, an additional atom is made interacting, with order determined by growth_index.
Returns
-------
growth_system : simtk.openmm.System object
System with the appropriate modifications, with growth parameter set to maximum.
"""
return self._growth_system
    def _determine_extra_torsions(self,
                                  torsion_force,
                                  reference_topology,
                                  growth_indices):
        """
        In order to facilitate ring closure and ensure proper bond stereochemistry,
        we add additional biasing torsions to rings and stereobonds that are then corrected
        for in the acceptance probability.
        Determine which residue is covered by the new atoms
        Identify rotatable bonds
        Construct analogous residue in OpenEye and generate configurations with Omega
        Measure appropriate torsions and generate relevant parameters
        .. warning :: Only one residue should be changing
        .. warning :: This currently will not work for polymer residues
        .. todo :: Use a database of biasing torsions constructed ahead of time and match to residues by NetworkX
        Parameters
        ----------
        torsion_force : openmm.CustomTorsionForce object
            the new/old torsion force if forward/backward
        reference_topology : openmm.app.Topology object (augmented)
            the new/old topology if forward/backward; must carry residue_oemol,
            residue_to_oemol_map, reverse_residue_to_oemol_map, and
            _get_networkx_molecule() as added by the proposal machinery
        growth_indices : list of int
            The list of new atoms and the order in which they will be added.
        Returns
        -------
        torsion_force : openmm.CustomTorsionForce
            The torsion force with extra torsions added appropriately.
        """
        from perses.rjmc import coordinate_numba
        from openeye import oechem
        # Do nothing if there are no atoms to grow.
        if len(growth_indices) == 0:
            return torsion_force
        #set ring restraints
        _logger.debug(f"\t\t\tattempting to add ring restraints")
        #get the list of torsions in the molecule that are not about a rotatable bond
        # Note that only torsions involving heavy atoms are enumerated here.
        rotor = oechem.OEIsRotor()
        torsion_predicate = oechem.OENotBond(rotor)
        non_rotor_torsions = list(oechem.OEGetTorsions(reference_topology.residue_oemol, torsion_predicate))
        #relevant_torsion_list = self._select_torsions_without_h(non_rotor_torsions)
        relevant_torsion_list = non_rotor_torsions
        #now, for each torsion, extract the set of indices and the angle
        periodicity = 1
        k = 1200.0*unit.kilocalories_per_mole # stddev of 1.2 degrees
        #print([atom.name for atom in growth_indices])
        _logger.debug(f"\t\t\trelevant torsions for ring restraints being added...")
        for torsion in relevant_torsion_list:
            #make sure to get the atom index that corresponds to the topology
            #atom_indices = [torsion.a.GetData("topology_index"), torsion.b.GetData("topology_index"), torsion.c.GetData("topology_index"), torsion.d.GetData("topology_index")]
            oe_atom_indices = [torsion.a.GetIdx(),
                               torsion.b.GetIdx(),
                               torsion.c.GetIdx(),
                               torsion.d.GetIdx()]
            # Only consider the torsion if all four oemol atom indices map back into the residue topology.
            if all(_idx in list(reference_topology.reverse_residue_to_oemol_map.keys()) for _idx in oe_atom_indices):
                #then every atom in the oemol lives in the openmm topology/residue, so we can consider it
                topology_index_map = [reference_topology.reverse_residue_to_oemol_map[q] for q in oe_atom_indices]
            else:
                topology_index_map = None
            # Determine phase in [-pi,+pi) interval
            #phase = (np.pi)*units.radians+angle
            adjusted_phase = self.adjust_phase(phase = torsion.radians)
            #print('PHASE>>>> ' + str(phase)) # DEBUG
            if topology_index_map is not None:
                growth_idx = self._calculate_growth_idx(topology_index_map, growth_indices)
                atom_names = [torsion.a.GetName(), torsion.b.GetName(), torsion.c.GetName(), torsion.d.GetName()]
                #print("Adding torsion with atoms %s and growth index %d" %(str(atom_names), growth_idx))
                #If this is a CustomTorsionForce, we need to pass the parameters as a list, and it will have the growth_idx parameter.
                #If it's a regular PeriodicTorsionForce, there is no growth_index and the parameters are passed separately.
                p1, p2, p3, p4 = topology_index_map
                # Bond pairs (in both orders) that, if present in self.omitted_bonds, disqualify this torsion.
                possible_omissions = [(p1,p2), (p2,p3), (p3,p4), (p2,p1), (p3,p2), (p4,p3)]
                if growth_idx > 0:
                    if any(torsion_pair in self.omitted_bonds for torsion_pair in possible_omissions):
                        # Skip ring-bias torsions that span an omitted bond.
                        pass
                    else:
                        _torsion_index = torsion_force.addTorsion(topology_index_map[0], topology_index_map[1], topology_index_map[2], topology_index_map[3], [periodicity, adjusted_phase, k, growth_idx])
                        # Record the added term so it can be reported/inspected later.
                        self.extra_torsion_terms[_torsion_index] = (topology_index_map[0], topology_index_map[1], topology_index_map[2], topology_index_map[3], [periodicity, adjusted_phase, k, growth_idx])
                        _logger.debug(f"\t\t\t\t{(topology_index_map[0], topology_index_map[1], topology_index_map[2], topology_index_map[3])}")
                else:
                    pass
                    #we omit terms wherein the growth index only pertains to the
        _logger.debug(f"\t\t\trelevant torsions for chirality restraints being added...")
        #set chirality restraints (adapted from https://github.com/choderalab/perses/blob/protein_mutations_ivy/perses/rjmc/geometry.py)
        #set stereochemistry
        #the chirality of the atoms is supposed to be pre-specified by NetworkXMolecule
        #render a 3d structure: note that this breaks the rjmc proposal (since we cannot enumerate the number of possible conformers)
        #add the improper torsions associated with the chiral center
        coords = reference_topology.residue_oemol.GetCoords()
        networkx_graph = reference_topology._get_networkx_molecule()
        #CIP_perceptions = {0: 'R', 1: 'S'}
        #iterate over all of the atoms with chiral centers
        _logger.debug(f"\t\t\t\tnodes: {networkx_graph.nodes()}")
        for _node in networkx_graph.nodes(data = True):
            _logger.debug(f"\t\t\t\tquerying node {_node[0]}")
            _logger.debug(f"\t\t\t\tnode attributes: {_node[1]}")
            if _node[1]['oechem_atom'].IsChiral():
                _logger.debug(f"\t\t\t\tnode is chiral...")
                assert(_node[1]['oechem_atom']).HasStereoSpecified(), f"atom {_node[1]['oechem_atom']} is chiral, but the chirality is not specified."
                # NOTE(review): _stereo/stereo are only referenced by the commented-out debug line below.
                _stereo = stereo = oechem.OEPerceiveCIPStereo(reference_topology.residue_oemol, _node[1]['oechem_atom'])
                #_logger.debug(f"\t\t\t\t\tis chiral with CIP: {CIP_perceptions[_stereo]}")
                #get the neighbors
                #nbrs_top : list(int) of topology indices
                #nbrs_oemol : list(int) of oemol indices of neighbor
                #nbrs : list(OEAtomBase) of the oemol atoms of neighbors
                #get the neighbors of the chiral atom of interest
                nbrs_top, nbrs_oemol, nbrs = [], [], []
                for nbr in networkx_graph[_node[0]]:
                    nbrs_top.append(nbr)
                    nbrs_oemol.append(reference_topology.residue_to_oemol_map[nbr])
                    nbrs.append(networkx_graph.nodes[nbr]['oechem_atom'])
                _logger.debug(f"\t\t\t\t\tquerying neighbors: {nbrs_top} with data: {[networkx_graph.nodes[lst_nbr]['openmm_atom'] for lst_nbr in nbrs_top]}")
                growth_idx = self._calculate_growth_idx(nbrs_top, growth_indices)
                _logger.debug(f"\t\t\t\t\tthe growth index of the neighbors is {growth_idx}")
                if growth_idx > 0:
                    if len(list(networkx_graph[_node[0]])) == 4:
                        _logger.debug(f"\t\t\t\t\tthe number of neighbors is 4; proceeding")
                        # TODO: handle chiral centers where the valency of the chiral center > 4
                        #specify the atom order for calculating the angle
                        #the order of the improper torsion will be as follows (p1, p2, p3, p4):
                        #p1: the neighbor of the chiral atom whose growth index is minimally greater than the growth index of the chiral center
                        #p2: the chiral center
                        #p3: the neighbor of the chiral center whose growth index is maximally less than (or equal to) the growth index of the chiral center
                        #p4: the neighbor of the chiral atom whose growth index is minimally greater than the growth index of p1
                        _node_growth_index = self._calculate_growth_idx([_node[0]], growth_indices)
                        nbr_growth_indices = [self._calculate_growth_idx([q], growth_indices) for q in nbrs_top]
                        _nbr_to_growth_index_tuple = [(_nbr, _idx) for _nbr, _idx in zip(nbrs_top, nbr_growth_indices)]
                        _logger.debug(f"\t\t\t\t\tgrowth index of node: {_node_growth_index}")
                        _logger.debug(f"\t\t\t\t\tgrowth indices of neighbors: {_nbr_to_growth_index_tuple}")
                        # If three of the four neighbors are core (growth index 0), no bias torsion can be formed.
                        if [tup[1] for tup in _nbr_to_growth_index_tuple].count(0) == 3:
                            _logger.warning(f"\t\t\t\t\tchiral atom {_node[1]['openmm_atom']} with neighbors {[networkx_graph.nodes[lst_nbr]['openmm_atom'] for lst_nbr in nbrs_top]} is surrounded by 3 core neighbors. omitting chirality bias torsion")
                        else:
                            #find p1:
                            p1_target_growth_index = min(tup[1] for tup in _nbr_to_growth_index_tuple if tup[1] > _node_growth_index)
                            p1 = [q[0] for q in _nbr_to_growth_index_tuple if q[1] == p1_target_growth_index][0] #take the first hit
                            #find p2:
                            p2 = _node[0]
                            #find p3:
                            p3_target_growth_index = max(tup[1] for tup in _nbr_to_growth_index_tuple if tup[1] <= _node_growth_index)
                            p3 = [q[0] for q in _nbr_to_growth_index_tuple if q[1] == p3_target_growth_index][0] #take the first hit
                            #find p4:
                            p4_target_growth_index = min(tup[1] for tup in _nbr_to_growth_index_tuple if tup[1] > p1_target_growth_index)
                            p4 = [q[0] for q in _nbr_to_growth_index_tuple if q[1] == p4_target_growth_index][0] #take the first hit
                            _logger.debug(f"\t\t\t\t\tgrowth index carrying this improper: {p4_target_growth_index}")
                            #now convert p1-p4 to oemol indices
                            oemol_indices = [reference_topology.residue_to_oemol_map[q] for q in [p1, p2, p3, p4]]
                            #calculate the improper torsion
                            # coords is dict of {idx: (x_0, y_0, z_0)}
                            # cartesian_to_internal returns (r, theta, phi); [2] takes the dihedral.
                            phase = coordinate_numba.cartesian_to_internal(np.array(coords[oemol_indices[0]], dtype = 'float64'),
                                                                           np.array(coords[oemol_indices[1]], dtype = 'float64'),
                                                                           np.array(coords[oemol_indices[2]], dtype = 'float64'),
                                                                           np.array(coords[oemol_indices[3]], dtype = 'float64'))[2]
                            adjusted_phase = self.adjust_phase(phase = phase)
                            growth_idx = self._calculate_growth_idx(nbrs_top, growth_indices)
                            # NOTE(review): the per-torsion growth parameter used here is p4_target_growth_index,
                            # not the growth_idx recomputed on the previous line -- confirm intended.
                            _torsion_index = torsion_force.addTorsion(p1, p2, p3, p4, [periodicity, adjusted_phase, k, p4_target_growth_index])
                            self.extra_torsion_terms[_torsion_index] = (p1, p2, p3, p4, [periodicity, adjusted_phase, k, p4_target_growth_index])
                            _logger.debug(f"\t\t\t\t\t{(p1, p2, p3, p4)}, phase : {adjusted_phase}")
                    else:
                        #the atom of interest must have 4 substitutents to be chiral; TODO: chirality can also be maintained with >4 atoms.
                        pass
        return torsion_force
def adjust_phase(self, phase):
"""
Utility function to adjust the phase properly
Parameters
----------
phase : float
phase angle
Returns
-------
adjusted_phase : float * unit.radians
adjusted phase with convention
"""
phase = phase + np.pi # TODO: Check that this is the correct convention?
while (phase >= np.pi):
phase -= 2*np.pi
while (phase < -np.pi):
phase += 2*np.pi
phase *= unit.radian
adjusted_phase = phase
return adjusted_phase
def _select_torsions_without_h(self, torsion_list):
"""
Return only torsions that do not contain hydrogen
Parameters
----------
torsion_list : list of oechem.OETorsion
Returns
-------
heavy_torsions : list of oechem.OETorsion
"""
heavy_torsions = []
for torsion in torsion_list:
is_h_present = [torsion.a.IsHydrogen(), torsion.b.IsHydrogen(), torsion.c.IsHydrogen(), torsion.d.IsHydrogen()]
if all(entry == False for entry in is_h_present):
heavy_torsions.append(torsion)
else:
#there is a hydrogen in this torsion, so it is omitted
pass
return heavy_torsions
    def _determine_extra_angles(self, angle_force, reference_topology, growth_indices):
        """
        Determine extra angles to be placed on aromatic ring members. Sometimes,
        the native angle force is too weak to efficiently close the ring. As with the
        torsion force, this method assumes that only one residue is changing at a time.
        Parameters
        ----------
        angle_force : simtk.openmm.CustomAngleForce
            the force to which additional terms will be added
        reference_topology : simtk.openmm.app.Topology
            new/old topology if forward/backward
        growth_indices : list of int
            atom growth indices
        Returns
        -------
        angle_force : simtk.openmm.CustomAngleForce
            The modified angle force
        """
        from simtk import openmm
        import itertools
        from openeye import oechem, oeomega
        # NOTE(review): this early exit returns None rather than the unmodified angle_force,
        # unlike _determine_extra_torsions which returns the force -- confirm callers tolerate this.
        if len(growth_indices)==0:
            return
        angle_force_constant = 400.0*unit.kilojoules_per_mole/unit.radians**2
        atoms = list(reference_topology.atoms())
        growth_indices = list(growth_indices)
        #get residue from first atom
        # NOTE(review): elements of growth_indices are dereferenced via `.idx` here, although the
        # docstring declares them as plain ints -- confirm which element type callers actually pass.
        residue = atoms[growth_indices[0].idx].residue
        try:
            oemol = FFAllAngleGeometryEngine._oemol_from_residue(residue)
        except Exception as e:
            # NOTE(review): on failure `oemol` is left undefined and the omega(oemol) call below
            # will raise NameError -- consider re-raising instead of printing and continuing.
            print("Could not generate an oemol from the residue.")
            print(e)
        #get the omega geometry of the molecule:
        omega = oeomega.OEOmega()
        omega.SetMaxConfs(1)  # a single conformer is generated for measuring equilibrium angles
        omega.SetStrictStereo(False) #TODO: fix stereochem
        omega(oemol)
        #we now have the residue as an oemol. Time to find the relevant angles.
        #There's no equivalent to OEGetTorsions, so first find atoms that are relevant
        #TODO: find out if that's really true
        aromatic_pred = oechem.OEIsAromaticAtom()
        heavy_pred = oechem.OEIsHeavy()
        angle_criteria = oechem.OEAndAtom(aromatic_pred, heavy_pred)
        #get all heavy aromatic atoms:
        #TODO: do this more efficiently
        heavy_aromatics = list(oemol.GetAtoms(angle_criteria))
        for atom in heavy_aromatics:
            #bonded_atoms = [bonded_atom for bonded_atom in list(atom.GetAtoms()) if bonded_atom in heavy_aromatics]
            bonded_atoms = list(atom.GetAtoms())
            # Each unordered pair of neighbors defines one angle centered on `atom`.
            for angle_atoms in itertools.combinations(bonded_atoms, 2):
                angle = oechem.OEGetAngle(oemol, angle_atoms[0], atom, angle_atoms[1])
                atom_indices = [angle_atoms[0].GetData("topology_index"), atom.GetData("topology_index"), angle_atoms[1].GetData("topology_index")]
                angle_radians = angle*unit.radian
                growth_idx = self._calculate_growth_idx(atom_indices, growth_indices)
                #If this is a CustomAngleForce, we need to pass the parameters as a list, and it will have the growth_idx parameter.
                #If it's a regular HarmonicAngleForce, there is no growth_index and the parameters are passed separately.
                if isinstance(angle_force, openmm.CustomAngleForce):
                    angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], [angle_radians, angle_force_constant, growth_idx])
                elif isinstance(angle_force, openmm.HarmonicAngleForce):
                    angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], angle_radians, angle_force_constant)
                else:
                    raise ValueError("Angle force must be either CustomAngleForce or HarmonicAngleForce")
        return angle_force
def _calculate_growth_idx(self, particle_indices, growth_indices):
"""
Utility function to calculate the growth index of a particular force.
For each particle index, it will check to see if it is in growth_indices.
If not, 0 is added to an array, if yes, the index in growth_indices is added.
Finally, the method returns the max of the accumulated array
Parameters
----------
particle_indices : list of int
The indices of particles involved in this force
growth_indices : list of int
The ordered list of indices for atom position proposals
Returns
-------
growth_idx : int
The growth_idx parameter
"""
particle_indices_set = set(particle_indices)
growth_indices_set = set(growth_indices)
new_atoms_in_force = particle_indices_set.intersection(growth_indices_set)
if len(new_atoms_in_force) == 0:
return 0
new_atom_growth_order = [growth_indices.index(atom_idx)+1 for atom_idx in new_atoms_in_force]
return max(new_atom_growth_order)
class NetworkXProposalOrder(object):
    """
    This is a proposal order generating object that uses just networkx and graph traversal for simplicity.
    """
    def __init__(self, topology_proposal, direction="forward"):
        """
        Create a NetworkXProposalOrder class
        Parameters
        ----------
        topology_proposal : perses.rjmc.topology_proposal.TopologyProposal
            Container class for the transformation
        direction: str, default forward
            Whether to go forward or in reverse for the proposal.
        TODO : reorganize this
        """
        from simtk.openmm import app
        self._topology_proposal = topology_proposal
        self._direction = direction
        # Reference hydrogen element used to partition new atoms below.
        # NOTE(review): the atomic number is passed as a float (1.0) rather than an int -- confirm intended.
        self._hydrogen = app.Element.getByAtomicNumber(1.0)
        # Set the direction
        if direction == "forward":
            self._destination_system = self._topology_proposal.new_system
            self._new_atoms = self._topology_proposal.unique_new_atoms
            self._destination_topology = self._topology_proposal.new_topology
            self._atoms_with_positions = self._topology_proposal.new_to_old_atom_map.keys()
            _nx_graph = self._topology_proposal._new_topology._get_networkx_molecule()
        elif direction == "reverse":
            self._destination_system = self._topology_proposal.old_system
            self._new_atoms = self._topology_proposal.unique_old_atoms
            self._destination_topology = self._topology_proposal.old_topology
            self._atoms_with_positions = self._topology_proposal.old_to_new_atom_map.keys()
            _nx_graph = self._topology_proposal._old_topology._get_networkx_molecule()
        else:
            raise ValueError("Direction must be either forward or reverse.")
        self._new_atom_objects = list(self._destination_topology.atoms())
        # Atom objects for the atoms that still need positions proposed.
        self._new_atoms_to_place = [atom for atom in self._destination_topology.atoms() if atom.index in self._new_atoms]
        self._atoms_with_positions_set = set(self._atoms_with_positions)
        self._hydrogens = []
        self._heavy = []
        # Sort the new atoms into hydrogen and heavy atoms:
        for atom in self._new_atoms_to_place:
            if atom.element == self._hydrogen:
                self._hydrogens.append(atom.index)
            else:
                self._heavy.append(atom.index)
        # Sanity check
        if len(self._hydrogens)==0 and len(self._heavy)==0:
            msg = 'NetworkXProposalOrder: No new atoms for direction {}\n'.format(direction)
            msg += str(topology_proposal)
            raise Exception(msg)
        # Choose the first of the new atoms to find the corresponding residue:
        #transforming_residue = self._new_atom_objects[self._new_atoms[0]].residue
        self._residue_graph = _nx_graph
        self._reference_connectivity_graph = self._create_reference_connectivity_graph()
    def _create_reference_connectivity_graph(self):
        """
        utility method to create a reference connectivity graph to check for omitted valence terms (the primary use of this graph is to check for ring closures)
        """
        #take the self._residue_graph and create a replicate (without the misc attributes) with the atoms_with_positions
        _reference_connectivity_graph = nx.Graph()
        atoms_with_positions = set(self._atoms_with_positions)
        #iterate over all the bonds
        for bond in self._residue_graph.edges():
            if set(bond).issubset(atoms_with_positions):
                #if both of the atoms in the bond are in atoms_with_positions, we can add the atoms/bonds to the reference
                _reference_connectivity_graph.add_edge(*bond)
        return _reference_connectivity_graph
    def determine_proposal_order(self):
        """
        Determine the proposal order of this system pair.
        This includes the choice of a torsion. As such, a logp is returned.
        Parameters
        ----------
        direction : str, optional
            whether to determine the forward or reverse proposal order
        Returns
        -------
        atom_torsions : list of list of int
            A list of torsions, where the first atom in the torsion is the one being proposed
        logp_torsion_choice : list
            log probability of the chosen torsions as a list of sequential atom placements
        omitted_bonds : list of tuples
            list of tuples of atom_indices
            #this is used when creating the growth system generator and the atoms_with_positions_system to account for unconnected atoms
        """
        # Place heavy atoms first, then hydrogens.
        heavy_atoms_torsions, heavy_logp = self._propose_atoms_in_order(self._heavy)
        hydrogen_atoms_torsions, hydrogen_logp = self._propose_atoms_in_order(self._hydrogens)
        proposal_order = heavy_atoms_torsions + hydrogen_atoms_torsions
        if len(proposal_order) == 0:
            msg = 'NetworkXProposalOrder: proposal_order is empty\n'
            raise Exception(msg)
        #Check that no atom is placed until each atom in the corresponding torsion is in the set of atoms with positions
        _set_of_atoms_with_positions = set(self._atoms_with_positions)
        # Now iterate through the proposal_order, ensuring that each atom in the corresponding torsion list is in the _set_of_atoms_with_positions (appending to the set after each placement)
        for torsion in proposal_order:
            assert set(torsion[1:]).issubset(_set_of_atoms_with_positions), "Proposal Order Issue: a torsion atom is not position-defined"
            _set_of_atoms_with_positions.add(torsion[0])
        # Ensure lists are not ill-defined
        assert heavy_logp + hydrogen_logp != [], "logp list of log_probabilities from torsion choices is an empty list"
        assert len(heavy_logp + hydrogen_logp) == len(proposal_order), "There is a mismatch in the size of the atom torsion proposals and the associated logps"
        #create a list of omitted_bonds tuples
        # A bond is "omitted" when it exists in the residue graph but appears (in neither
        # orientation) in the reference connectivity graph built from positioned atoms.
        omitted_bonds = []
        omitted_bonds_forward_pass = [edge for edge in self._residue_graph.edges() if edge not in list(self._reference_connectivity_graph.edges())]
        for omitted_bond in omitted_bonds_forward_pass:
            if omitted_bond[::-1] not in list(self._reference_connectivity_graph.edges()):
                omitted_bonds.append(omitted_bond)
        #delete the residue graph and reference connectivity graph since they cannot be pickled...
        # NOTE(review): deleting these attributes makes this method single-use per instance;
        # a second call will raise AttributeError.
        del self._residue_graph
        del self._reference_connectivity_graph
        return proposal_order, heavy_logp + hydrogen_logp, omitted_bonds
    def _propose_atoms_in_order(self, atom_group):
        """
        Propose a group of atoms along with corresponding torsions and a total log probability for the choice
        Parameters
        ----------
        atom_group : list of int
            The atoms to propose
        Returns
        -------
        atom_torsions : list of list of int
            A list of torsions, where the atom_torsions[0] is the one being proposed
        logp : list
            The contribution to the overall proposal log probability as a list of sequential logps
        """
        atom_torsions= []
        logp = []
        assert len(atom_group) == len(set(atom_group)), "There are duplicate atom indices in the list of atom proposal indices"
        # NOTE: atom_group is consumed in place (atoms removed as they are placed), so the
        # caller's list (e.g. self._heavy or self._hydrogens) is emptied by this method.
        while len(atom_group) > 0:
            #initialise an eligible_torsions_list
            eligible_torsions_list = list()
            for atom_index in atom_group:
                # Find the shortest path up to length four from the atom in question:
                shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)
                # Loop through the destination and path of each path and append to eligible_torsions_list
                # if destination has a position and path[1:3] is a subset of atoms with positions
                for destination, path in shortest_paths.items():
                    # Check if the path is length 4 (a torsion) and that the destination has a position. Continue if not.
                    if len(path) != 4 or destination not in self._atoms_with_positions_set:
                        continue
                    # If the last atom is in atoms with positions, check to see if the others are also.
                    # If they are, append the torsion to the list of possible torsions to propose
                    # (path[1:3] covers the two middle atoms; path[3] was already checked as `destination`).
                    if set(path[1:3]).issubset(self._atoms_with_positions_set):
                        eligible_torsions_list.append(path)
            assert len(eligible_torsions_list) != 0, "There is a connectivity issue; there are no torsions from which to choose"
            #now we have to randomly choose a single torsion
            ntorsions = len(eligible_torsions_list)
            random_torsion_index = np.random.choice(range(ntorsions))
            random_torsion = eligible_torsions_list[random_torsion_index]
            #append random torsion to the atom_torsions and remove source atom from the atom_group
            chosen_atom_index = random_torsion[0]
            first_old_atom_index = random_torsion[1]
            atom_torsions.append(random_torsion)
            atom_group.remove(chosen_atom_index)
            #add atom to atoms with positions and corresponding set
            self._atoms_with_positions_set.add(chosen_atom_index)
            #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph
            self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)
            #add the log probability of the choice to logp
            logp.append(np.log(1./ntorsions))
        # Ensure that logp is not ill-defined
        assert len(logp) == len(atom_torsions), "There is a mismatch in the size of the atom torsion proposals and the associated logps"
        return atom_torsions, logp
class NoTorsionError(Exception):
    """Raised when no eligible torsion can be found for an atom placement proposal."""
    def __init__(self, message):
        # Delegate to the base Exception constructor with the supplied message.
        super().__init__(message)
| mit | 60d95a9af6baf5d85aa3a19ac9e7311d | 51.596899 | 327 | 0.633372 | 4.065773 | false | false | false | false |
choderalab/perses | perses/app/relative_hydration.py | 1 | 5178 | import simtk.openmm as openmm
import simtk.unit as unit
from perses.annihilation.relative import HybridTopologyFactory
import numpy as np
from perses.tests.utils import generate_solvated_hybrid_test_topology
# Thermodynamic constants for the default simulation temperature (300 K).
kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
temperature = 300.0 * unit.kelvin
kT = kB * temperature  # thermal energy
beta = 1.0/kT  # inverse temperature
def simulate_hybrid(hybrid_system, functions, lambda_value, positions, nsteps=500, timestep=1.0*unit.femtoseconds, temperature=temperature, collision_rate=5.0/unit.picoseconds):
    """Run a short Langevin MD segment of the hybrid system at a fixed alchemical state.

    Parameters
    ----------
    hybrid_system : openmm.System
        The hybrid system to simulate.
    functions : dict of str
        Alchemical parameter names (only the keys are used here).
    lambda_value : float
        Value assigned to every alchemical parameter for this segment.
    positions : unit.Quantity
        Starting positions.
    nsteps : int, optional, default=500
        Number of integration steps.
    timestep, temperature, collision_rate : unit.Quantity, optional
        Langevin integrator settings.

    Returns
    -------
    unit.Quantity
        Final positions, wrapped into the periodic box, as a numpy array.
    """
    cuda_platform = openmm.Platform.getPlatformByName("CUDA")
    langevin = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
    md_context = openmm.Context(hybrid_system, langevin, cuda_platform)
    # Pin every alchemical parameter to the requested lambda value.
    for parameter_name in functions:
        md_context.setParameter(parameter_name, lambda_value)
    md_context.setPositions(positions)
    langevin.step(nsteps)
    final_state = md_context.getState(getPositions=True, enforcePeriodicBox=True)
    return final_state.getPositions(asNumpy=True)
def check_alchemical_hybrid_elimination_bar(topology_proposal, old_positions, new_positions, ncmc_nsteps=50, n_iterations=50, NSIGMA_MAX=6.0, geometry=False):
    """
    Check that the hybrid topology, where both endpoints are identical, returns a free energy within NSIGMA_MAX of 0.

    Collects n_iterations forward (lambda 0 -> 1) and reverse (lambda 1 -> 0)
    NCMC switching work values and estimates the free energy difference with
    BAR, printing the result.

    Parameters
    ----------
    topology_proposal : TopologyProposal
        Proposal connecting the two (identical) chemical states.
    old_positions : unit.Quantity
        Positions of the old system.
    new_positions : unit.Quantity
        Positions of the new system.
    ncmc_nsteps : int, optional, default=50
        Number of NCMC switching steps per work sample.
    n_iterations : int, optional, default=50
        Number of forward and reverse work samples to collect.
    NSIGMA_MAX : float, optional, default=6.0
        Intended tolerance (in sigmas) for deviation from zero; currently only
        reported, not enforced.
    geometry : bool, optional, default=False
        Unused; retained for backward compatibility of the signature.
    """
    # TODO this is a test
    # this code is out of date
    # NOTE(review): NCMCGHMCAlchemicalIntegrator was not imported anywhere in
    # this module (the original code raised NameError); it historically lived
    # in perses.annihilation.ncmc_switching -- confirm against the installed
    # perses version.
    from perses.annihilation.ncmc_switching import NCMCGHMCAlchemicalIntegrator
    # make the hybrid topology factory:
    factory = HybridTopologyFactory(topology_proposal, old_positions, new_positions)
    platform = openmm.Platform.getPlatformByName("CUDA")
    hybrid_system = factory.hybrid_system
    initial_hybrid_positions = factory.hybrid_positions
    # alchemical functions: sterics switch on during the first half of the
    # protocol, electrostatics during the second half.
    functions = {
        'lambda_sterics' : '2*lambda * step(0.5 - lambda) + (1.0 - step(0.5 - lambda))',
        'lambda_electrostatics' : '2*(lambda - 0.5) * step(lambda - 0.5)',
        'lambda_bonds' : 'lambda',
        'lambda_angles' : 'lambda',
        'lambda_torsions' : 'lambda'
    }
    w_f = np.zeros(n_iterations)
    w_r = np.zeros(n_iterations)
    # make the alchemical integrator for the forward protocol:
    forward_integrator = NCMCGHMCAlchemicalIntegrator(temperature, hybrid_system, functions, nsteps=ncmc_nsteps, direction='insert')
    forward_context = openmm.Context(hybrid_system, forward_integrator, platform)
    print("Minimizing for forward protocol...")
    forward_context.setPositions(initial_hybrid_positions)
    for parm in functions.keys():
        forward_context.setParameter(parm, 0.0)
    openmm.LocalEnergyMinimizer.minimize(forward_context, maxIterations=10)
    initial_state = forward_context.getState(getPositions=True, getEnergy=True)
    print("The initial energy after minimization is %s" % str(initial_state.getPotentialEnergy()))
    initial_forward_positions = initial_state.getPositions(asNumpy=True)
    equil_positions = simulate_hybrid(hybrid_system, functions, 0.0, initial_forward_positions)
    print("Beginning forward protocols")
    # first, do forward protocol (lambda=0 -> 1); each iteration equilibrates
    # at lambda=0, then accumulates the NCMC switching work.
    for i in range(n_iterations):
        equil_positions = simulate_hybrid(hybrid_system, functions, 0.0, equil_positions)
        forward_context.setPositions(equil_positions)
        forward_integrator.step(ncmc_nsteps)
        w_f[i] = -1.0 * forward_integrator.getLogAcceptanceProbability(forward_context)
    del forward_context, forward_integrator
    reverse_integrator = NCMCGHMCAlchemicalIntegrator(temperature, hybrid_system, functions, nsteps=ncmc_nsteps, direction='delete')
    print("Minimizing for reverse protocol...")
    reverse_context = openmm.Context(hybrid_system, reverse_integrator, platform)
    reverse_context.setPositions(initial_hybrid_positions)
    for parm in functions.keys():
        reverse_context.setParameter(parm, 1.0)
    openmm.LocalEnergyMinimizer.minimize(reverse_context, maxIterations=10)
    initial_state = reverse_context.getState(getPositions=True, getEnergy=True)
    print("The initial energy after minimization is %s" % str(initial_state.getPotentialEnergy()))
    initial_reverse_positions = initial_state.getPositions(asNumpy=True)
    equil_positions = simulate_hybrid(hybrid_system, functions, 1.0, initial_reverse_positions, nsteps=1000)
    # now, reverse protocol (lambda=1 -> 0)
    print("Beginning reverse protocols...")
    for i in range(n_iterations):
        equil_positions = simulate_hybrid(hybrid_system, functions, 1.0, equil_positions)
        reverse_context.setPositions(equil_positions)
        reverse_integrator.step(ncmc_nsteps)
        w_r[i] = -1.0 * reverse_integrator.getLogAcceptanceProbability(reverse_context)
    del reverse_context, reverse_integrator
    # BAR estimate of the free energy difference between the two endpoints
    from pymbar import BAR
    [df, ddf] = BAR(w_f, w_r)
    print("df = %12.6f +- %12.5f kT" % (df, ddf))
if __name__=="__main__":
    # Build a small solvated hybrid test system whose endpoints are
    # identical, then check that BAR recovers a free energy near zero.
    topology_proposal, old_positions, new_positions = generate_solvated_hybrid_test_topology()
    check_alchemical_hybrid_elimination_bar(topology_proposal, old_positions, new_positions, ncmc_nsteps=100, n_iterations=500)
"""Rewrite assertion AST to produce nice error messages"""
# pylint: disable=protected-access,unused-import,logging-not-lazy,duplicate-string-formatting-argument,logging-format-interpolation,too-many-lines
import ast
import errno
import functools
import importlib.abc
import importlib.machinery
import importlib.util
import io
import itertools
import marshal
import os
import struct
import sys
import tokenize
import types
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
import atomicwrites
from . import util
from .util import format_explanation as _format_explanation
from .pathlib import PurePath, fnmatch_ex
import logging
from munch import Munch
from .__version__ import __version__ as version
from .saferepr import saferepr
# Module-level logger; quiet by default (debug tracing only when enabled).
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.ERROR)
# When True, rewritten assertion explanations are prefixed with "dessert* "
# so introspected assertions are visually distinguishable (see visit_Assert).
_MARK_ASSERTION_INTROSPECTION = False
# pytest caches rewritten pycs in __pycache__.
# Tag embedded in the cached pyc filename so rewritten caches never collide
# with CPython's own pycs or with other interpreter versions.
PYTEST_TAG = "{}-pytest-{}".format(sys.implementation.cache_tag, version)
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
class AssertRewritingSession(object):
    """Minimal stand-in for a pytest session object.

    Dessert has no real collection session, so every path is treated as an
    initial path.
    """

    def __init__(self):
        # Kept for interface compatibility with pytest's session object.
        self._initialpaths = frozenset()

    def isinitpath(self, filename):
        """Report every *filename* as an initial path."""
        return True
# NOTE(review): alias appears unused in this module; presumably retained for
# compatibility with code that imported it from pytest's rewriter -- confirm
# before removing.
ast_Call = ast.Call
class AssertionRewritingHook(object):
    """PEP302 Import hook which rewrites asserts.

    Installed on ``sys.meta_path``; intercepts imports of source modules,
    rewrites their assert statements (via :func:`_rewrite_test`) and caches
    the rewritten code object as a tagged pyc under ``__pycache__``.
    """
    def __init__(self, config=None):
        # config: optional object providing warn()/getini(); may be None.
        self.config = config
        self.modules = {}
        self.session = AssertRewritingSession()
        self.state = Munch()
        self.fnpats = []
        self._rewritten_names = set() # type: Set[str]
        self._must_rewrite = set() # type: Set[str]
        # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
        # which might result in infinite recursion (#3506)
        self._writing_pyc = False
        self._basenames_to_check_rewrite = {"conftest"}
        self._marked_for_rewrite_cache = {} # type: Dict[str, bool]
        self._session_paths_checked = False
    def find_spec(self, name, path=None, target=None):
        """Locate *name* via the normal path finder and claim it for rewriting.

        Returns None (declining the import) while a pyc is being written,
        when the early bailout applies, or when the module has no rewritable
        source file; otherwise returns a spec with this hook as the loader.
        """
        if self._writing_pyc:
            return None
        state = self.state
        if self._early_rewrite_bailout(name, state):
            return None
        _logger.debug("find_spec called for %s (path: %s, target: %s)" % (name, path, target))
        spec = importlib.machinery.PathFinder.find_spec(name, path)
        if (
            # the import machinery could not find a file to import
            spec is None
            # this is a namespace package (without `__init__.py`)
            # there's nothing to rewrite there
            # python3.5 - python3.6: `namespace`
            # python3.7+: `None`
            or spec.origin in {None, "namespace"}
            # we can only rewrite source files
            or not isinstance(spec.loader, importlib.machinery.SourceFileLoader)
            # if the file doesn't exist, we can't rewrite it
            or not os.path.exists(spec.origin)
        ):
            return None
        else:
            fn = spec.origin
        if not self._should_rewrite(name, fn, state):
            return None
        # Re-issue the spec with ourselves as the loader so exec_module runs.
        return importlib.util.spec_from_file_location(
            name,
            fn,
            loader=self,
            submodule_search_locations=spec.submodule_search_locations,
        )
    def create_module(self, spec):
        """PEP 451 hook: use the default module creation semantics."""
        return None # default behaviour is fine
    def exec_module(self, module):
        """Execute *module* from rewritten code, using/writing the pyc cache."""
        fn = module.__spec__.origin
        state = self.state
        self._rewritten_names.add(module.__name__)
        # The requested module looks like a test file, so rewrite it. This is
        # the most magical part of the process: load the source, rewrite the
        # asserts, and load the rewritten source. We also cache the rewritten
        # module code in a special pyc. We must be aware of the possibility of
        # concurrent pytest processes rewriting and loading pycs. To avoid
        # tricky race conditions, we maintain the following invariant: The
        # cached pyc is always a complete, valid pyc. Operations on it must be
        # atomic. POSIX's atomic rename comes in handy.
        write = not sys.dont_write_bytecode
        cache_dir = os.path.join(os.path.dirname(fn), "__pycache__")
        if write:
            ok = try_mkdir(cache_dir)
            if not ok:
                write = False
                _logger.debug("read only directory: {}".format(os.path.dirname(fn)))
        cache_name = os.path.basename(fn)[:-3] + PYC_TAIL
        pyc = os.path.join(cache_dir, cache_name)
        # Notice that even if we're in a read-only directory, I'm going
        # to check for a cached pyc. This may not be optimal...
        co = _read_pyc(fn, pyc, _logger.debug)
        if co is None:
            _logger.debug("rewriting {!r}".format(fn))
            source_stat, co = _rewrite_test(fn, self.config)
            if write:
                # Guard against recursive rewriting while the pyc is written.
                self._writing_pyc = True
                try:
                    _write_pyc(state, co, source_stat, pyc)
                finally:
                    self._writing_pyc = False
        else:
            _logger.debug("found cached rewritten pyc for {!r}".format(fn))
        exec(co, module.__dict__) # pylint: disable=exec-used
    def _early_rewrite_bailout(self, name, state):
        """This is a fast way to get out of rewriting modules. Profiling has
        shown that the call to PathFinder.find_spec (inside of the find_spec
        from this class) is a major slowdown, so, this method tries to
        filter what we're sure won't be rewritten before getting to it.
        """
        # Dessert couldn't use the pytest implementation for this method, because it removes
        # its reformatting
        return False
    def _should_rewrite(self, name, fn_pypath, state):
        # Unlike pytest, dessert rewrites every module it is asked to load.
        return True
    def _is_marked_for_rewrite(self, name: str, state):
        """Return whether *name* (or a parent package of it) was passed to
        mark_rewrite(), caching the answer per name."""
        try:
            return self._marked_for_rewrite_cache[name]
        except KeyError:
            for marked in self._must_rewrite:
                if name == marked or name.startswith(marked + "."):
                    _logger.debug(
                        "matched marked file {!r} (from {!r})".format(name, marked)
                    )
                    self._marked_for_rewrite_cache[name] = True
                    return True
            self._marked_for_rewrite_cache[name] = False
            return False
    def mark_rewrite(self, *names: str) -> None:
        """Mark import names as needing to be rewritten.
        The named module or package as well as any nested modules will
        be rewritten on import.
        """
        # Warn for modules that were imported before this hook could rewrite
        # them (unless rewriting is disabled in their docstring or they were
        # already loaded through a hook of this type).
        already_imported = (
            set(names).intersection(sys.modules).difference(self._rewritten_names)
        )
        for name in already_imported:
            mod = sys.modules[name]
            if not AssertionRewriter.is_rewrite_disabled(
                mod.__doc__ or ""
            ) and not isinstance(mod.__loader__, type(self)):
                self._warn_already_imported(name)
        self._must_rewrite.update(names)
        self._marked_for_rewrite_cache.clear()
    def _warn_already_imported(self, name):
        # NOTE(review): relies on the legacy two-argument config.warn() API --
        # confirm the config objects passed in still support it.
        self.config.warn(
            'P1',
            'Module already imported so can not be re-written: %s' % name)
    def get_data(self, pathname):
        """Optional PEP302 get_data API.
        """
        with open(pathname, 'rb') as f:
            return f.read()
def _write_pyc(state, co, source_stat, pyc):
    """Atomically write code object *co* to the cache file *pyc*.

    Returns True on success and False if the write failed; failures are only
    logged at debug level, since there are many benign reasons for them
    (permission denied, __pycache__ being a regular file, etc.).
    """
    # Technically, we don't have to have the same pyc format as
    # (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason deviate.
    # Header layout mirrors CPython's: magic number, then 32-bit little-endian
    # mtime and size ("<LL" = 2 unsigned longs), then the marshalled code.
    truncated_mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
    truncated_size = source_stat.st_size & 0xFFFFFFFF
    header = importlib.util.MAGIC_NUMBER + struct.pack("<LL", truncated_mtime, truncated_size)
    try:
        with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp:
            fp.write(header)
            fp.write(marshal.dumps(co))
    except EnvironmentError as e:
        _logger.debug("error writing pyc file at {}: errno={}".format(pyc, e.errno))
        # we ignore any failure to write the cache file
        # there are many reasons, permission-denied, __pycache__ being a
        # file etc.
        return False
    return True
def _rewrite_test(fn, config):
    """Read *fn*, rewrite its assert statements, and return ``(stat, code)``.

    The stat result lets callers stamp the cached pyc with the source file's
    mtime and size.
    """
    source_stat = os.stat(fn)
    with open(fn, "rb") as source_file:
        source_bytes = source_file.read()
    module_ast = ast.parse(source_bytes, filename=fn)
    rewrite_asserts(module_ast, source_bytes, fn, config)
    code_object = compile(module_ast, fn, "exec", dont_inherit=True)
    return source_stat, code_object
def _read_pyc(source, pyc, trace=lambda x: None):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(pyc, "rb")
except IOError:
return None
with fp:
try:
stat_result = os.stat(source)
mtime = int(stat_result.st_mtime)
size = stat_result.st_size
data = fp.read(12)
except EnvironmentError as e:
trace("_read_pyc({}): EnvironmentError {}".format(source, e))
return None
# Check for invalid or out of date pyc file.
if (
len(data) != 12
or data[:4] != importlib.util.MAGIC_NUMBER
or struct.unpack("<LL", data[4:]) != (mtime & 0xFFFFFFFF, size & 0xFFFFFFFF)
):
trace("_read_pyc(%s): invalid or out of date pyc" % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
trace("_read_pyc({}): marshal.load error {}".format(source, e))
return None
if not isinstance(co, types.CodeType):
trace("_read_pyc(%s): not a code object" % source)
return None
return co
def rewrite_asserts(mod, source, module_path=None, config=None):
    """Rewrite the assert statements in mod.

    :param mod: ast.Module rewritten in place.
    :param source: original source bytes, used to recover the textual
        assertion expressions for the pass-hook.
    :param module_path: filename reported in warnings, or None.
    :param config: optional config object consulted for rewriter options.
    """
    AssertionRewriter(module_path, config, source).run(mod)
def _saferepr(obj):
    """Get a safe repr of an object for assertion error messages.

    The assertion formatting (util.format_explanation()) requires
    newlines to be escaped since they are a special character for it.
    Normally assertion.util.format_explanation() does this but for a
    custom repr it is possible to contain one of the special escape
    sequences, especially '\n{' and '\n}' are likely to be present in
    JSON reprs.
    """
    raw = saferepr(obj)
    # Escape raw newlines so they cannot confuse the explanation formatter.
    return raw.replace("\n", "\\n")
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
replaces = [("\n", "\n~"), ("%", "%%")]
if not isinstance(obj, str):
obj = saferepr(obj)
replaces.append(("\\n", "\n~"))
for r1, r2 in replaces:
obj = obj.replace(r1, r2)
return obj
def _should_repr_global_name(obj):
if callable(obj):
return False
try:
return not hasattr(obj, "__name__")
except Exception:
return True
def _format_boolop(explanations, is_or):
explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
if isinstance(explanation, str):
return explanation.replace("%", "%%")
else:
return explanation.replace(b"%", b"%%")
def _call_reprcompare(ops, results, expls, each_obj):
    # type: (Tuple[str, ...], Tuple[bool, ...], Tuple[str, ...], Tuple[object, ...]) -> str
    """Return the explanation for a (possibly chained) comparison.

    Walks the chain up to the first failing link and, if a
    ``util._reprcompare`` hook is installed, asks it for a custom
    explanation of that link; otherwise returns the default explanation.
    NOTE: assumes ``ops`` is non-empty (otherwise ``i``/``expl`` below would
    be unbound) -- the rewriter only calls this for real Compare nodes.
    """
    for i, res, expl in zip(range(len(ops)), results, expls):
        try:
            # bool(res) may itself raise for exotic objects; treat that as a
            # failing link so the explanation stops here.
            done = not res
        except Exception:
            done = True
        if done:
            break
    # pylint: disable=not-callable, undefined-loop-variable
    if util._reprcompare is not None:
        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
        if custom is not None:
            return custom
    return expl
def _call_assertion_pass(lineno, orig, expl):
    # type: (int, str, str) -> None
    """Invoke the installed assertion-pass hook, if any, for a passing assert."""
    # pylint: disable=not-callable
    if util._assertion_pass is not None:
        util._assertion_pass(lineno, orig, expl)
def _check_if_assertion_pass_impl():
    # type: () -> bool
    """Checks if any plugins implement the pytest_assertion_pass hook
    in order not to generate explanation unnecessarily (might be expensive)"""
    # bool() replaces the non-idiomatic `True if x else False`.
    return bool(util._assertion_pass)
# %-format templates for rendering unary operators in explanations.
UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"}
# Source-text symbol for each binary/comparison operator used when building
# assertion explanations.
BINOP_MAP = {
    ast.BitOr: "|",
    ast.BitXor: "^",
    ast.BitAnd: "&",
    ast.LShift: "<<",
    ast.RShift: ">>",
    ast.Add: "+",
    ast.Sub: "-",
    ast.Mult: "*",
    ast.Div: "/",
    ast.FloorDiv: "//",
    ast.Mod: "%%", # escaped for string formatting
    ast.Eq: "==",
    ast.NotEq: "!=",
    ast.Lt: "<",
    ast.LtE: "<=",
    ast.Gt: ">",
    ast.GtE: ">=",
    ast.Pow: "**",
    ast.Is: "is",
    ast.IsNot: "is not",
    ast.In: "in",
    ast.NotIn: "not in",
    ast.MatMult: "@",
}
def set_location(node, lineno, col_offset):
    """Set node location information recursively.

    Stamps *lineno*/*col_offset* onto *node* and every descendant that
    carries location attributes, then returns *node*.
    """
    pending = [node]
    while pending:
        current = pending.pop()
        if "lineno" in current._attributes:
            current.lineno = lineno
        if "col_offset" in current._attributes:
            current.col_offset = col_offset
        pending.extend(ast.iter_child_nodes(current))
    return node
def _get_assertion_exprs(src: bytes) -> Dict[int, str]:
"""Returns a mapping from {lineno: "assertion test expression"}"""
ret = {} # type: Dict[int, str]
depth = 0
lines = [] # type: List[str]
assert_lineno = None # type: Optional[int]
seen_lines = set() # type: Set[int]
def _write_and_reset() -> None:
nonlocal depth, lines, assert_lineno, seen_lines
assert assert_lineno is not None
ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\")
depth = 0
lines = []
assert_lineno = None
seen_lines = set()
tokens = tokenize.tokenize(io.BytesIO(src).readline)
for tp, source, (lineno, offset), _, line in tokens:
if tp == tokenize.NAME and source == "assert":
assert_lineno = lineno
elif assert_lineno is not None:
# keep track of depth for the assert-message `,` lookup
if tp == tokenize.OP and source in "([{":
depth += 1
elif tp == tokenize.OP and source in ")]}":
depth -= 1
if not lines:
lines.append(line[offset:])
seen_lines.add(lineno)
# a non-nested comma separates the expression from the message
elif depth == 0 and tp == tokenize.OP and source == ",":
# one line assert with message
if lineno in seen_lines and len(lines) == 1:
offset_in_trimmed = offset + len(lines[-1]) - len(line)
lines[-1] = lines[-1][:offset_in_trimmed]
# multi-line assert with message
elif lineno in seen_lines:
lines[-1] = lines[-1][:offset]
# multi line assert with escapd newline before message
else:
lines.append(line[:offset])
_write_and_reset()
elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}:
_write_and_reset()
elif lines and lineno not in seen_lines:
lines.append(line)
seen_lines.add(lineno)
return ret
class AssertionRewriter(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and rewrite them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it rewrites the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false and calls pytest_assertion_pass hook
if expression is true.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:expl_stmts: The AST statements which will be executed to get
data from the assertion. This is the code which will construct
the detailed assertion message that is used in the AssertionError
or for the pytest_assertion_pass hook.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
to build another %-formatted string while already building one.
This state is reset on every new assert statement visited and used
by the other visitors.
"""
def __init__(self, module_path, config, source):
super().__init__()
self.module_path = module_path
self.config = config
if config is not None:
self.enable_assertion_pass_hook = config.getini(
"enable_assertion_pass_hook"
)
else:
self.enable_assertion_pass_hook = False
self.source = source
@functools.lru_cache(maxsize=1)
def _assert_expr_to_lineno(self):
return _get_assertion_exprs(self.source)
def run(self, mod: ast.Module) -> None:
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
doc = getattr(mod, "docstring", None)
expect_docstring = doc is None
if doc is not None and self.is_rewrite_disabled(doc):
return
pos = 0
lineno = 1
for item in mod.body:
if (
expect_docstring
and isinstance(item, ast.Expr)
and isinstance(item.value, ast.Str)
):
doc = item.value.s
if self.is_rewrite_disabled(doc):
return
expect_docstring = False
elif (
not isinstance(item, ast.ImportFrom)
or item.level > 0
or item.module != "__future__"
):
lineno = item.lineno
break
pos += 1
else:
lineno = item.lineno
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
if sys.version_info < (3, 10):
aliases = [
ast.alias("builtins", "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar"),
]
else:
aliases = [
ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0),
ast.alias("dessert.rewrite", "@dessert_ar", lineno=lineno, col_offset=0),
]
imports = [
ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases
]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod] # type: List[ast.AST]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = [] # type: List
for _, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (
isinstance(field, ast.AST)
# Don't recurse into expressions as they can't contain
# asserts.
and not isinstance(field, ast.expr)
):
nodes.append(field)
@staticmethod
def is_rewrite_disabled(docstring):
return "PYTEST_DONT_REWRITE" in docstring
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call saferepr on the expression."""
return self.helper("_saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, name, ast.Load())
return ast.Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers = {} # type: Dict[str, ast.expr]
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
"""Format the %-formatted string with current format context.
The expl_expr should be an ast.Str instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .expl_stmts and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
if self.enable_assertion_pass_hook:
self.format_variables.append(name)
self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This rewrites the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
from dessert.warning_types import DessertAssertRewriteWarning
import warnings
# Ignore type: typeshed bug https://github.com/python/typeshed/pull/3121
warnings.warn_explicit( # type: ignore
DessertAssertRewriteWarning(
"assertion is always true, perhaps remove parentheses?"
),
category=None,
filename=self.module_path,
lineno=assert_.lineno,
)
self.statements = [] # type: List[ast.stmt]
self.variables = [] # type: List[str]
self.variable_counter = itertools.count()
if self.enable_assertion_pass_hook:
self.format_variables = [] # type: List[str]
self.stack = [] # type: List[Dict[str, ast.expr]]
self.expl_stmts = [] # type: List[ast.stmt]
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# If in a test module, check if directly asserting None, in order to warn [Issue #3191]
if self.module_path is not None:
self.statements.append(
self.warn_about_none_ast(
top_condition, module_path=self.module_path, lineno=assert_.lineno
)
)
if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook
negation = ast.UnaryOp(ast.Not(), top_condition)
msg = self.pop_format_context(ast.Str(explanation))
# Failed
if assert_.msg:
assertmsg = self.helper("_format_assertmsg", assert_.msg)
gluestr = "\n>assert "
else:
assertmsg = ast.Str("")
gluestr = "assert "
err_explanation = ast.BinOp(ast.Str(gluestr), ast.Add(), msg)
err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation)
err_name = ast.Name("AssertionError", ast.Load())
fmt = self.helper("_format_explanation", err_msg)
exc = ast.Call(err_name, [fmt], [])
raise_ = ast.Raise(exc, None)
statements_fail = []
statements_fail.extend(self.expl_stmts)
statements_fail.append(raise_)
# Passed
fmt_pass = self.helper("_format_explanation", msg)
orig = self._assert_expr_to_lineno()[assert_.lineno]
hook_call_pass = ast.Expr(
self.helper(
"_call_assertion_pass",
ast.Num(assert_.lineno),
ast.Str(orig),
fmt_pass,
)
)
# If any hooks implement assert_pass hook
hook_impl_test = ast.If(
self.helper("_check_if_assertion_pass_impl"),
self.expl_stmts + [hook_call_pass],
[],
)
statements_pass = [hook_impl_test]
# Test for assertion condition
main_test = ast.If(negation, statements_fail, statements_pass)
self.statements.append(main_test)
if self.format_variables:
variables = [
ast.Name(name, ast.Store()) for name in self.format_variables
]
clear_format = ast.Assign(variables, ast.NameConstant(None))
self.statements.append(clear_format)
else: # Original assertion rewriting
# Create failure message.
body = self.expl_stmts
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper("_format_assertmsg", assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = "dessert* " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("_format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast.Call(err_name, [fmt], [])
raise_ = ast.Raise(exc, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store()) for name in self.variables]
clear = ast.Assign(variables, ast.NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def warn_about_none_ast(self, node, module_path, lineno):
"""
Returns an AST issuing a warning if the value of node is `None`.
This is used to warn the user when asserting a function that asserts
internally already.
See issue #3191 for more details.
"""
val_is_none = ast.Compare(node, [ast.Is()], [ast.NameConstant(None)])
send_warning = ast.parse(
"""\
from dessert.warning_types import DessertAssertRewriteWarning
from warnings import warn_explicit
warn_explicit(
DessertAssertRewriteWarning('asserting the value None, please use "assert is None"'),
category=None,
filename={filename!r},
lineno={lineno},
)
""".format(
filename=module_path, lineno=lineno
)
).body
return ast.If(val_is_none, send_warning, [])
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("_should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.expl_stmts
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuiting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = [] # type: List[ast.stmt]
# cond is set in a prior loop iteration below
self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa
self.expl_stmts = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast.Call(app, [expl_format], [])
self.expl_stmts.append(ast.Expr(call))
if i < levels:
cond = res # type: ast.expr
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = [] # type: List[ast.stmt]
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.expl_stmts = fail_save
expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = UNARY_MAP[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
    """Rewrite a binary operation, combining both operand explanations."""
    op_symbol = BINOP_MAP[binop.op.__class__]
    lhs_res, lhs_expl = self.visit(binop.left)
    rhs_res, rhs_expl = self.visit(binop.right)
    outcome = self.assign(ast.BinOp(lhs_res, binop.op, rhs_res))
    return outcome, "({} {} {})".format(lhs_expl, op_symbol, rhs_expl)
def visit_Call(self, call):
    """
    visit `ast.Call` nodes

    Rewrites the function, every positional argument and every keyword
    argument, then captures the call result and builds an explanation of
    the form ``result\\n{result = func(args)\\n}``.
    """
    func_res, func_expl = self.visit(call.func)
    expl_parts = []
    rewritten_args = []
    rewritten_kwargs = []
    for pos_arg in call.args:
        arg_res, arg_expl = self.visit(pos_arg)
        expl_parts.append(arg_expl)
        rewritten_args.append(arg_res)
    for kw in call.keywords:
        kw_res, kw_expl = self.visit(kw.value)
        rewritten_kwargs.append(ast.keyword(kw.arg, kw_res))
        # A ``**kwargs`` expansion appears as a keyword whose .arg is None.
        if kw.arg:
            expl_parts.append(kw.arg + "=" + kw_expl)
        else:
            expl_parts.append("**" + kw_expl)
    call_expl = "{}({})".format(func_expl, ", ".join(expl_parts))
    call_res = self.assign(ast.Call(func_res, rewritten_args, rewritten_kwargs))
    res_expl = self.explanation_param(self.display(call_res))
    return call_res, "{}\n{{{} = {}\n}}".format(res_expl, res_expl, call_expl)
def visit_Starred(self, starred):
    """Rewrite a ``*``-unpacked argument (legal in calls since Python 3.5)."""
    inner_res, inner_expl = self.visit(starred.value)
    return ast.Starred(inner_res, starred.ctx), "*" + inner_expl
def visit_Attribute(self, attr):
    """Rewrite attribute access in Load context; leave stores/deletes alone."""
    if not isinstance(attr.ctx, ast.Load):
        return self.generic_visit(attr)
    obj_res, obj_expl = self.visit(attr.value)
    attr_res = self.assign(ast.Attribute(obj_res, attr.attr, ast.Load()))
    attr_res_expl = self.explanation_param(self.display(attr_res))
    expl = "%s\n{%s = %s.%s\n}" % (attr_res_expl, attr_res_expl, obj_expl, attr.attr)
    return attr_res, expl
def visit_Compare(self, comp: ast.Compare):
    """Rewrite a (possibly chained) comparison such as ``a < b <= c``.

    Each pairwise comparison result is stored in its own temporary so
    _call_reprcompare can later build operator-specific reports (diffs
    etc.); a chain's overall value is the ``and`` of all pair results.
    """
    self.push_format_context()
    left_res, left_expl = self.visit(comp.left)
    # Parenthesize nested comparisons/boolops so the explanation is unambiguous.
    if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
        left_expl = "({})".format(left_expl)
    res_variables = [self.variable() for i in range(len(comp.ops))]
    load_names = [ast.Name(v, ast.Load()) for v in res_variables]
    store_names = [ast.Name(v, ast.Store()) for v in res_variables]
    it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
    expls = []
    syms = []
    results = [left_res]
    for i, op, next_operand in it:
        next_res, next_expl = self.visit(next_operand)
        if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
            next_expl = "({})".format(next_expl)
        results.append(next_res)
        sym = BINOP_MAP[op.__class__]
        syms.append(ast.Str(sym))
        expl = "{} {} {}".format(left_expl, sym, next_expl)
        expls.append(ast.Str(expl))
        # Evaluate this pair and stash the boolean result in its temp var.
        res_expr = ast.Compare(left_res, [op], [next_res])
        self.statements.append(ast.Assign([store_names[i]], res_expr))
        # The right operand of this pair is the left operand of the next.
        left_res, left_expl = next_res, next_expl
    # Use pytest.assertion.util._reprcompare if that's available.
    expl_call = self.helper(
        "_call_reprcompare",
        ast.Tuple(syms, ast.Load()),
        ast.Tuple(load_names, ast.Load()),
        ast.Tuple(expls, ast.Load()),
        ast.Tuple(results, ast.Load()),
    )
    if len(comp.ops) > 1:
        res = ast.BoolOp(ast.And(), load_names)  # type: ast.expr
    else:
        res = load_names[0]
    return res, self.explanation_param(self.pop_format_context(expl_call))
def try_mkdir(cache_dir):
    """Attempt to create *cache_dir*; return True if it can be used.

    Returns True when the directory was created or already exists (or is
    blocked by a non-directory node, which _write_pyc deals with later);
    False when creation is impossible (missing parent, e.g. inside a zip
    file; no permission; read-only filesystem).
    """
    try:
        os.mkdir(cache_dir)
        return True
    except FileExistsError:
        # Either the __pycache__ directory already exists (the common
        # case) or a non-dir node blocks the path; the latter is handled
        # later in _write_pyc.
        return True
    except (FileNotFoundError, NotADirectoryError, PermissionError):
        # Missing/non-directory parent (likely a zip file) or no access.
        return False
    except OSError as exc:
        # As of now EROFS has no dedicated OSError subclass.
        if exc.errno == errno.EROFS:
            return False
        raise
| mit | e216ebb0c5900e58bd743a428271a7ee | 37.394059 | 146 | 0.583501 | 4.012624 | false | false | false | false |
vmalloc/dessert | dessert/util.py | 1 | 14757 | """Utilities for assertion debugging"""
import attr
import pprint
import logging
from collections.abc import Sequence
from typing import Callable
from typing import List
from typing import Optional
_logger = logging.getLogger(__name__)
from .conf import conf
from .saferepr import saferepr
# attrs renamed the per-field comparison flag from "cmp" to "eq" in
# release 19.2; pick whichever spelling this attrs installation accepts.
if getattr(attr, "__version_info__", ()) >= (19, 2):
    ATTRS_EQ_FIELD = "eq"
else:
    ATTRS_EQ_FIELD = "cmp"
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None  # type: Optional[Callable[[str, object, object], Optional[str]]]
# Works similarly as _reprcompare attribute. Is populated with the hook call
# when pytest_runtest_setup is called.
_assertion_pass = None  # type: Optional[Callable[[int, str, str], None]]
def format_explanation(explanation, original_msg=None):
    """This formats an explanation

    Normally all embedded newlines are escaped, however there are
    three exceptions: \n{, \n} and \n~. The first two are intended
    cover nested explanations, see function and attribute explanations
    for examples (.visit_Call(), visit_Attribute()). The last one is
    for when one explanation needs to span multiple lines, e.g. when
    displaying diffs.

    When message introspection is disabled and the assertion carried an
    explicit message, that original message is returned unchanged.
    """
    if not conf.is_message_introspection_enabled() and original_msg:
        return original_msg
    # (removed a dead self-assignment of `explanation` that did nothing)
    lines = _split_explanation(explanation)
    result = _format_lines(lines)
    return "\n".join(result)
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or "").split("\n")
lines = [raw_lines[0]]
for values in raw_lines[1:]:
if values and values[0] in ["{", "}", "~", ">"]:
lines.append(values)
else:
lines[-1] += "\\n" + values
return lines
def _format_lines(lines):
    """Format the individual lines

    This will replace the '{', '}' and '~' characters of our mini
    formatting language with the proper 'where ...', 'and ...' and ' +
    ...' text, taking care of indentation along the way.

    Return a list of formatted lines.
    """
    result = lines[:1]
    # `stack` holds, per open '{' level, the index in `result` whose line
    # should receive the matching '}' suffix; `stackcnt` counts how many
    # sibling sub-explanations were opened at each level (first one gets
    # "where", later ones get "and").
    stack = [0]
    stackcnt = [0]
    for line in lines[1:]:
        if line.startswith("{"):
            if stackcnt[-1]:
                s = "and "
            else:
                s = "where "
            stack.append(len(result))
            stackcnt[-1] += 1
            stackcnt.append(0)
            result.append(" +" + " " * (len(stack) - 1) + s + line[1:])
        elif line.startswith("}"):
            # Close the innermost sub-explanation: append the '}' line's
            # text to the line that opened it.
            stack.pop()
            stackcnt.pop()
            result[stack[-1]] += line[1:]
        else:
            # Continuation lines: '~' indents one extra level, '>' stays
            # at the current level.
            assert line[0] in ["~", ">"]
            stack[-1] += 1
            indent = len(stack) if line.startswith("~") else len(stack) - 1
            result.append(" " * indent + line[1:])
    assert len(stack) == 1
    return result
def issequence(x):
    """Return True for list/tuple-like sequences, excluding plain strings."""
    return not isinstance(x, str) and isinstance(x, Sequence)
def istext(x):
    """Return True when *x* is a string."""
    return isinstance(x, str)
def isdict(x):
    """Return True when *x* is a dict (or dict subclass)."""
    return isinstance(x, dict)
def isset(x):
    """Return True when *x* is a set or frozenset."""
    return isinstance(x, (set, frozenset))
def isdatacls(obj):
    """Return True when *obj* is a dataclass instance (or class)."""
    return getattr(obj, "__dataclass_fields__", None) is not None
def isattrs(obj):
    """Return True when *obj* is an attrs-decorated instance (or class)."""
    return getattr(obj, "__attrs_attrs__", None) is not None
def isiterable(obj):
    """Return True when *obj* supports iteration and is not a plain string."""
    if isinstance(obj, str):
        return False
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def assertrepr_compare(config, op, left, right):
    """Return specialised explanations for some operators/operands"""
    # 15 chars of indentation plus one space on each side of the operator.
    width_each = (80 - 15 - len(op) - 2) // 2
    summary = "{} {} {}".format(
        saferepr(left, maxsize=width_each), op, saferepr(right, maxsize=width_each)
    )
    verbose = config.getoption("verbose")
    explanation = None
    try:
        if op == "==":
            if istext(left) and istext(right):
                explanation = _diff_text(left, right, verbose)
            else:
                if issequence(left) and issequence(right):
                    explanation = _compare_eq_sequence(left, right, verbose)
                elif isset(left) and isset(right):
                    explanation = _compare_eq_set(left, right, verbose)
                elif isdict(left) and isdict(right):
                    explanation = _compare_eq_dict(left, right, verbose)
                elif type(left) == type(right) and (isdatacls(left) or isattrs(left)):  # pylint: disable=unidiomatic-typecheck
                    explanation = _compare_eq_cls(
                        left, right, verbose, (isdatacls, isattrs)
                    )
                elif verbose > 0:
                    explanation = _compare_eq_verbose(left, right)
                # Whatever matched above, also show the full iterable diff.
                if isiterable(left) and isiterable(right):
                    extra = _compare_eq_iterable(left, right, verbose)
                    explanation = extra if explanation is None else explanation + extra
        elif op == "not in":
            if istext(left) and istext(right):
                explanation = _notin_text(left, right, verbose)
    except Exception:
        _logger.exception(
            "dessert: representation of details failed. "
            "Probably an object has a faulty __repr__."
        )
    if not explanation:
        return None
    return [summary] + explanation
def _diff_text(left, right, verbose=0):
    """Return the explanation for the diff between text or bytes.

    Unless --verbose is used this will skip leading and trailing
    characters which are identical to keep the diff minimal.

    If the input are bytes they will be safely converted to text.
    """
    from difflib import ndiff

    explanation = []  # type: List[str]

    def escape_for_readable_diff(binary_text):
        """
        Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode.
        This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape
        newlines and carriage returns (#429).
        """
        r = str(repr(binary_text)[1:-1])
        r = r.replace(r"\n", "\n")
        r = r.replace(r"\r", "\r")
        return r

    if isinstance(left, bytes):
        left = escape_for_readable_diff(left)
    if isinstance(right, bytes):
        right = escape_for_readable_diff(right)
    if verbose < 1:
        i = 0  # just in case left or right has zero length
        # Find the first index where the two strings differ.
        for i in range(min(len(left), len(right))):
            if left[i] != right[i]:
                break
        if i > 42:
            i -= 10  # Provide some context
            explanation = [
                "Skipping %s identical leading characters in diff, use -v to show" % i
            ]
            left = left[i:]
            right = right[i:]
        if len(left) == len(right):
            # Scan from the end for the first differing character.
            # NOTE(review): at i == 0, left[-0] is left[0] (negative zero
            # indexing), so the first iteration compares the *first*
            # characters — this matches upstream pytest behavior; confirm
            # before changing.
            for i in range(len(left)):
                if left[-i] != right[-i]:
                    break
            if i > 42:
                i -= 10  # Provide some context
                explanation += [
                    "Skipping {} identical trailing "
                    "characters in diff, use -v to show".format(i)
                ]
                left = left[:-i]
                right = right[:-i]
    keepends = True
    if left.isspace() or right.isspace():
        # Whitespace-only strings would produce an unreadable diff.
        left = repr(str(left))
        right = repr(str(right))
        explanation += ["Strings contain only whitespace, escaping them using repr()"]
    explanation += [
        line.strip("\n")
        for line in ndiff(left.splitlines(keepends), right.splitlines(keepends))
    ]
    return explanation
def _compare_eq_verbose(left, right):
keepends = True
left_lines = repr(left).splitlines(keepends)
right_lines = repr(right).splitlines(keepends)
explanation = [] # type: List[str]
explanation += ["-" + line for line in left_lines]
explanation += ["+" + line for line in right_lines]
return explanation
def _compare_eq_iterable(left, right, verbose=0):
if not verbose:
return ["Use -v to get the full diff"]
# dynamic import to speedup pytest
import difflib
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = ["Full diff:"]
explanation.extend(
line.strip() for line in difflib.ndiff(left_formatting, right_formatting)
)
return explanation
def _compare_eq_iterable(left, right, verbose=0):
if not verbose:
return ["Use -v to get the full diff"]
# dynamic import to speedup pytest
import difflib
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = ["Full diff:"]
explanation.extend(
line.strip() for line in difflib.ndiff(left_formatting, right_formatting)
)
return explanation
def _compare_eq_sequence(left, right, verbose=0):
    """Explain why two sequences compared unequal.

    Reports the first differing index and, when the lengths differ,
    which side has extra items and the first extra item.
    """
    comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
    explanation = []  # type: List[str]
    len_left = len(left)
    len_right = len(right)
    for i in range(min(len_left, len_right)):
        if left[i] != right[i]:
            if comparing_bytes:
                # when comparing bytes, we want to see their ascii representation
                # instead of their numeric values (#5260)
                # using a slice gives us the ascii representation:
                # >>> s = b'foo'
                # >>> s[0]
                # 102
                # >>> s[0:1]
                # b'f'
                left_value = left[i : i + 1]
                right_value = right[i : i + 1]
            else:
                left_value = left[i]
                right_value = right[i]
            explanation += [
                "At index {} diff: {!r} != {!r}".format(i, left_value, right_value)
            ]
            break
    if comparing_bytes:
        # when comparing bytes, it doesn't help to show the "sides contain one or more items"
        # longer explanation, so skip it
        return explanation
    len_diff = len_left - len_right
    if len_diff:
        if len_diff > 0:
            dir_with_more = "Left"
            extra = saferepr(left[len_right])
        else:
            len_diff = 0 - len_diff
            dir_with_more = "Right"
            extra = saferepr(right[len_left])
        if len_diff == 1:
            explanation += [
                "{} contains one more item: {}".format(dir_with_more, extra)
            ]
        else:
            explanation += [
                "%s contains %d more items, first extra item: %s"
                % (dir_with_more, len_diff, extra)
            ]
    return explanation
def _compare_eq_set(left, right, verbose=0):
    """List the items present in only one of the two sets."""
    explanation = []
    for side, extras in (("left", left - right), ("right", right - left)):
        if extras:
            explanation.append("Extra items in the %s set:" % side)
            explanation.extend(saferepr(item) for item in extras)
    return explanation
def _compare_eq_dict(left, right, verbose=False):
    """Explain why two dicts compared unequal.

    Shows common-but-differing items and the keys that exist only on one
    side; identical items are summarised unless -vv is given.
    """
    explanation = []  # type: List[str]
    set_left = set(left)
    set_right = set(right)
    common = set_left.intersection(set_right)
    same = {k: left[k] for k in common if left[k] == right[k]}
    if same and verbose < 2:
        explanation += ["Omitting %s identical items, use -vv to show" % len(same)]
    elif same:
        explanation += ["Common items:"]
        explanation += pprint.pformat(same).splitlines()
    # Keys present on both sides but with different values.
    diff = {k for k in common if left[k] != right[k]}
    if diff:
        explanation += ["Differing items:"]
        for k in diff:
            explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})]
    extra_left = set_left - set_right
    len_extra_left = len(extra_left)
    if len_extra_left:
        explanation.append(
            "Left contains %d more item%s:"
            % (len_extra_left, "" if len_extra_left == 1 else "s")
        )
        explanation.extend(
            pprint.pformat({k: left[k] for k in extra_left}).splitlines()
        )
    extra_right = set_right - set_left
    len_extra_right = len(extra_right)
    if len_extra_right:
        explanation.append(
            "Right contains %d more item%s:"
            % (len_extra_right, "" if len_extra_right == 1 else "s")
        )
        explanation.extend(
            pprint.pformat({k: right[k] for k in extra_right}).splitlines()
        )
    return explanation
def _compare_eq_cls(left, right, verbose, type_fns):
    """Explain why two dataclass/attrs instances of the same type differ.

    Only fields that participate in comparison (dataclass ``compare=True``
    or the attrs eq/cmp flag) are inspected.
    """
    isdatacls, isattrs = type_fns
    # NOTE(review): assumes *left* is either a dataclass or an attrs
    # instance; otherwise fields_to_check would be unbound. Callers
    # guarantee this via the type check in assertrepr_compare — confirm
    # before reusing elsewhere.
    if isdatacls(left):
        all_fields = left.__dataclass_fields__
        fields_to_check = [field for field, info in all_fields.items() if info.compare]
    elif isattrs(left):
        all_fields = left.__attrs_attrs__
        fields_to_check = [
            field.name for field in all_fields if getattr(field, ATTRS_EQ_FIELD)
        ]
    same = []
    diff = []
    for field in fields_to_check:
        if getattr(left, field) == getattr(right, field):
            same.append(field)
        else:
            diff.append(field)
    explanation = []
    if same and verbose < 2:
        explanation.append("Omitting %s identical items, use -vv to show" % len(same))
    elif same:
        explanation += ["Matching attributes:"]
        explanation += pprint.pformat(same).splitlines()
    if diff:
        explanation += ["Differing attributes:"]
        for field in diff:
            explanation += [
                ("%s: %r != %r") % (field, getattr(left, field), getattr(right, field))
            ]
    return explanation
def _notin_text(term, text, verbose=0):
    """Explain a failed ``not in`` check by showing where *term* occurs.

    Diffs *text* against a copy of itself with the first occurrence of
    *term* removed, keeping only the lines that locate the term.
    """
    index = text.find(term)
    head = text[:index]
    tail = text[index + len(term) :]
    correct_text = head + tail
    diff = _diff_text(correct_text, text, verbose)
    newdiff = ["%s is contained here:" % saferepr(term, maxsize=42)]
    for line in diff:
        # Drop the skip notices and removed-line markers; keep only the
        # added lines (re-indented) and context lines.
        if line.startswith("Skipping"):
            continue
        if line.startswith("- "):
            continue
        if line.startswith("+ "):
            newdiff.append(" " + line[2:])
        else:
            newdiff.append(line)
    return newdiff
| mit | 8dee54d0f5a2359cd3194bc8f50adacb | 32.462585 | 127 | 0.576133 | 3.998103 | false | false | false | false |
learningequality/ricecooker | tests/test_pdfutils.py | 1 | 12165 | import os
import re
from tempfile import TemporaryDirectory
import pytest
import requests
from PyPDF2 import PdfFileReader
from ricecooker.utils.pdf import PDFParser # SIT
# Fixtures
################################################################################
@pytest.fixture
def downloads_dir():
    """Yield a scratch directory that is cleaned up after the test."""
    with TemporaryDirectory() as scratch_dir:
        yield scratch_dir
@pytest.fixture
def doc1_with_toc_path():
    """Path to the bundled sample PDF that contains a table of contents."""
    sample_path = os.path.join(
        "tests", "testcontent", "samples", "sample_doc_with_toc.pdf"
    )
    assert os.path.exists(sample_path), "Error mising test file " + sample_path
    return sample_path
def _save_file_url_to_path(url, path):
if not os.path.exists(path):
with open(path, "wb") as f:
resp = requests.get(url, stream=True)
for chunk in resp.iter_content(chunk_size=1048576):
f.write(chunk)
f.flush()
assert os.path.exists(path), "Error mising test file " + path
@pytest.fixture
def doc2_with_toc_path():
    """
    A PDF with lots of chapters.
    """
    cached_path = os.path.join(
        "tests", "testcontent", "downloaded", "Beyond-Good-and-Evil-Galbraithcolor.pdf"
    )
    _save_file_url_to_path(
        "https://s3-us-west-2.amazonaws.com/pressbooks-samplefiles/"
        "GalbraithColorTheme/Beyond-Good-and-Evil-Galbraithcolor.pdf",
        cached_path,
    )
    assert os.path.exists(cached_path), "Error mising test file " + cached_path
    return cached_path
@pytest.fixture
def doc3_with_toc_path():
    """
    A Gutenberg textbook PDF with a chapter-subchapter structure.
    """
    cached_path = os.path.join("tests", "testcontent", "downloaded", "41568-pdf.pdf")
    _save_file_url_to_path(
        "https://www.gutenberg.org/files/41568/41568-pdf.pdf", cached_path
    )
    assert os.path.exists(cached_path), "Error mising test file " + cached_path
    return cached_path
# Chapters only
################################################################################
def test_get_toc(doc1_with_toc_path, downloads_dir):
    """Every TOC entry must span as many pages as its (len=N) title claims."""
    with PDFParser(doc1_with_toc_path, directory=downloads_dir) as parser:
        for toc_entry in parser.get_toc():
            _check_pagerange_matches_title_len(toc_entry)
def test_split_chapters(doc1_with_toc_path, downloads_dir):
    """split_chapters() must yield one existing PDF per chapter of the right length."""
    with PDFParser(doc1_with_toc_path, directory=downloads_dir) as parser:
        for chapter in parser.split_chapters():
            split_path = chapter["path"]
            assert split_path.endswith(".pdf"), "wrong extension -- expected .pdf"
            assert os.path.exists(split_path), "missing split PDF file"
            _check_path_matches_title_len(chapter)
def test_split_chapters_alt(doc1_with_toc_path, downloads_dir):
    """Same as test_split_chapters but passing the TOC explicitly via jsondata."""
    with PDFParser(doc1_with_toc_path, directory=downloads_dir) as parser:
        toc = parser.get_toc()
        for chapter in parser.split_chapters(jsondata=toc):
            split_path = chapter["path"]
            assert split_path.endswith(".pdf"), "wrong extension -- expected .pdf"
            assert os.path.exists(split_path), "missing split PDF file"
            _check_path_matches_title_len(chapter)
def test_split_chapters2(doc2_with_toc_path, downloads_dir):
    """Chapter page counts for the pressbooks sample PDF."""
    expected_lengths = [2, 2, 4, 21, 19, 16, 9, 21, 18, 23, 23, 30, 4]
    with PDFParser(doc2_with_toc_path, directory=downloads_dir) as parser:
        chapters = parser.split_chapters()
        for chapter in chapters:
            split_path = chapter["path"]
            assert split_path.endswith(".pdf"), "wrong extension -- expected .pdf"
            assert os.path.exists(split_path), "missing split PDF file"
        for i, expected in enumerate(expected_lengths):
            assert _get_pdf_len(chapters[i]) == expected, "wrong length for ch " + str(
                chapters[i]
            )
def test_split_chapters3(doc3_with_toc_path, downloads_dir):
    """Chapter page counts for the Gutenberg textbook PDF."""
    expected_lengths = [1, 1, 2, 206, 9, 9]
    with PDFParser(doc3_with_toc_path, directory=downloads_dir) as parser:
        chapters = parser.split_chapters()
        for chapter in chapters:
            split_path = chapter["path"]
            assert split_path.endswith(".pdf"), "wrong extension -- expected .pdf"
            assert os.path.exists(split_path), "missing split PDF file"
        for i, expected in enumerate(expected_lengths):
            assert _get_pdf_len(chapters[i]) == expected, "wrong length for ch " + str(
                chapters[i]
            )
# Chapters and subchapters
################################################################################
def test_get_toc_subchapters(doc1_with_toc_path, downloads_dir):
    """With subchapters=True, leaf entries (or childless chapters) carry the page range."""
    with PDFParser(doc1_with_toc_path, directory=downloads_dir) as parser:
        for toc_entry in parser.get_toc(subchapters=True):
            subentries = toc_entry.get("children")
            if subentries:
                for subentry in subentries:
                    _check_pagerange_matches_title_len(subentry)
            else:
                _check_pagerange_matches_title_len(toc_entry)
def test_split_subchapters(doc1_with_toc_path, downloads_dir):
    """Flat chapters split directly; chapters 5 and 6 split into subchapters."""
    with PDFParser(doc1_with_toc_path, directory=downloads_dir) as parser:
        chapters = parser.split_subchapters()
        for flat_chapter in chapters[0:4]:
            assert (
                "children" not in flat_chapter
            ), "first four chapters have no subchapters..."
        for i, expected in enumerate([1, 1, 2, 3]):
            assert _get_pdf_len(chapters[i]) == expected, "wrong num pages in " + str(
                chapters[i]
            )
        fifth = chapters[4]
        assert "children" in fifth, "no children"
        assert len(fifth["children"]) == 2
        for sub in fifth["children"]:
            assert _get_pdf_len(sub) == 1, "wrong num pages in " + str(sub)
        sixth = chapters[5]
        assert "children" in sixth, "no children"
        assert len(sixth["children"]) == 3
        for sub in sixth["children"]:
            assert _get_pdf_len(sub) == 1, "wrong num pages in " + str(sub)
def test_split_subchapters3(doc3_with_toc_path, downloads_dir):
    """Chapter 3 of the Gutenberg PDF splits into 17 subchapters of known lengths."""
    expected_sub_lengths = [6, 8, 14, 14, 11, 13, 13, 10, 13, 15, 16, 7, 18, 20, 15, 8, 5]
    with PDFParser(doc3_with_toc_path, directory=downloads_dir) as parser:
        chapters = parser.split_subchapters()
        ch3 = chapters[3]
        assert "children" in ch3, "no subchapters found in ch3"
        assert len(ch3["children"]) == 17, "wrong number of subchapters"
        subchs = ch3["children"]
        for i, expected in enumerate(expected_sub_lengths):
            assert _get_pdf_len(subchs[i]) == expected, "wrong length for subch " + str(
                subchs[i]
            )
# Test helpers
################################################################################
def _get_pdf_len(str_or_dict_with_path_attr):
    """Number of pages in a PDF, given its path or a dict with a 'path' key."""
    if isinstance(str_or_dict_with_path_attr, str):
        pdf_path = str_or_dict_with_path_attr
    else:
        pdf_path = str_or_dict_with_path_attr["path"]
    with open(pdf_path, "rb") as fh:
        return PdfFileReader(fh).numPages
def _check_pagerange_matches_title_len(pagerange):
# print(chapter_dict)
title = pagerange["title"]
m = re.search(r"\(len=(?P<len>\d*)\)", title)
assert m, "no len=?? found in title"
len_expected = int(m.groupdict()["len"])
len_observed = pagerange["page_end"] - pagerange["page_start"]
assert len_observed == len_expected, "Wrong page_range len detected in " + str(
pagerange
)
def _check_path_matches_title_len(chapter_dict):
    """Assert the PDF at chapter_dict['path'] has the page count its title claims."""
    match = re.search(r"\(len=(?P<len>\d*)\)", chapter_dict["title"])
    assert match, "no len=?? found in title"
    expected_len = int(match.groupdict()["len"])
    observed_len = _get_pdf_len(chapter_dict["path"])
    assert observed_len == expected_len, "Wrong len detected in " + str(chapter_dict)
| mit | 79eaa0459fa8d477b49dbeb30208e9a4 | 38.496753 | 132 | 0.573942 | 3.362355 | false | true | false | false |
learningequality/ricecooker | ricecooker/utils/corrections.py | 1 | 25289 | #!/usr/bin/env python
import argparse
import copy
import csv
import json
import os
from datetime import datetime
import dictdiffer
import requests
from ricecooker.config import LOGGER
from ricecooker.utils.libstudio import StudioApi
# CONFIG CONSTANTS for data directories
################################################################################
STUDIO_CREDENTIALS = "credentials/studio.json"  # Studio API token location
CHEFDATA_DIR = "chefdata"
# Local cache of downloaded Studio channel trees (one JSON file per channel).
STUDIO_TREES_DIR = os.path.join(CHEFDATA_DIR, "studiotrees")
if not os.path.exists(STUDIO_TREES_DIR):
    os.makedirs(STUDIO_TREES_DIR)
# Where corrections CSV exports/imports are stored.
CORRECTIONS_DIR = os.path.join(CHEFDATA_DIR, "corrections")
if not os.path.exists(CORRECTIONS_DIR):
    os.makedirs(CORRECTIONS_DIR)
# CORRECTIONS STRUCTURE v0.2
################################################################################
# Spreadsheet column names for the corrections CSV. Each exported metadata
# attribute gets an "Old ..." column (current Studio value) and a "New ..."
# column (the corrected value to apply).
ACTION_KEY = "Action"
NODE_ID_KEY = "Node ID"
CONTENT_ID_KEY = "Content ID"
PATH_KEY = "Path"
CONTENT_KIND_KEY = "Content Kind"
OLD_TITLE_KEY = "Old Title"
NEW_TITLE_KEY = "New Title"
OLD_DESCR_KEY = "Old Description"
NEW_DESCR_KEY = "New Description"
OLD_TAGS_KEY = "Old Tags"
NEW_TAGS_KEY = "New Tags"
OLD_COPYRIGHT_HOLDER_KEY = "Old Copyright Holder"
NEW_COPYRIGHT_HOLDER_KEY = "New Copyright Holder"
OLD_AUTHOR_KEY = "Old Author"
NEW_AUTHOR_KEY = "New Author"
# Column order of the exported CSV header row.
CORRECTIONS_HEADER = [
    ACTION_KEY,
    NODE_ID_KEY,
    CONTENT_ID_KEY,
    PATH_KEY,
    CONTENT_KIND_KEY,
    OLD_TITLE_KEY,
    NEW_TITLE_KEY,
    OLD_DESCR_KEY,
    NEW_DESCR_KEY,
    OLD_TAGS_KEY,
    NEW_TAGS_KEY,
    OLD_COPYRIGHT_HOLDER_KEY,
    NEW_COPYRIGHT_HOLDER_KEY,
    OLD_AUTHOR_KEY,
    NEW_AUTHOR_KEY,
]
# What columns to export metadata to...
# Maps a Studio attribute name to its [Old, New] column pair.
TARGET_COLUMNS = {
    "title": [OLD_TITLE_KEY, NEW_TITLE_KEY],
    "description": [OLD_DESCR_KEY, NEW_DESCR_KEY],
    "tags": [OLD_TAGS_KEY, NEW_TAGS_KEY],
    "copyright_holder": [OLD_COPYRIGHT_HOLDER_KEY, NEW_COPYRIGHT_HOLDER_KEY],
    "author": [OLD_AUTHOR_KEY, NEW_AUTHOR_KEY],
}
# default_keys = ['node_id', 'content_id'] # 'studio_id', 'source_id']
# Attributes exported by default when no explicit list is given.
default_export = ["title", "description", "tags", "copyright_holder", "author"]
# Studio Tree Local Cache queries
################################################################################
def get_channel_tree(api, channel_id, suffix="", update=True):
    """
    Downloads the entire main tree of a Studio channel to a local json file.

    Args:
        api: a Studio API client used for the (possibly many) tree requests.
        channel_id: the Studio channel whose main tree is wanted.
        suffix: appended to the cache filename so separate runs can coexist.
        update: when False, a previously cached tree is reused if present.
    Returns:
        The channel tree as a nested dict.
    """
    filename = os.path.join(STUDIO_TREES_DIR, channel_id + suffix + ".json")
    if os.path.exists(filename) and not update:
        print(" Loading cached tree for channel_id=", channel_id, "from", filename)
        # Use a context manager so the file handle is closed (was leaked before).
        with open(filename, "r") as jsonf:
            return jsonf and json.load(jsonf)
    else:
        print(" Downloading tree for channel_id=", channel_id, " and saving to", filename)
        root_studio_id = api.get_channel_root_studio_id(channel_id)
        # next step takes long since recursively making O(n) API calls!
        channel_tree = api.get_tree_for_studio_id(root_studio_id)
        with open(filename, "w") as jsonf:
            json.dump(
                channel_tree,
                jsonf,
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            )
        return channel_tree
def print_channel_tree(channel_tree):
    """
    Print tree structure.

    Fix: the channel root may lack a "kind" attribute (the ``.get`` default
    below accounts for that), but the print statements previously accessed
    ``subtree["kind"]`` directly and raised KeyError for such roots; they
    now use the defaulted local ``kind``.
    """

    def print_tree(subtree, indent=""):
        kind = subtree.get("kind", "topic")  # topic default to handle channel root
        if kind == "exercise":
            print(
                indent,
                subtree["title"],
                "kind=",
                kind,
                len(subtree["assessment_items"]),
                "questions",
                len(subtree["files"]),
                "files",
            )
        else:
            print(
                indent,
                subtree["title"],
                "kind=",
                kind,
                len(subtree["files"]),
                "files",
            )
        for child in subtree["children"]:
            print_tree(child, indent=indent + " ")

    print_tree(channel_tree)
# CORECTIONS EXPORT
################################################################################
class CorretionsCsvFileExporter(object):
    """Exports a Studio channel tree as rows of a corrections CSV file.

    Rows are keyed by node_id/content_id so corrections edited in the
    spreadsheet can later be matched back to the right Studio nodes.
    (The class name keeps its historical misspelling for compatibility.)

    Fix: all csv files are now opened with ``newline=""`` as required by
    the csv module (prevents blank/corrupt rows on Windows).
    """

    def __init__(
        self, csvfilepath="corrections-export.csv", exportattrs=default_export
    ):
        # csvfilepath: destination CSV; exportattrs: which metadata
        # attributes get Old/New columns (see TARGET_COLUMNS).
        self.csvfilepath = csvfilepath
        self.exportattrs = exportattrs

    def download_channel_tree(self, api, channel_id):
        """
        Downloads a complete studio channel_tree from the Studio API.
        """
        channel_tree = get_channel_tree(api, channel_id, suffix="-export")
        return channel_tree

    # Export CSV metadata from external corrections
    ############################################################################

    def export_channel_tree_as_corrections_csv(self, channel_tree):
        """
        Create rows in corrections.csv from a Studio channel, specified based on
        node_id and content_id.
        """
        file_path = self.csvfilepath
        if os.path.exists(file_path):
            print("Overwriting previous export", file_path)
        # newline="" is required by the csv module when writing files.
        with open(file_path, "w", newline="") as csv_file:
            csvwriter = csv.DictWriter(csv_file, CORRECTIONS_HEADER)
            csvwriter.writeheader()

        def _write_subtree(path_tuple, subtree, is_root=False):
            """Depth-first walk emitting one CSV row per topic/content node."""
            kind = subtree["kind"]
            # TOPIC ############################################################
            if kind == "topic":
                if is_root:
                    # The root row itself is skipped inside the writer;
                    # children keep the empty path prefix.
                    self.write_topic_row_from_studio_dict(
                        path_tuple, subtree, is_root=is_root
                    )
                    for child in subtree["children"]:
                        _write_subtree(path_tuple, child)
                else:
                    self.write_topic_row_from_studio_dict(path_tuple, subtree)
                    for child in subtree["children"]:
                        _write_subtree(path_tuple + [subtree["title"]], child)
            # CONTENT NODES ####################################################
            elif kind in ["video", "audio", "document", "html5"]:
                self.write_content_row_from_studio_dict(path_tuple, subtree)
            else:
                # TODO: exercise nodes (and their assessment_items) are not
                # exported yet.
                print(">>>>> skipping node", subtree["title"])

        _write_subtree([], channel_tree, is_root=True)

    def write_common_row_attributes_from_studio_dict(self, row, studio_dict):
        """Fill the identifier and metadata columns shared by all row types."""
        # 1. IDENTIFIERS
        row[NODE_ID_KEY] = studio_dict["node_id"]
        row[CONTENT_ID_KEY] = studio_dict["content_id"]
        # PATH_KEY is set by the caller since it depends on tree position.
        row[CONTENT_KIND_KEY] = studio_dict["kind"]
        # 2. METADATA: write each exported attribute to its Old and New columns.
        for exportattr in self.exportattrs:
            for target_col in TARGET_COLUMNS[exportattr]:
                if exportattr == "tags":
                    row[target_col] = ";".join(studio_dict["tags"])
                else:
                    row[target_col] = studio_dict[exportattr]

    def write_topic_row_from_studio_dict(self, path_tuple, studio_dict, is_root=False):
        """Append one topic row; the channel root itself is never exported."""
        if is_root:
            return
        print(
            "Generating corrections-export.csv rows for path_tuple ",
            path_tuple,
            studio_dict["title"],
        )
        with open(self.csvfilepath, "a", newline="") as csv_file:
            csvwriter = csv.DictWriter(csv_file, CORRECTIONS_HEADER)
            topic_row = {}
            self.write_common_row_attributes_from_studio_dict(topic_row, studio_dict)
            # WRITE TOPIC ROW
            topic_row[PATH_KEY] = "/".join(path_tuple + [studio_dict["title"]])
            csvwriter.writerow(topic_row)

    def write_content_row_from_studio_dict(self, path_tuple, studio_dict):
        """Append one row for a content node (video/audio/document/html5)."""
        with open(self.csvfilepath, "a", newline="") as csv_file:
            csvwriter = csv.DictWriter(csv_file, CORRECTIONS_HEADER)
            row = {}
            self.write_common_row_attributes_from_studio_dict(row, studio_dict)
            row[PATH_KEY] = "/".join(path_tuple + [studio_dict["title"]])
            # WRITE ROW
            csvwriter.writerow(row)
# CSV CORRECTIONS LOADERS
################################################################################
def save_gsheet_to_local_csv(gsheet_id, gid, csvfilepath="corrections-import.csv"):
GSHEETS_BASE = "https://docs.google.com/spreadsheets/d/"
SHEET_CSV_URL = GSHEETS_BASE + gsheet_id + "/export?format=csv&gid=" + gid
print(SHEET_CSV_URL)
response = requests.get(SHEET_CSV_URL)
csv_data = response.content.decode("utf-8")
with open(csvfilepath, "w") as csvfile:
csvfile.write(csv_data)
print("Succesfully saved " + csvfilepath)
return csvfilepath
def _clean_dict(row):
"""
Transform empty strings values of dict `row` to None.
"""
row_cleaned = {}
for key, val in row.items():
if val is None or val == "":
row_cleaned[key] = None
else:
row_cleaned[key] = val.strip()
return row_cleaned
def load_corrections_from_csv(csvfilepath):
csv_path = csvfilepath # download_structure_csv()
struct_list = []
with open(csv_path, "r") as csvfile:
reader = csv.DictReader(csvfile, fieldnames=CORRECTIONS_HEADER)
next(reader) # Skip Headers row
for row in reader:
clean_row = _clean_dict(row)
struct_list.append(clean_row)
return struct_list
def get_csv_corrections(csvfilepath):
"""
Return a GROUP BY `corrkind` dictionary of rows from the CSV file.
"""
modifications = []
deletions = []
rows = load_corrections_from_csv(csvfilepath)
for i, row in enumerate(rows):
if row[ACTION_KEY] == "" or row[ACTION_KEY] is None:
print("Skipping no-action row", i + 1)
elif row[ACTION_KEY] == "modify":
modifications.append(row)
elif row[ACTION_KEY] == "delete":
deletions.append(row)
else:
print("Uknown Action", row[ACTION_KEY])
return {"modifications": modifications, "deletions": deletions}
def get_corrections_by_node_id(csvfilepath, modifyattrs):
"""
Convert CSV to internal representaiton of corrections as dicts by node_id.
"""
corrections_by_node_id = {
"nodes_modified": {},
"nodes_added": {},
"nodes_deleted": {},
"nodes_moved": {},
}
csv_corrections = get_csv_corrections(csvfilepath) # CSV rows GROUP BY corrkind
#
# Modifications
for row in csv_corrections["modifications"]:
node_id = row[NODE_ID_KEY]
# print('Found MODIFY row of CSV for node_id', node_id)
#
# find all modified attributes
attributes = {}
for attr in modifyattrs:
# print('Found MODIFY', attr, 'in row of CSV for node_id', node_id)
old_key = TARGET_COLUMNS[attr][0]
new_key = TARGET_COLUMNS[attr][1]
if row[new_key] == row[old_key]: # skip if the same
continue
else:
attributes[attr] = {
"changed": True,
"value": row[new_key],
"old_value": row[old_key],
}
# prepare modifications_dict
modifications_dict = {"attributes": attributes}
# add to to corrections_by_node_id
corrections_by_node_id["nodes_modified"][node_id] = modifications_dict
#
# Deletions
for row in csv_corrections["deletions"]:
node_id = row[NODE_ID_KEY]
# print('Found DELETE row in CSV for node_id', node_id)
corrections_by_node_id["nodes_deleted"][node_id] = {"node_id": node_id}
#
# TODO: Additions
# TODO: Moves
datetimesuffix = datetime.now().strftime("%Y-%m-%d__%H%M")
correctionspath = os.path.join(
CORRECTIONS_DIR, "imported-" + datetimesuffix + ".json"
)
json.dump(
corrections_by_node_id,
open(correctionspath, "w"),
indent=4,
ensure_ascii=False,
sort_keys=True,
)
#
return correctionspath
# Tree querying API
################################################################################
def find_nodes_by_attr(subtree, attr, value):
"""
Returns list of nodes in `subtree` that have attribute `attr` equal to `value`.
"""
results = []
if subtree[attr] == value:
results.append(subtree)
if "children" in subtree:
for child in subtree["children"]:
child_restuls = find_nodes_by_attr(child, attr, value)
results.extend(child_restuls)
return results
def find_nodes_by_content_id(subtree, content_id):
return find_nodes_by_attr(subtree, "content_id", content_id)
def find_nodes_by_node_id(subtree, node_id):
return find_nodes_by_attr(subtree, "node_id", node_id)
def find_nodes_by_original_source_node_id(subtree, original_source_node_id):
return find_nodes_by_attr(
subtree, "original_source_node_id", original_source_node_id
)
def unresolve_children(node):
"""
Return copy of node with children = list of studio_id references instead of full data.
"""
node = copy.deepcopy(node)
if "children" in node:
new_children = []
for child in node["children"]:
new_children.append(child["id"])
node["children"] = new_children
return node
# SPECIAL REMAP NEEDED FOR ALDARYN CORRECTIONS
################################################################################
def remap_original_source_node_id_to_node_id(
channel_tree, corrections_by_original_source_node_id
):
ALL_COORECTIONS_KINDS = [
"nodes_modified",
"nodes_added",
"nodes_deleted",
"nodes_moved",
]
corrections_by_node_id = {}
for correction_kind in ALL_COORECTIONS_KINDS:
if correction_kind in corrections_by_original_source_node_id:
corrections_by_node_id[correction_kind] = {}
corrections_dict = corrections_by_original_source_node_id[correction_kind]
for original_source_node_id, correction in corrections_dict.items():
results = find_nodes_by_original_source_node_id(
channel_tree, original_source_node_id
)
assert results, "no match found based on original_source_node_id search"
assert len(results) == 1, "multiple matches found..."
tree_node = results[0]
node_id = tree_node["node_id"]
corrections_by_node_id[correction_kind][node_id] = correction
return corrections_by_node_id
# CORRECTIONS API CALLS
################################################################################
def apply_modifications_for_node_id(api, channel_tree, node_id, modifications_dict):
"""
Given a modification dict of the form,
modifications_dict = {
'attributes': {
'title': {
'changed': (bool),
'value': (str),
'old_value': (str),
},
'files': ([{
'filename': (str),
'file_size': (int),
'preset': (str)
}]),
'assessment_items': ([AssessmentItem]),
'tags': ([Tag]),
...
}
}
this function will make obtain GET the current node data from Studio API,
apply the modifications to the local json data, then PUT the data on Studio.
"""
# print('MODIFYING node_id=', node_id)
results = find_nodes_by_node_id(channel_tree, node_id)
assert results, "no match found based on node_id search"
assert len(results) == 1, "multiple matches found..."
tree_node = results[0]
studio_id = tree_node["id"]
# node_before = unresolve_children(tree_node)
node_before = api.get_contentnode(studio_id)
# print('node_before', node_before)
# PREPARE data for PUT request (starting form copy of old)
data = {}
ATTRS_TO_COPY = ["kind", "id", "tags", "prerequisite", "parent"]
for attr in ATTRS_TO_COPY:
data[attr] = node_before[attr]
#
# ADD new_values modified
modifications = modifications_dict["attributes"]
for attr, values_diff in modifications.items():
if values_diff["changed"]:
current_value = node_before[attr]
expected_old_value = values_diff["old_value"]
new_value = values_diff["value"]
if expected_old_value == new_value: # skip if the same
continue
if current_value != expected_old_value:
print(
"WARNING expected old value",
expected_old_value,
"for",
attr,
"but current node value is",
current_value,
)
# print('Changing current_value', current_value, 'for', attr, 'to new value', new_value)
data[attr] = new_value
else:
print("Skipping attribute", attr, "because key changed==False")
# PUT
print("PUT studio_id=", studio_id, "node_id=", node_id)
response_data = api.put_contentnode(data)
# Check what changed
node_after = api.get_contentnode(studio_id)
diffs = list(dictdiffer.diff(node_before, node_after))
print(" diff=", diffs)
return response_data
def apply_deletion_for_node_id(api, channel_tree, channel_id, node_id, deletion_dict):
results = find_nodes_by_node_id(channel_tree, node_id)
assert results, "no match found based on node_id search"
assert len(results) == 1, "multiple matches found..."
tree_node = results[0]
studio_id = tree_node["id"]
# node_before = unresolve_children(tree_node)
node_before = api.get_contentnode(studio_id)
# PREPARE data for DLETE request
data = {}
data["id"] = node_before["id"]
# DELETE
print("DELETE studio_id=", studio_id, "node_id=", node_id)
response_data = api.delete_contentnode(data, channel_id)
# Check what changed
node_after = api.get_contentnode(studio_id)
diffs = list(dictdiffer.diff(node_before, node_after))
print(" diff=", diffs)
return response_data
def apply_corrections_by_node_id(api, channel_tree, channel_id, corrections_by_node_id):
"""
Given a dict `corrections_by_node_id` of the form,
{
'nodes_modified': {
'<node_id (str)>': { modification dict1 },
'<node_id (str)>': { modification dict2 },
}
'nodes_added': {
'<node_id (str)>': { 'new_parent': (str), 'attributes': {...}},
},
'nodes_deleted': {
'<node_id (str)>': {'old_parent': (str), 'attributes': {...}},
},
'nodes_moved': {
'<node_id (str)>': {'old_parent': (str), 'new_parent': (str), 'attributes': {...}},
},
}
this function will make the appropriate Studio API calls to apply the patch.
"""
LOGGER.debug("Applying corrections...")
#
# Modifications
for node_id, modifications_dict in corrections_by_node_id["nodes_modified"].items():
apply_modifications_for_node_id(api, channel_tree, node_id, modifications_dict)
#
# Deletions
for node_id, deletion_dict in corrections_by_node_id["nodes_deleted"].items():
apply_deletion_for_node_id(
api, channel_tree, channel_id, node_id, deletion_dict
)
# TODO: Additions
# TODO: Moves
def get_studio_api(studio_creds=None):
if studio_creds is None:
if not os.path.exists(STUDIO_CREDENTIALS):
print("ERROR: Studio credentials file", STUDIO_CREDENTIALS, "not found")
print(
"""Please create the file and put the following informaiton in it:
{
"token": "<your studio token>",
"username": "<your studio username>",
"password": "<your studio password>",
}
"""
)
raise ValueError("Missing credentials")
studio_creds = json.load(open(STUDIO_CREDENTIALS))
#
# Studio API client (note currently needs both session auth and token as well)
api = StudioApi(
token=studio_creds["token"],
username=studio_creds["username"],
password=studio_creds["password"],
studio_url=studio_creds.get(
"studio_url", "https://studio.learningequality.org"
),
)
return api
def export_corrections_csv(args):
api = get_studio_api()
channel_tree = get_channel_tree(api, args.channel_id, suffix="-export")
print_channel_tree(channel_tree)
csvexporter = CorretionsCsvFileExporter()
csvexporter.export_channel_tree_as_corrections_csv(channel_tree)
def apply_corrections(args):
# 1. LOAD Studio channel_tree (needed for lookups by node_id, content_id, etc.)
api = get_studio_api()
channel_tree = get_channel_tree(api, args.channel_id, suffix="-before")
#
# 2. IMPORT the corrections from the Spreadsheet
csvfilepath = "corrections-import.csv"
save_gsheet_to_local_csv(args.gsheet_id, args.gid, csvfilepath=csvfilepath)
#
# 3. TRANSFORM corrections-import.csv to Studio detailed diff format
modifyattrs = args.modifyattrs.split(",") # using only selected attributes
correctionspath = get_corrections_by_node_id(csvfilepath, modifyattrs)
#
# Special case: when export was performed on source channel, but we want to
# apply the corrections to a cloned channel. In that cases, the `Node ID`
# column in the CSV corresponds to the `original_source_node_id` attribute
# of the nodes in the derivative channel so we must do a remapping:
if args.primarykey == "original_source_node_id":
corrections_by_original_source_node_id = json.load(open(correctionspath))
corrections_by_node_id = remap_original_source_node_id_to_node_id(
channel_tree, corrections_by_original_source_node_id
)
json.dump(
corrections_by_node_id,
open(correctionspath, "w"),
indent=4,
ensure_ascii=False,
sort_keys=True,
)
print("Finished original_source_node_id-->node_id lookup and remapping.")
elif args.primarykey in ["content_id", "studio_id"]:
raise NotImplementedError("Using content_id and studio_id not ready yet.")
#
# Early exit if running the `importonly` command
if args.command == "importonly":
print("Corrections json file imported. See", correctionspath)
return correctionspath
#
# 4. LOAD corrections.json (four lists of corrections organized by nod_id)
corrections_by_node_id = json.load(open(correctionspath))
#
# 5. Apply the corrections
apply_corrections_by_node_id(
api, channel_tree, args.channel_id, corrections_by_node_id
)
#
# 6. SAVE the Studio tree after corrections for review of what was changed
channel_tree = get_channel_tree(api, args.channel_id, suffix="-after")
def correctionsmain():
"""
Command line interface for applying bulk-edit corrections:
"""
parser = argparse.ArgumentParser(description="Bulk channel edits via CSV/sheets.")
parser.add_argument(
"command",
help="One of export|importonly|apply",
choices=["export", "importonly", "apply"],
)
parser.add_argument("channel_id", help="The studio Channel ID to edit")
parser.add_argument(
"--primarykey",
help="Which idendifier to use when looking up nodes",
choices=["node_id", "content_id", "original_source_node_id", "studio_id"],
default="node_id",
)
parser.add_argument("--gsheet_id", help="Google spreadsheets sheet ID (public)")
parser.add_argument(
"--gid", help="The gid argument to indicate which sheet", default="0"
)
parser.add_argument(
"--modifyattrs",
help="Which attributes to modify",
default="title,description,author,copyright_holder",
)
args = parser.parse_args()
# print("in corrections.main with cliargs", args)
if args.command == "export":
export_corrections_csv(args)
elif args.command in ["importonly", "apply"]:
apply_corrections(args)
else:
raise ValueError("Unrecognized command")
if __name__ == "__main__":
correctionsmain()
| mit | a5c986c58a574e8e55e4b341b9e7a1d0 | 34.568214 | 100 | 0.573055 | 3.821823 | false | false | false | false |
learningequality/ricecooker | ricecooker/utils/html_writer.py | 1 | 4419 | import os
import zipfile
from ricecooker.utils.downloader import read
class HTMLWriter:
"""
Class for writing zipfiles
"""
zf = None # Zip file to write to
write_to_path = None # Where to write zip file
def __init__(self, write_to_path, mode="w"):
"""Args: write_to_path: (str) where to write zip file"""
self.map = {} # Keeps track of content to write to csv
self.write_to_path = write_to_path # Where to write zip file
self.mode = mode # What mode to open zipfile in
def __enter__(self):
"""Called when opening context (e.g. with HTMLWriter() as writer: )"""
self.open()
return self
def __exit__(self, type, value, traceback):
"""Called when closing context"""
self.close()
def _write_to_zipfile(self, filename, content):
if not self.contains(filename):
info = zipfile.ZipInfo(filename, date_time=(2013, 3, 14, 1, 59, 26))
info.comment = "HTML FILE".encode()
info.compress_type = zipfile.ZIP_STORED
info.create_system = 0
self.zf.writestr(info, content)
def _copy_to_zipfile(self, filepath, arcname=None):
filename = arcname or filepath
if not self.contains(filename):
self.zf.write(filepath, arcname=arcname)
""" USER-FACING METHODS """
def open(self):
"""open: Opens zipfile to write to
Args: None
Returns: None
"""
self.zf = zipfile.ZipFile(self.write_to_path, self.mode)
def close(self):
"""close: Close zipfile when done
Args: None
Returns: None
"""
index_present = self.contains("index.html")
self.zf.close() # Make sure zipfile closes no matter what
if not index_present:
raise ReferenceError(
"Invalid Zip at {}: missing index.html file (use write_index_contents method)".format(
self.write_to_path
)
)
def contains(self, filename):
"""contains: Checks if filename is in the zipfile
Args: filename: (str) name of file to check
Returns: boolean indicating whether or not filename is in the zip
"""
return filename in self.zf.namelist()
def write_contents(self, filename, contents, directory=None):
"""write_contents: Write contents to filename in zip
Args:
contents: (str) contents of file
filename: (str) name of file in zip
directory: (str) directory in zipfile to write file to (optional)
Returns: path to file in zip
"""
filepath = (
"{}/{}".format(directory.rstrip("/"), filename) if directory else filename
)
self._write_to_zipfile(filepath, contents)
return filepath
def write_file(self, filepath, filename=None, directory=None):
"""write_file: Write local file to zip
Args:
filepath: (str) location to local file
directory: (str) directory in zipfile to write file to (optional)
Returns: path to file in zip
Note: filepath must be a relative path
"""
arcname = None
if filename or directory:
directory = directory.rstrip("/") + "/" if directory else ""
filename = filename or os.path.basename(filepath)
arcname = "{}{}".format(directory, filename)
self._copy_to_zipfile(filepath, arcname=arcname)
return arcname or filepath
def write_url(self, url, filename, directory=None):
"""write_url: Write contents from url to filename in zip
Args:
url: (str) url to file to download
filename: (str) name of file in zip
directory: (str) directory in zipfile to write file to (optional)
Returns: path to file in zip
"""
filepath = (
"{}/{}".format(directory.rstrip("/"), filename) if directory else filename
)
if not self.contains(filepath):
self._write_to_zipfile(filepath, read(url))
return filepath
def write_index_contents(self, contents):
"""write_index_contents: Write main index file to zip
Args:
contents: (str) contents of file
Returns: path to file in zip
"""
self._write_to_zipfile("index.html", contents)
| mit | 3f4e8896ed2c46fac89ce57a791082b6 | 34.352 | 102 | 0.586332 | 4.353695 | false | false | false | false |
joeyespo/grip | tests/test_cli.py | 1 | 1732 | """
Tests the Grip command-line interface.
"""
from __future__ import print_function, unicode_literals
import sys
from subprocess import PIPE, STDOUT, CalledProcessError, Popen
import pytest
from grip.command import usage, version
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
class CalledProcessError(CalledProcessError):
def __init__(self, returncode, cmd, output):
super(CalledProcessError, self).__init__(returncode, cmd)
self.output = output
def run(*args, **kwargs):
command = kwargs.pop('command', 'grip')
stdin = kwargs.pop('stdin', None)
cmd = [command] + list(args)
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT,
universal_newlines=True)
# Sent input as STDIN then close it
output, _ = p.communicate(input=stdin)
p.stdin.close()
# Wait for process to terminate
returncode = p.wait()
# Raise exception on failed process calls
if returncode != 0:
raise CalledProcessError(returncode, cmd, output=output)
return output
def test_help():
assert run('-h') == usage
assert run('--help') == usage
def test_version():
assert run('-V') == version + '\n'
assert run('--version') == version + '\n'
def test_bad_command():
simple_usage = '\n\n'.join(usage.split('\n\n')[:1])
with pytest.raises(CalledProcessError) as excinfo:
run('--does-not-exist')
assert excinfo.value.output == simple_usage + '\n'
# TODO: Figure out how to run the CLI and still capture requests
# TODO: Test all Grip CLI commands and arguments
# TODO: Test settings wire-up (settings.py, settings_local.py, ~/.grip)
# TODO: Test `cat README.md | ~/.local/bin/grip - --export -` (#152)
| mit | b4c63facdb15f4f638ed2bac27562a39 | 27.393443 | 71 | 0.651848 | 3.685106 | false | true | false | false |
jhpyle/docassemble | docassemble_webapp/docassemble/webapp/app_object.py | 1 | 2070 | # import sys
try:
from werkzeug.middleware.proxy_fix import ProxyFix
proxyfix_version = 15
except ImportError:
from werkzeug.contrib.fixers import ProxyFix
proxyfix_version = 14
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from flask_babel import Babel
from flask_cors import CORS
import docassemble.base.functions
def create_app():
the_app = Flask(__name__)
the_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
from docassemble.base.config import daconfig # pylint: disable=import-outside-toplevel
import docassemble.webapp.database # pylint: disable=import-outside-toplevel,redefined-outer-name
import docassemble.webapp.db_object # pylint: disable=import-outside-toplevel,redefined-outer-name
alchemy_connect_string = docassemble.webapp.database.alchemy_connection_string()
the_app.config['SQLALCHEMY_DATABASE_URI'] = alchemy_connect_string
if alchemy_connect_string.startswith('postgres'):
the_app.config['SQLALCHEMY_ENGINE_OPTIONS'] = dict(
connect_args=docassemble.webapp.database.connect_args()
)
the_app.secret_key = daconfig.get('secretkey', '38ihfiFehfoU34mcq_4clirglw3g4o87')
the_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
the_db = docassemble.webapp.db_object.init_flask()
the_db.init_app(the_app)
the_csrf = CSRFProtect()
the_csrf.init_app(the_app)
the_babel = Babel()
the_babel.init_app(the_app)
if daconfig.get('behind https load balancer', False):
if proxyfix_version >= 15:
the_app.wsgi_app = ProxyFix(the_app.wsgi_app, x_proto=1, x_host=1)
else:
the_app.wsgi_app = ProxyFix(the_app.wsgi_app)
if 'cross site domains' in daconfig:
CORS(the_app, origins=daconfig['cross site domains'], supports_credentials=True)
return the_app, the_csrf, the_babel
if docassemble.base.functions.server_context.context == 'websockets':
from docassemble.webapp.app_socket import app # pylint: disable=unused-import
else:
app, csrf, flaskbabel = create_app()
| mit | 098015aa696b5009bc5d4c120de58484 | 43.042553 | 103 | 0.719324 | 3.344103 | false | true | false | false |
jhpyle/docassemble | docassemble_demo/docassemble/demo/random-test.py | 1 | 5854 | #! /usr/bin/env python
import json
import sys
import random
import string
import requests
root = 'http://localhost'
key = 'H3PLMKJKIVATLDPWHJH3AGWEJPFU5GRT'
i = 'docassemble.demo:data/questions/questions.yml'
iterations = 100
while iterations:
r = requests.get(root + '/api/session/new', params={'key': key, 'i': i})
if r.status_code != 200:
sys.exit(r.text)
info = json.loads(r.text)
session = info['session']
secret = info['secret']
r = requests.get(root + '/api/session/question', params={'key': key, 'i': i, 'secret': secret, 'session': session})
if r.status_code != 200:
sys.exit(r.text)
info = json.loads(r.text)
print(r.text)
steps = 0
while steps < 1000 and info['questionType'] not in ('deadend', 'restart', 'exit', 'leave'):
variables = {}
file_variables = {}
file_uploads = {}
for field in info.get('fields', []):
if field.get('datatype', None) in ('html', 'note'):
continue
if 'variable_name' not in field:
if field.get('fieldtype', None) == 'multiple_choice' and 'choices' in field:
indexno = random.choice(range(len(field['choices'])))
if info['questionText'] == 'What language do you speak?':
indexno = 0
variables[field['choices'][indexno]['variable_name']] = field['choices'][indexno]['value']
else:
sys.exit("Field not recognized:\n" + repr(field))
elif 'datatype' not in field and 'fieldtype' not in field and info['questionType'] != 'signature':
variables[field['variable_name']] = True
elif field.get('datatype', None) == 'object':
if not field.get('required', True):
continue
sys.exit("Field not recognized:\n" + repr(field))
elif field.get('fieldtype', None) == 'multiple_choice' or 'choices' in field:
indexno = random.choice(range(len(field['choices'])))
if 'value' not in field['choices'][indexno]:
continue
variables[field['variable_name']] = field['choices'][indexno]['value']
elif field.get('datatype', None) == 'boolean':
variables[field['variable_name']] = bool(random.random() > 0.5)
elif field.get('datatype', None) == 'threestate':
variables[field['variable_name']] = True if random.random() > 0.66 else (False if random.random() > 0.5 else None)
elif field.get('datatype', None) in ('text', 'area'):
variables[field['variable_name']] = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
elif field.get('datatype', None) == 'email':
variables[field['variable_name']] = ''.join(random.choice(string.ascii_lowercase) for _ in range(5)) + '@' + ''.join(random.choice(string.ascii_lowercase) for _ in range(5)) + random.choice(['.com', '.net', '.org'])
elif field.get('datatype', None) == 'currency':
variables[field['variable_name']] = "{0:.2f}".format(random.random() * 100.0)
elif field.get('datatype', None) == 'date':
variables[field['variable_name']] = "2015-04-15"
elif field.get('datatype', None) in ('file', 'files', 'camera', 'user', 'environment'):
file_var_name = "file" + str(len(file_uploads))
file_variables[file_var_name] = field['variable_name']
file_uploads[file_var_name] = open("data/static/art.jpg", "rb")
elif info['questionType'] == 'signature':
file_var_name = "file" + str(len(file_uploads))
file_variables[file_var_name] = field['variable_name']
file_uploads[file_var_name] = open("data/static/canvas.png", "rb")
elif field.get('datatype', None) == 'range':
variables[field['variable_name']] = float(field.get('min', 1)) + int(random.random() * (float(field.get('max', 10)) - float(field.get('min', 1))))
else:
sys.exit("Field not recognized:\n" + repr(field))
if 'question_variable_name' in info:
variables[info['question_variable_name']] = True
if len(variables) == 0 and len(file_variables) == 0:
if 'fields' in info:
sys.exit("Fields not recognized:\n" + repr(info['fields']))
sys.exit("Question not recognized:\n" + repr(info))
print("Session is " + session)
if len(variables):
print("Setting variables:\n" + repr(variables))
data = {'key': key, 'i': i, 'secret': secret, 'session': session, 'variables': json.dumps(variables)}
if len(file_variables):
data = {'key': key, 'i': i, 'secret': secret, 'session': session}
data['question_name'] = info['questionName']
if 'event_list' in info:
data['event_list'] = json.dumps(info['event_list'])
if len(file_uploads):
print("Setting file variables:\n" + repr(file_variables))
data['file_variables'] = json.dumps(file_variables)
r = requests.post(root + '/api/session', data=data, files=file_uploads)
else:
r = requests.post(root + '/api/session', data=data)
if r.status_code != 200:
sys.exit(r.text)
print("Got question:\n" + r.text)
try:
info = json.loads(r.text)
assert isinstance(info, dict)
except:
sys.exit(r.text)
steps += 1
# r = requests.delete(root + '/api/session', params={'key': key, 'i': i, 'session': session})
# if r.status_code != 204:
# sys.exit(r.text)
iterations -= 1
sys.exit(0)
| mit | 98aa1008f68f5447d96f1bd42d86194f | 49.465517 | 231 | 0.554151 | 3.784098 | false | false | false | false |
jhpyle/docassemble | docassemble_webapp/docassemble/webapp/files.py | 1 | 36027 | import datetime
import json
import mimetypes
import os
import re
import shutil
import subprocess
# import sys
import tempfile
import zipfile
from packaging import version
from flask import url_for
from flask_login import current_user
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
import requests
import pycurl # pylint: disable=import-error
from docassemble.base.config import daconfig
from docassemble.base.error import DAError
from docassemble.base.generate_key import random_alphanumeric
from docassemble.base.logger import logmessage
import docassemble.base.functions
from docassemble.webapp.update import get_pip_info
import docassemble.webapp.cloud
# Cloud storage backend object for the site (e.g. S3/Azure), or None when
# only local filesystem storage is configured.
cloud = docassemble.webapp.cloud.get_cloud()
# Base directory for locally stored files; overridable via the 'uploads'
# configuration key.
UPLOAD_DIRECTORY = daconfig.get('uploads', '/usr/share/docassemble/files')
def listfiles(directory):
    """Return the relative paths of every file under ``directory``, recursively.

    Any trailing path separator on ``directory`` is ignored; the returned
    paths are relative to ``directory`` itself.
    """
    base = directory.rstrip(os.sep)
    prefix_len = len(base) + 1
    paths = []
    for dirpath, _subdirs, filenames in os.walk(base):
        paths.extend(os.path.join(dirpath, name)[prefix_len:] for name in filenames)
    return paths
def listdirs(directory):
    """Return the relative paths of every subdirectory under ``directory``, recursively.

    Any trailing path separator on ``directory`` is ignored; the returned
    paths are relative to ``directory`` itself.
    """
    base = directory.rstrip(os.sep)
    prefix_len = len(base) + 1
    paths = []
    for dirpath, subdirs, _filenames in os.walk(base):
        paths.extend(os.path.join(dirpath, name)[prefix_len:] for name in subdirs)
    return paths
def path_to_key(path):
    """Convert a local filesystem path into a cloud key by using '/' separators."""
    return str(path).replace(os.sep, '/')
def url_sanitize(url):
    """Replace each whitespace character in ``url`` with a single ordinary space."""
    whitespace = re.compile(r'\s')
    return whitespace.sub(' ', url)
class SavedFile:
    def __init__(self, file_number, extension=None, fix=False, section='files', filename='file', subdir=None, should_not_exist=False):
        """Represent a stored file (or directory of files) in a storage section.

        Args:
            file_number: integer identifier of the file record.
            extension: optional file extension, stored on the instance.
            fix: if True, immediately synchronize local storage with the
                cloud backend by calling self.fix().
            section: storage section name (e.g. 'files'), used as the first
                component of both the local and cloud storage paths.
            filename: base name of the primary file within the directory.
            subdir: optional subdirectory for the primary file; '' and
                'default' are treated the same as None.
            should_not_exist: if True, verify that no files already exist
                for this number and purge any leftovers that are found.
        """
        file_number = int(file_number)
        section = str(section)
        # this_thread.saved_files is a per-request cache of SavedFile
        # instances, keyed by section and then file number.
        if section not in docassemble.base.functions.this_thread.saved_files:
            docassemble.base.functions.this_thread.saved_files[section] = {}
        if file_number in docassemble.base.functions.this_thread.saved_files[section]:
            # logmessage("SavedFile: using cache for " + section + '/' + str(file_number))
            # Cache hit: copy state from the cached instance, then override
            # the attributes the caller supplied explicitly.
            sf = docassemble.base.functions.this_thread.saved_files[section][file_number]
            for attribute in ['file_number', 'fixed', 'section', 'filename', 'extension', 'directory', 'path', 'modtimes', 'keydict', 'subdir']:
                if hasattr(sf, attribute):
                    setattr(self, attribute, getattr(sf, attribute))
            self.extension = extension
            self.filename = filename
            self.subdir = subdir
        else:
            # logmessage("SavedFile: not using cache for " + section + '/' + str(file_number))
            # Cache miss: initialize fresh state and register this instance
            # in the per-request cache.
            self.fixed = False
            self.file_number = file_number
            self.section = section
            self.extension = extension
            self.filename = filename
            self.subdir = subdir
            if cloud is None:
                if self.section == 'files':
                    # Spread the 'files' section across nested directories by
                    # splitting the zero-padded hex file number into
                    # three-character path components.
                    parts = re.sub(r'(...)', r'\1/', '{0:012x}'.format(int(file_number))).split('/')
                    self.directory = os.path.join(UPLOAD_DIRECTORY, *parts)
                else:
                    self.directory = os.path.join(UPLOAD_DIRECTORY, str(self.section), str(file_number))
            else:
                # With cloud storage, the local directory is only a
                # temporary mirror of the cloud contents.
                self.directory = os.path.join(tempfile.gettempdir(), str(self.section), str(self.file_number))
            docassemble.base.functions.this_thread.saved_files[section][file_number] = self
        if self.subdir and self.subdir != '' and self.subdir != 'default':
            self.path = os.path.join(self.directory, self.subdir, self.filename)
        else:
            self.path = os.path.join(self.directory, self.filename)
        if fix:
            self.fix()
        if should_not_exist and os.path.isdir(self.directory):
            # A brand-new file number should have no existing content; any
            # leftover files indicate possible database corruption.
            found_error = False
            for root, dirs, files in os.walk(self.directory): # pylint: disable=unused-variable
                if len(files) > 0 or len(dirs) > 0:
                    found_error = True
                    break
            if found_error:
                logmessage("WARNING! Possible database corruption due to an unsafe shutdown. Your database indicated that the next file number is " + str(file_number) + ", but there is already a file in the file storage for that number. It is recommended that you restart your system. If that does not make this error go away, you should investigate why there are existing files in the file system.")
                # Purge the stale content from the cloud and from the local
                # filesystem, then recreate an empty local directory.
                if cloud is not None:
                    prefix = str(self.section) + '/' + str(self.file_number) + '/'
                    for key in list(cloud.list_keys(prefix)):
                        try:
                            key.delete()
                        except:
                            pass
                if hasattr(self, 'directory') and os.path.isdir(self.directory):
                    shutil.rmtree(self.directory)
                if not os.path.isdir(self.directory):
                    os.makedirs(self.directory)
    def fix(self):
        """Synchronize the local directory with cloud storage.

        When cloud storage is configured, download every key under
        section/file_number into self.directory (skipping files whose
        recorded modification time already matches), record per-file
        modtimes and cloud keys for later use by finalize(), and remove
        local files and subdirectories that no longer exist in the cloud.
        Without cloud storage, just ensure the directory exists.
        Idempotent: a no-op once self.fixed is set.
        """
        if self.fixed:
            return
        # logmessage("fix: starting " + str(self.section) + '/' + str(self.file_number))
        if cloud is not None:
            dirs_in_use = set()
            self.modtimes = {}
            self.keydict = {}
            if not os.path.isdir(self.directory):
                os.makedirs(self.directory)
            prefix = str(self.section) + '/' + str(self.file_number) + '/'
            # logmessage("fix: prefix is " + prefix)
            for key in cloud.list_keys(prefix):
                # Key names use '/' separators; convert to a local relative path.
                filename = os.path.join(*key.name[len(prefix):].split('/'))
                fullpath = os.path.join(self.directory, filename)
                fulldir = os.path.dirname(fullpath)
                dirs_in_use.add(fulldir)
                if not os.path.isdir(fulldir):
                    os.makedirs(fulldir)
                server_time = key.get_epoch_modtime()
                if not os.path.isfile(fullpath):
                    key.get_contents_to_filename(fullpath)
                else:
                    # Only re-download when the local copy is out of date.
                    local_time = os.path.getmtime(fullpath)
                    if self.section == 'files':
                        if local_time != server_time:
                            key.get_contents_to_filename(fullpath)
                        update_access_time(fullpath)
                    else:
                        if local_time != server_time:
                            key.get_contents_to_filename(fullpath)
                self.modtimes[filename] = server_time
                # logmessage("cloud modtime for file " + filename + " is " + str(key.last_modified))
                self.keydict[filename] = key
            if self.subdir and self.subdir != '' and self.subdir != 'default':
                self.path = os.path.join(self.directory, self.subdir, self.filename)
            else:
                self.path = os.path.join(self.directory, self.filename)
            # Remove local files and directories with no cloud counterpart.
            for filename in listfiles(self.directory):
                if filename not in self.modtimes:
                    os.remove(os.path.join(self.directory, filename))
            for subdir in listdirs(self.directory):
                if subdir not in dirs_in_use and os.path.isdir(subdir):
                    shutil.rmtree(subdir)
        else:
            if not os.path.isdir(self.directory):
                os.makedirs(self.directory)
        self.fixed = True
        # logmessage("fix: ending " + str(self.section) + '/' + str(self.file_number))
def delete_file(self, filename):
if cloud is not None:
prefix = str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename)
to_delete = []
for key in cloud.list_keys(prefix):
to_delete.append(key)
for key in to_delete:
try:
key.delete()
except:
pass
if hasattr(self, 'directory') and os.path.isdir(self.directory):
the_path = os.path.join(self.directory, filename)
if os.path.isfile(the_path):
os.remove(the_path)
def delete_directory(self, directory):
if cloud is not None:
prefix = str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(directory) + '/'
to_delete = []
for key in cloud.list_keys(prefix):
to_delete.append(key)
for key in to_delete:
try:
key.delete()
except:
pass
if hasattr(self, 'directory') and os.path.isdir(self.directory):
the_path = os.path.join(self.directory, directory)
if os.path.isdir(the_path):
shutil.rmtree(the_path)
    def delete(self):
        """Delete everything stored for this section/file number: all cloud
        keys (when cloud storage is configured), the local directory, and
        this object's entry in the per-thread SavedFile cache."""
        if cloud is not None:
            prefix = str(self.section) + '/' + str(self.file_number) + '/'
            for key in list(cloud.list_keys(prefix)):
                try:
                    key.delete()
                except:
                    pass
        if hasattr(self, 'directory') and os.path.isdir(self.directory):
            shutil.rmtree(self.directory)
        # NOTE(review): the cache entry is removed here using str()/int()
        # conversions, while __init__ stored it with the raw section and
        # file_number values -- confirm the key types always agree, or a
        # KeyError could occur here.
        del docassemble.base.functions.this_thread.saved_files[str(self.section)][int(self.file_number)]
def save(self, finalize=False):
self.fix()
if self.extension is not None:
if os.path.isfile(self.path + '.' + self.extension):
os.remove(self.path + '.' + self.extension)
try:
os.symlink(self.path, self.path + '.' + self.extension)
except:
shutil.copyfile(self.path, self.path + '.' + self.extension)
if finalize:
self.finalize()
def fetch_url(self, url, **kwargs):
filename = kwargs.get('filename', self.filename)
self.fix()
cookiefile = tempfile.NamedTemporaryFile(suffix='.txt')
f = open(os.path.join(self.directory, filename), 'wb')
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.WRITEDATA, f)
c.setopt(pycurl.USERAGENT, 'Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; Googlebot/2.1; +http://www.google.com/bot.html) Safari/537.36')
c.setopt(pycurl.COOKIEFILE, cookiefile.name)
c.perform()
c.close()
cookiefile.close()
self.save()
def fetch_url_post(self, url, post_args, **kwargs):
filename = kwargs.get('filename', self.filename)
self.fix()
r = requests.post(url_sanitize(url), data=post_args)
if r.status_code != 200:
raise DAError('fetch_url_post: retrieval from ' + url + 'failed')
with open(os.path.join(self.directory, filename), 'wb') as fp:
for block in r.iter_content(1024):
fp.write(block)
self.save()
def size_in_bytes(self, **kwargs):
filename = kwargs.get('filename', self.filename)
if cloud is not None and not self.fixed:
key = cloud.search_key(str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename))
if key is None or not key.does_exist:
raise DAError("size_in_bytes: file " + filename + " in " + self.section + " did not exist")
return key.size
return os.path.getsize(os.path.join(self.directory, filename))
def list_of_files(self):
output = []
if cloud is not None and not self.fixed:
prefix = str(self.section) + '/' + str(self.file_number) + '/'
for key in cloud.list_keys(prefix):
output.append(os.path.join(*key.name[len(prefix):].split('/')))
else:
if os.path.isdir(self.directory):
for filename in listfiles(self.directory):
output.append(filename)
return sorted(output)
def list_of_dirs(self):
dir_list = set()
for path in self.list_of_files():
parts = path.split(os.sep)
if len(parts) > 1:
dir_list.add(parts[0])
return sorted(list(dir_list))
def copy_from(self, orig_path, **kwargs):
filename = kwargs.get('filename', self.filename)
self.fix()
# logmessage("Saving to " + os.path.join(self.directory, filename))
new_file = os.path.join(self.directory, filename)
new_file_dir = os.path.dirname(new_file)
if not os.path.isdir(new_file_dir):
os.makedirs(new_file_dir)
shutil.copyfile(orig_path, new_file)
if 'filename' not in kwargs:
self.save()
def get_modtime(self, **kwargs):
filename = kwargs.get('filename', self.filename)
# logmessage("Get modtime called with filename " + str(filename))
if cloud is not None and not self.fixed:
key_name = str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename)
key = cloud.search_key(key_name)
if key is None or not key.does_exist:
raise DAError("get_modtime: file " + filename + " in " + self.section + " did not exist")
# logmessage("Modtime for key " + key_name + " is now " + str(key.last_modified))
return key.get_epoch_modtime()
the_path = os.path.join(self.directory, filename)
if not os.path.isfile(the_path):
raise DAError("get_modtime: file " + filename + " in " + self.section + " did not exist")
return os.path.getmtime(the_path)
def write_content(self, content, **kwargs):
filename = kwargs.get('filename', self.filename)
self.fix()
if kwargs.get('binary', False):
with open(os.path.join(self.directory, filename), 'wb') as ifile:
ifile.write(content)
else:
with open(os.path.join(self.directory, filename), 'w', encoding='utf-8') as ifile:
ifile.write(content)
if kwargs.get('save', True):
self.save()
def write_as_json(self, obj, **kwargs):
filename = kwargs.get('filename', self.filename)
self.fix()
# logmessage("write_as_json: writing to " + os.path.join(self.directory, filename))
with open(os.path.join(self.directory, filename), 'w', encoding='utf-8') as ifile:
json.dump(obj, ifile, sort_keys=True, indent=2)
if kwargs.get('save', True):
self.save()
    def temp_url_for(self, **kwargs):
        """Return a short-lived URL for downloading the stored file.

        If 'use cloud urls' is enabled, returns a signed cloud URL valid
        for *seconds* (default 30).  Otherwise registers a one-time random
        code in Redis under da:tempfile:<code>, which the /tempfile route
        resolves back to this section/file number.
        """
        if kwargs.get('_attachment', False):
            suffix = 'download'
        else:
            suffix = ''
        filename = kwargs.get('filename', self.filename)
        seconds = kwargs.get('seconds', None)
        if isinstance(seconds, float):
            seconds = int(seconds)
        if not isinstance(seconds, int):
            seconds = 30
        if cloud is not None and daconfig.get('use cloud urls', False):
            keyname = str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename)
            key = cloud.get_key(keyname)
            inline = not bool(kwargs.get('_attachment', False))
            if key.does_exist:
                return key.generate_url(seconds, display_filename=kwargs.get('display_filename', None), inline=inline, content_type=kwargs.get('content_type', None))
            logmessage("key " + str(keyname) + " did not exist")
            return 'about:blank'
        r = docassemble.base.functions.server.server_redis
        # setnx succeeds only if the code is unused, so loop until we find
        # a fresh one; the Redis entry expires after *seconds*.
        while True:
            code = random_alphanumeric(32)
            keyname = 'da:tempfile:' + code
            if r.setnx(keyname, str(self.section) + '^' + str(self.file_number)):
                r.expire(keyname, seconds)
                break
        use_external = kwargs.get('_external', bool('jsembed' in docassemble.base.functions.this_thread.misc))
        url = url_for('rootindex', _external=use_external).rstrip('/')
        url += '/tempfile' + suffix + '/' + code + '/' + path_to_key(kwargs.get('display_filename', filename))
        return url
def cloud_path(self, filename=None):
if cloud is None:
return None
if filename is None:
filename = self.filename
return str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename)
    def url_for(self, **kwargs):
        """Return a URL from which the stored file (or a page image of a
        'files'-section document) can be retrieved.

        Recognized keyword arguments: ext (extension to append), filename,
        display_filename, page and size ('page' or 'screen' images),
        _attachment (download disposition), _external (absolute URL), and
        content_type.
        """
        if 'ext' in kwargs and kwargs['ext'] is not None:
            extn = kwargs['ext']
            extn = re.sub(r'^\.', '', extn)
        else:
            extn = None
        filename = kwargs.get('filename', self.filename)
        # Serve signed cloud URLs when enabled -- except for page images of
        # 'files'-section documents, which the web app serves itself.
        if cloud is not None and not (self.section == 'files' and 'page' in kwargs and kwargs['page']) and daconfig.get('use cloud urls', False):
            keyname = str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename)
            page = kwargs.get('page', None)
            if page:
                size = kwargs.get('size', 'page')
                page = re.sub(r'[^0-9]', '', str(page))
                if size == 'screen':
                    keyname += 'screen-' + str(page) + '.png'
                else:
                    keyname += 'page-' + str(page) + '.png'
            elif extn:
                keyname += '.' + extn
            key = cloud.get_key(keyname)
            inline = not bool(kwargs.get('_attachment', False))
            if key.does_exist:
                return key.generate_url(3600, display_filename=kwargs.get('display_filename', None), inline=inline, content_type=kwargs.get('content_type', None))
            # why not serve right from uploadedpage in this case?
            logmessage("key " + str(keyname) + " did not exist")
            return 'about:blank'
        if kwargs.get('_attachment', False):
            suffix = 'download'
        else:
            suffix = ''
        use_external = kwargs.get('_external', bool('jsembed' in docassemble.base.functions.this_thread.misc))
        base_url = url_for('rootindex', _external=use_external).rstrip('/')
        if extn is None:
            extn = ''
        else:
            extn = '.' + extn
        filename = kwargs.get('display_filename', filename)
        if self.section == 'files':
            if 'page' in kwargs and kwargs['page']:
                page = re.sub(r'[^0-9]', '', str(kwargs['page']))
                size = kwargs.get('size', 'page')
                url = base_url + '/uploadedpage'
                if size == 'screen':
                    url += 'screen'
                url += suffix
                url += '/' + str(self.file_number) + '/' + str(page)
            else:
                # Only append the extension when the filename has no dot of its own.
                if re.search(r'\.', str(filename)):
                    url = base_url + '/uploadedfile' + suffix + '/' + str(self.file_number) + '/' + path_to_key(filename)
                elif extn != '':
                    url = base_url + '/uploadedfile' + suffix + '/' + str(self.file_number) + '/' + path_to_key(filename) + extn
                else:
                    url = base_url + '/uploadedfile' + suffix + '/' + str(self.file_number)
        else:
            # Only 'files'-section objects can be served this way.
            logmessage("section " + self.section + " was wrong")
            url = 'about:blank'
        return url
    def finalize(self):
        """Push local changes to cloud storage.

        Uploads every local file whose modification time differs from the
        modtime recorded by fix() (or that has no cloud key yet), setting an
        appropriate Content-Type, and deletes cloud keys whose local file
        has been removed.  No-op without cloud storage; raises DAError if
        fix() has not been called first.
        """
        # logmessage("finalize: starting " + str(self.section) + '/' + str(self.file_number))
        if cloud is None:
            return
        if not self.fixed:
            raise DAError("SavedFile: finalize called before fix")
        for filename in listfiles(self.directory):
            fullpath = os.path.join(self.directory, filename)
            # logmessage("Found " + fullpath)
            if os.path.isfile(fullpath):
                save = True
                if filename in self.keydict:
                    key = self.keydict[filename]
                    # Unchanged since the last sync; skip the upload.
                    if self.modtimes[filename] == os.path.getmtime(fullpath):
                        save = False
                else:
                    key = cloud.get_key(str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename))
                if save:
                    # Use the suffixed name so the MIME type reflects the
                    # file's real extension.
                    if self.extension is not None and filename == self.filename:
                        extension, mimetype = get_ext_and_mimetype(filename + '.' + self.extension)  # pylint: disable=unused-variable
                    else:
                        extension, mimetype = get_ext_and_mimetype(filename)
                    key.content_type = mimetype
                    # logmessage("finalize: saving " + str(self.section) + '/' + str(self.file_number) + '/' + str(filename))
                    # The file may have disappeared since listfiles() ran.
                    if not os.path.isfile(fullpath):
                        continue
                    try:
                        key.set_contents_from_filename(fullpath)
                        self.modtimes[filename] = key.get_epoch_modtime()
                    except FileNotFoundError:
                        logmessage("finalize: error while saving " + str(self.section) + '/' + str(self.file_number) + '/' + str(filename) + "; path " + str(fullpath) + " disappeared")
        # Delete cloud keys for files that no longer exist locally.
        for filename, key in self.keydict.items():
            if not os.path.isfile(os.path.join(self.directory, filename)):
                logmessage("finalize: deleting " + str(self.section) + '/' + str(self.file_number) + '/' + path_to_key(filename))
                try:
                    key.delete()
                except:
                    pass
        # logmessage("finalize: ending " + str(self.section) + '/' + str(self.file_number))
def get_ext_and_mimetype(filename):
    """Return an (extension, mimetype) tuple for a file name.

    The extension is the lower-cased text after the final dot (or the
    whole lower-cased name when there is no dot), with jpeg/tiff
    normalized to jpg/tif.  The MIME type comes from the mimetypes
    module, overridden for 3gpp audio and for YAML (treated as plain
    text).
    """
    mimetype, encoding = mimetypes.guess_type(filename)  # pylint: disable=unused-variable
    # rpartition yields the text after the last '.', or the whole string
    # when no dot is present.
    extension = filename.lower().rpartition('.')[2]
    if extension == "jpeg":
        extension = "jpg"
    elif extension == "tiff":
        extension = "tif"
    if extension == '3gpp':
        mimetype = 'audio/3gpp'
    elif extension in ('yaml', 'yml'):
        mimetype = 'text/plain'
    return (extension, mimetype)
def publish_package(pkgname, info, author_info, current_project='default'):
    """Build a source distribution of a playground package and upload it to
    PyPI with twine, using the current user's stored PyPI credentials.

    Returns a (had_error, output) tuple, where output is the combined
    build/upload output with newlines converted to <br> for HTML display.
    The temporary build directory is removed regardless of outcome.
    """
    directory = make_package_dir(pkgname, info, author_info, current_project=current_project)
    packagedir = os.path.join(directory, 'docassemble-' + str(pkgname))
    output = "Publishing docassemble." + pkgname + " to PyPI . . .\n\n"
    try:
        output += subprocess.check_output(['python', 'setup.py', 'sdist'], cwd=packagedir, stderr=subprocess.STDOUT).decode()
    except subprocess.CalledProcessError as err:
        # Keep going; the missing dist directory check below reports failure.
        output += err.output.decode()
    dist_dir = os.path.join(packagedir, 'dist')
    had_error = False
    if not os.path.isdir(dist_dir):
        output += "dist directory " + str(dist_dir) + " did not exist after calling sdist"
        had_error = True
    else:
        try:
            output += subprocess.check_output(['twine', 'upload', '--repository', 'pypi', '--username', str(current_user.pypi_username), '--password', str(current_user.pypi_password), os.path.join('dist', '*')], cwd=packagedir, stderr=subprocess.STDOUT).decode()
        except subprocess.CalledProcessError as err:
            output += "Error calling twine upload.\n"
            output += err.output.decode()
            had_error = True
    output = re.sub(r'\n', '<br>', output)
    shutil.rmtree(directory)
    logmessage(output)
    return (had_error, output)
def make_package_zip(pkgname, info, author_info, tz_name, current_project='default'):
    """Build the package directory for *pkgname* and return a
    NamedTemporaryFile containing a ZIP archive of it.

    Timestamps inside the archive are the files' on-disk modification
    times converted to the time zone named by *tz_name*.  The caller must
    keep a reference to the returned temporary file (it is deleted when
    closed).
    """
    directory = make_package_dir(pkgname, info, author_info, current_project=current_project)
    trimlength = len(directory) + 1
    packagedir = os.path.join(directory, 'docassemble-' + str(pkgname))
    temp_zip = tempfile.NamedTemporaryFile(suffix=".zip")
    zf = zipfile.ZipFile(temp_zip, mode='w')
    the_timezone = zoneinfo.ZoneInfo(tz_name)
    for root, dirs, files in os.walk(packagedir):  # pylint: disable=unused-variable
        for file in files:
            thefilename = os.path.join(root, file)
            # datetime.utcfromtimestamp() is deprecated (Python 3.12); build
            # an aware UTC datetime directly, then convert to the target zone.
            mtime = datetime.datetime.fromtimestamp(os.path.getmtime(thefilename), tz=datetime.timezone.utc).astimezone(the_timezone)
            zinfo = zipfile.ZipInfo(thefilename[trimlength:], date_time=mtime.timetuple())
            zinfo.compress_type = zipfile.ZIP_DEFLATED
            zinfo.external_attr = 0o644 << 16  # regular file, rw-r--r--
            with open(thefilename, 'rb') as fp:
                zf.writestr(zinfo, fp.read())
    zf.close()
    shutil.rmtree(directory)
    return temp_zip
def get_version_suffix(package_name):
    """Return a version specifier (e.g. '>=1.2.3') pinning *package_name*
    to the newest PyPI release that is not newer than the installed
    version, or '' when no suitable release can be determined.

    Network errors and missing packages are deliberately swallowed,
    yielding ''.
    """
    info = get_pip_info(package_name)
    # Previously, a missing 'Version' key left the_version unbound and
    # raised NameError below; .get() makes the fallback actually apply.
    the_version = info.get('Version')
    if the_version is None:
        the_version = '1.0'
    installed_version = version.parse(the_version.strip())
    latest_release = None
    printable_latest_release = None
    try:
        r = requests.get("https://pypi.org/pypi/%s/json" % package_name, timeout=5)
        assert r.status_code == 200
        pypi_info = r.json()
        for release_string in pypi_info['releases'].keys():
            past_version = version.parse(release_string)
            # Track the newest release that does not exceed what is installed.
            if past_version <= installed_version and (latest_release is None or past_version > latest_release):
                latest_release = past_version
                printable_latest_release = release_string
            if past_version == installed_version:
                break
    except:
        pass
    if printable_latest_release:
        return '>=' + printable_latest_release
    return ''
def make_package_dir(pkgname, info, author_info, directory=None, current_project='default'):
    """Assemble a complete docassemble extension package directory tree.

    Copies the interview, template, module, static, and source files listed
    in *info* from the author's playground areas into a conventional layout
    (docassemble-<pkgname>/docassemble/<pkgname>/data/...), generating
    setup.py, setup.cfg, MANIFEST.in, README.md, LICENSE, and __init__.py
    along the way.  Generated files get their mtime set to info['modtime']
    so repeated builds are reproducible.  Returns the top-level directory
    (a new temporary directory unless *directory* is supplied); the caller
    is responsible for deleting it.
    """
    # Open the playground storage areas for the author.
    area = {}
    for sec in ['playground', 'playgroundtemplate', 'playgroundstatic', 'playgroundsources', 'playgroundmodules']:
        area[sec] = SavedFile(author_info['id'], fix=True, section=sec)
    dependencies = ", ".join(map(lambda x: repr(x + get_version_suffix(x)), sorted(info['dependencies'])))
    # Templates for the generated package files follow.
    initpy = """\
try:
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    __path__ = __import__('pkgutil').extend_path(__path__, __name__)
"""
    licensetext = str(info['license'])
    if re.search(r'MIT License', licensetext):
        licensetext += '\n\nCopyright (c) ' + str(datetime.datetime.now().year) + ' ' + str(info.get('author_name', '')) + """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
    if info['readme'] and re.search(r'[A-Za-z]', info['readme']):
        readme = str(info['readme'])
    else:
        readme = '# docassemble.' + str(pkgname) + "\n\n" + info['description'] + "\n\n## Author\n\n" + author_info['author name and email'] + "\n\n"
    manifestin = """\
include README.md
"""
    setupcfg = """\
[metadata]
description-file = README.md
"""
    setuppy = """\
import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
    out = {}
    stack = [(convert_path(where), '', package)]
    while stack:
        where, prefix, package = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package))
                else:
                    stack.append((fn, prefix + name + '/', package))
            else:
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix+name)
    return out
"""
    setuppy += "setup(name='docassemble." + str(pkgname) + "',\n" + """\
      version=""" + repr(info.get('version', '')) + """,
      description=(""" + repr(info.get('description', '')) + """),
      long_description=""" + repr(readme) + """,
      long_description_content_type='text/markdown',
      author=""" + repr(info.get('author_name', '')) + """,
      author_email=""" + repr(info.get('author_email', '')) + """,
      license=""" + repr(info.get('license', '')) + """,
      url=""" + repr(info['url'] if info['url'] else 'https://docassemble.org') + """,
      packages=find_packages(),
      namespace_packages=['docassemble'],
      install_requires=[""" + dependencies + """],
      zip_safe=False,
      package_data=find_package_data(where='docassemble/""" + str(pkgname) + """/', package='docassemble.""" + str(pkgname) + """'),
      )
"""
    templatereadme = """\
# Template directory
If you want to use templates for document assembly, put them in this directory.
"""
    staticreadme = """\
# Static file directory
If you want to make files available in the web app, put them in
this directory.
"""
    sourcesreadme = """\
# Sources directory
This directory is used to store word translation files,
machine learning training files, and other source files.
"""
    # Create the package directory skeleton.
    if directory is None:
        directory = tempfile.mkdtemp(prefix='SavedFile')
    packagedir = os.path.join(directory, 'docassemble-' + str(pkgname))
    maindir = os.path.join(packagedir, 'docassemble', str(pkgname))
    questionsdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'questions')
    templatesdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'templates')
    staticdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'static')
    sourcesdir = os.path.join(packagedir, 'docassemble', str(pkgname), 'data', 'sources')
    if not os.path.isdir(questionsdir):
        os.makedirs(questionsdir)
    if not os.path.isdir(templatesdir):
        os.makedirs(templatesdir)
    if not os.path.isdir(staticdir):
        os.makedirs(staticdir)
    if not os.path.isdir(sourcesdir):
        os.makedirs(sourcesdir)
    # Resolve the project-specific playground directories.
    dir_questions = directory_for(area['playground'], current_project)
    dir_template = directory_for(area['playgroundtemplate'], current_project)
    dir_modules = directory_for(area['playgroundmodules'], current_project)
    dir_static = directory_for(area['playgroundstatic'], current_project)
    dir_sources = directory_for(area['playgroundsources'], current_project)
    # Copy each category of playground files into the package tree;
    # missing files are logged but do not abort the build.
    for the_file in info['interview_files']:
        orig_file = os.path.join(dir_questions, the_file)
        if os.path.exists(orig_file):
            shutil.copy2(orig_file, os.path.join(questionsdir, the_file))
        else:
            logmessage("failure on " + orig_file)
    for the_file in info['template_files']:
        orig_file = os.path.join(dir_template, the_file)
        if os.path.exists(orig_file):
            shutil.copy2(orig_file, os.path.join(templatesdir, the_file))
        else:
            logmessage("failure on " + orig_file)
    for the_file in info['module_files']:
        orig_file = os.path.join(dir_modules, the_file)
        if os.path.exists(orig_file):
            shutil.copy2(orig_file, os.path.join(maindir, the_file))
        else:
            logmessage("failure on " + orig_file)
    for the_file in info['static_files']:
        orig_file = os.path.join(dir_static, the_file)
        if os.path.exists(orig_file):
            shutil.copy2(orig_file, os.path.join(staticdir, the_file))
        else:
            logmessage("failure on " + orig_file)
    for the_file in info['sources_files']:
        orig_file = os.path.join(dir_sources, the_file)
        if os.path.exists(orig_file):
            shutil.copy2(orig_file, os.path.join(sourcesdir, the_file))
        else:
            logmessage("failure on " + orig_file)
    # Write the generated package files, pinning each mtime to info['modtime'].
    with open(os.path.join(packagedir, 'README.md'), 'w', encoding='utf-8') as the_file:
        the_file.write(readme)
    os.utime(os.path.join(packagedir, 'README.md'), (info['modtime'], info['modtime']))
    with open(os.path.join(packagedir, 'LICENSE'), 'w', encoding='utf-8') as the_file:
        the_file.write(licensetext)
    os.utime(os.path.join(packagedir, 'LICENSE'), (info['modtime'], info['modtime']))
    with open(os.path.join(packagedir, 'setup.py'), 'w', encoding='utf-8') as the_file:
        the_file.write(setuppy)
    os.utime(os.path.join(packagedir, 'setup.py'), (info['modtime'], info['modtime']))
    with open(os.path.join(packagedir, 'setup.cfg'), 'w', encoding='utf-8') as the_file:
        the_file.write(setupcfg)
    os.utime(os.path.join(packagedir, 'setup.cfg'), (info['modtime'], info['modtime']))
    with open(os.path.join(packagedir, 'MANIFEST.in'), 'w', encoding='utf-8') as the_file:
        the_file.write(manifestin)
    os.utime(os.path.join(packagedir, 'MANIFEST.in'), (info['modtime'], info['modtime']))
    with open(os.path.join(packagedir, 'docassemble', '__init__.py'), 'w', encoding='utf-8') as the_file:
        the_file.write(initpy)
    os.utime(os.path.join(packagedir, 'docassemble', '__init__.py'), (info['modtime'], info['modtime']))
    with open(os.path.join(packagedir, 'docassemble', pkgname, '__init__.py'), 'w', encoding='utf-8') as the_file:
        the_file.write("__version__ = " + repr(info.get('version', '')) + "\n")
    os.utime(os.path.join(packagedir, 'docassemble', pkgname, '__init__.py'), (info['modtime'], info['modtime']))
    with open(os.path.join(templatesdir, 'README.md'), 'w', encoding='utf-8') as the_file:
        the_file.write(templatereadme)
    os.utime(os.path.join(templatesdir, 'README.md'), (info['modtime'], info['modtime']))
    with open(os.path.join(staticdir, 'README.md'), 'w', encoding='utf-8') as the_file:
        the_file.write(staticreadme)
    os.utime(os.path.join(staticdir, 'README.md'), (info['modtime'], info['modtime']))
    with open(os.path.join(sourcesdir, 'README.md'), 'w', encoding='utf-8') as the_file:
        the_file.write(sourcesreadme)
    os.utime(os.path.join(sourcesdir, 'README.md'), (info['modtime'], info['modtime']))
    return directory
def directory_for(area, current_project):
    """Return the on-disk directory of a playground *area* for the given
    project; the 'default' project lives at the area's root."""
    base = area.directory
    return base if current_project == 'default' else os.path.join(base, current_project)
def update_access_time(filepath):
    """Refresh a file's access time by reading a single byte from its
    start; the read itself, not the data, is the point."""
    with open(filepath, "rb") as reader:
        reader.seek(0)
        reader.read(1)
# mako/filters.py
# Copyright 2006-2022 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
from html.entities import codepoint2name
from html.entities import name2codepoint
import re
from urllib.parse import quote_plus
import markupsafe
html_escape = markupsafe.escape
# Single-character replacements applied by xml_escape().
xml_escapes = {
    "&": "&amp;",
    ">": "&gt;",
    "<": "&lt;",
    '"': "&#34;",  # also &quot; in html-only
    "'": "&#39;",  # also &apos; in html-only
}


def xml_escape(string):
    """Replace XML-significant characters with entity references."""
    return string.translate(str.maketrans(xml_escapes))
def url_escape(string):
    """Percent-quote a string for a URL query component; the text is
    UTF-8 encoded first, and spaces become '+'."""
    return quote_plus(string.encode("utf8"))
def trim(string):
    """Strip leading and trailing whitespace from the string."""
    return string.strip()
class Decode:
    """Attribute-based decoder factory: ``decode.utf8(x)`` decodes bytes
    using the attribute name as the codec, passes str through unchanged,
    and stringifies anything else."""

    def __getattr__(self, key):
        def decode(x):
            if isinstance(x, bytes):
                return str(x, encoding=key)
            if isinstance(x, str):
                return x
            return str(x)

        return decode


decode = Decode()
class XMLEntityEscaper:
    """Escape and unescape XML/HTML character entities using the
    codepoint<->name tables given to the constructor (typically
    html.entities.codepoint2name and name2codepoint)."""

    def __init__(self, codepoint2name, name2codepoint):
        # str.translate table: codepoint -> '&name;'
        self.codepoint2entity = {
            c: str("&%s;" % n) for c, n in codepoint2name.items()
        }
        self.name2codepoint = name2codepoint

    def escape_entities(self, text):
        """Replace characters with their character entity references.

        Only characters corresponding to a named entity are replaced.
        """
        return str(text).translate(self.codepoint2entity)

    def __escape(self, m):
        codepoint = ord(m.group())
        try:
            return self.codepoint2entity[codepoint]
        except (KeyError, IndexError):
            # No named entity: fall back to a numeric character reference.
            return "&#x%X;" % codepoint

    __escapable = re.compile(r'["&<>]|[^\x00-\x7f]')

    def escape(self, text):
        """Replace characters with their character references.

        Replace characters by their named entity references.
        Non-ASCII characters, if they do not have a named entity reference,
        are replaced by numerical character references.

        The return value is guaranteed to be ASCII (as bytes).
        """
        return self.__escapable.sub(self.__escape, str(text)).encode("ascii")

    # XXX: This regexp will not match all valid XML entity names__.
    # (It punts on details involving CombiningChars and Extenders.)
    #
    # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
    __characterrefs = re.compile(
        r"""& (?:
                  \#(\d+)
                | \#x([\da-f]+)
                | (   (?!\d) [:\w] [-.:\w]+  )
                ) ;""",
        re.X | re.UNICODE,
    )

    def __unescape(self, m):
        dval, hval, name = m.groups()
        if dval:
            codepoint = int(dval)
        elif hval:
            codepoint = int(hval, 16)
        else:
            # U+FFFD = "REPLACEMENT CHARACTER" for unknown entity names.
            codepoint = self.name2codepoint.get(name, 0xFFFD)
        # The original had a redundant "if codepoint < 128" branch whose
        # body was identical to the fallthrough; chr() covers all cases.
        return chr(codepoint)

    def unescape(self, text):
        """Unescape character references.

        All character references (both entity references and numerical
        character references) are unescaped.
        """
        return self.__characterrefs.sub(self.__unescape, text)
# Module-level escaper built from the standard HTML entity tables, plus
# convenience aliases used by the template filters.
_html_entities_escaper = XMLEntityEscaper(codepoint2name, name2codepoint)
html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
def htmlentityreplace_errors(ex):
    """An encoding error handler.

    This python codecs error handler replaces unencodable
    characters with HTML entities, or, if no HTML entity exists for
    the character, XML character references::

        >>> 'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
        b'The cost was &euro;12.'
    """
    if isinstance(ex, UnicodeEncodeError):
        # Escape just the offending slice and resume encoding after it.
        bad_text = ex.object[ex.start : ex.end]
        text = _html_entities_escaper.escape(bad_text)
        # escape() returns ASCII bytes; decode them so the replacement is a
        # str.  (str() on the bytes inserted the literal b'...' repr.)
        return (text.decode("ascii"), ex.end)
    raise ex


codecs.register_error("htmlentityreplace", htmlentityreplace_errors)
# Mapping of filter aliases usable in templates (e.g. ${x | h}) to the
# expression that implements each filter.
DEFAULT_ESCAPES = {
    "x": "filters.xml_escape",
    "h": "filters.html_escape",
    "u": "filters.url_escape",
    "trim": "filters.trim",
    "entity": "filters.html_entities_escape",
    "unicode": "str",
    "decode": "decode",
    "str": "str",
    "n": "n",
}
import os
import subprocess
import time
from packaging import version
from flask import Flask, abort, make_response, render_template, request
app = Flask(__name__)
LOG_DIRECTORY = '/var/www/html/log'
version_file = '/usr/share/docassemble/webapp/VERSION'
# Read the installed docassemble version so we can locate the "ready"
# marker file, whose location moved in 1.4.0.
if os.path.isfile(version_file) and os.access(version_file, os.R_OK):
    with open(version_file, 'r', encoding='utf-8') as fp:
        system_version = fp.read().strip()
else:
    # Fallback when no VERSION file exists -- presumably a very old
    # install; TODO confirm.
    system_version = '0.1.12'
if version.parse(system_version) < version.parse('1.4.0'):
    READY_FILE = '/usr/share/docassemble/webapp/ready'
else:
    READY_FILE = '/var/run/docassemble/ready'
@app.route('/listlog')
def list_log_files():
    """Return a newline-separated, sorted listing of files in the log
    directory, after triggering the supervisor 'sync' task and waiting for
    it to finish.  Returns an error string if the shell pipeline fails."""
    cmd = "supervisorctl "
    if os.getenv('DASUPERVISORUSERNAME', None):
        # NOTE(review): credentials are interpolated into a shell string;
        # the env vars are operator-controlled, but the second supervisorctl
        # invocation in the while-loop below omits them -- confirm that is
        # intentional.
        cmd += '--username ' + os.getenv('DASUPERVISORUSERNAME') + ' --password ' + os.getenv('DASUPERVISORPASSWORD') + ' '
    cmd += "--serverurl http://localhost:9001 start sync > /dev/null && while supervisorctl --serverurl http://localhost:9001 status sync | grep -q RUNNING; do sleep 1; done"
    result = subprocess.run(cmd, shell=True, check=False).returncode
    if result == 0:
        file_listing = [f for f in os.listdir(LOG_DIRECTORY) if os.path.isfile(os.path.join(LOG_DIRECTORY, f))]
        if len(file_listing) == 0:
            # Give a just-finished sync a moment to populate the directory.
            time.sleep(2)
            file_listing = [f for f in os.listdir(LOG_DIRECTORY) if os.path.isfile(os.path.join(LOG_DIRECTORY, f))]
        return "\n".join(sorted(file_listing))
    return "There was an error."
@app.route("/listlog/health_check", methods=['GET'])
def health_check():
    """Liveness/readiness endpoint.

    With ?ready=<truthy>, responds 400 until the ready marker file exists;
    otherwise returns an uncacheable 200 OK page.
    """
    if request.args.get('ready', False) and not os.path.isfile(READY_FILE):
        abort(400)
    response = make_response(render_template('pages/health_check.html', content="OK"), 200)
    response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
    return response
import os
import sys
import logging
import json
import runpy
from subprocess import Popen, PIPE
from abc import ABCMeta
from pyp2rpm import utils
from pyp2rpm import main_dir
from pyp2rpm.exceptions import ExtractionError
from pyp2rpm.command import extract_dist
logger = logging.getLogger(__name__)
class ModuleRunner(object, metaclass=ABCMeta):
    """Abstract base class for module runners.

    Note: the Python-2-style ``__metaclass__ = ABCMeta`` attribute used
    previously has no effect under Python 3; the ``metaclass`` keyword is
    the Python-3 equivalent.
    """

    def __init__(self, module, *args):
        """Record the directory, base file name, and extra command-line
        arguments for *module*."""
        self.dirname = os.path.dirname(module)
        self.filename = os.path.basename(module)
        self.args = args
class RunpyModuleRunner(ModuleRunner):
    """Runs given module in current interpreter using runpy."""

    @staticmethod
    def not_suffixed(module):
        """Return *module* without a trailing '.py' suffix.

        The original checked ``endswith('py')`` but sliced off three
        characters (implicitly assuming '.py') and returned None for
        other names; this checks the full '.py' suffix and passes other
        names through unchanged.
        """
        if module.endswith('.py'):
            return module[:-3]
        return module

    def run(self):
        """Executes the code of the specified module."""
        with utils.ChangeDir(self.dirname):
            sys.path.insert(0, self.dirname)
            sys.argv[1:] = self.args
            runpy.run_module(self.not_suffixed(self.filename),
                             run_name='__main__',
                             alter_sys=True)

    @property
    def results(self):
        """Metadata collected by the extract_dist command during run()."""
        return extract_dist.extract_dist.class_metadata
class SubprocessModuleRunner(ModuleRunner):
    """Runs module in external interpreter using subprocess."""
    def run(self, interpreter):
        """Executes the code of the specified module. Deserializes captured
        json data.

        :param interpreter: path/name of the Python interpreter to invoke
        :raises ExtractionError: if spawning the subprocess fails
        """
        with utils.ChangeDir(self.dirname):
            # Command of the form:
            #   PYTHONPATH=<main_dir> <interpreter> <module file> <args...>
            command_list = ['PYTHONPATH=' + main_dir, interpreter,
                            self.filename] + list(self.args)
            try:
                # shell=True is needed so the leading "PYTHONPATH=..." token
                # is treated as an environment assignment by the shell.
                proc = Popen(' '.join(command_list), stdout=PIPE, stderr=PIPE,
                             shell=True)
                stream_data = proc.communicate()
            except Exception as e:
                logger.error(
                    "Error {0} while executing extract_dist command.".format(e))
                raise ExtractionError
            # Decode captured (stdout, stderr) bytes into text.
            stream_data = [utils.console_to_str(s) for s in stream_data]
            if proc.returncode:
                # Log diagnostics but still attempt to parse stdout below.
                logger.error(
                    "Subprocess failed, working dir: {}".format(os.getcwd()))
                logger.error(
                    "Subprocess failed, command: {}".format(command_list))
                logger.error(
                    "Subprocess failed, stdout: {0[0]}, stderr: {0[1]}".format(
                        stream_data))
            # The child prints a marker line followed by one line of JSON;
            # take the line right after the last marker and deserialize it.
            self._result = json.loads(stream_data[0].split(
                "extracted json data:\n")[-1].split("\n")[0])
    @property
    def results(self):
        # None until run() has completed successfully at least once.
        try:
            return self._result
        except AttributeError:
            return None
| mit | 1abb1031265a564374a42dbaa6fca582 | 31.892857 | 80 | 0.5751 | 4.442122 | false | false | false | false |
trungdong/prov | src/prov/tests/examples.py | 1 | 19911 | from prov.model import ProvDocument, Namespace, Literal, PROV, Identifier
import datetime
def primer_example():
    """Recreate the PROV primer example document using the prov API.

    Each API call is preceded by the PROV-N statement it implements (see the
    linked ProvToolbox fixture), so the code can be checked line by line
    against the reference document.

    :returns: :class:`ProvDocument` -- the assembled primer document.
    """
    # https://github.com/lucmoreau/ProvToolbox/blob/master/prov-n/src/test/resources/prov/primer.pn
    # ===========================================================================
    # document
    g = ProvDocument()
    # prefix ex <http://example/>
    # prefix dcterms <http://purl.org/dc/terms/>
    # prefix foaf <http://xmlns.com/foaf/0.1/>
    ex = Namespace(
        "ex", "http://example/"
    )  # namespaces do not need to be explicitly added to a document
    g.add_namespace("dcterms", "http://purl.org/dc/terms/")
    g.add_namespace("foaf", "http://xmlns.com/foaf/0.1/")
    # entity(ex:article, [dcterms:title="Crime rises in cities"])
    # first time the ex namespace was used, it is added to the document automatically
    g.entity(ex["article"], {"dcterms:title": "Crime rises in cities"})
    # entity(ex:articleV1)
    g.entity(ex["articleV1"])
    # entity(ex:articleV2)
    g.entity(ex["articleV2"])
    # entity(ex:dataSet1)
    g.entity(ex["dataSet1"])
    # entity(ex:dataSet2)
    g.entity(ex["dataSet2"])
    # entity(ex:regionList)
    g.entity(ex["regionList"])
    # entity(ex:composition)
    g.entity(ex["composition"])
    # entity(ex:chart1)
    g.entity(ex["chart1"])
    # entity(ex:chart2)
    g.entity(ex["chart2"])
    # entity(ex:blogEntry)
    g.entity(ex["blogEntry"])
    # activity(ex:compile)
    g.activity("ex:compile")  # since ex is registered, it can be used like this
    # activity(ex:compile2)
    g.activity("ex:compile2")
    # activity(ex:compose)
    g.activity("ex:compose")
    # activity(ex:correct, 2012-03-31T09:21:00, 2012-04-01T15:21:00)
    g.activity(
        "ex:correct", "2012-03-31T09:21:00", "2012-04-01T15:21:00"
    )  # date time can be provided as strings
    # activity(ex:illustrate)
    g.activity("ex:illustrate")
    # used(ex:compose, ex:dataSet1, -, [ prov:role = "ex:dataToCompose"])
    g.used(
        "ex:compose", "ex:dataSet1", other_attributes={"prov:role": "ex:dataToCompose"}
    )
    # used(ex:compose, ex:regionList, -, [ prov:role = "ex:regionsToAggregateBy"])
    g.used(
        "ex:compose",
        "ex:regionList",
        other_attributes={"prov:role": "ex:regionsToAggregateBy"},
    )
    # wasGeneratedBy(ex:composition, ex:compose, -)
    g.wasGeneratedBy("ex:composition", "ex:compose")
    # used(ex:illustrate, ex:composition, -)
    g.used("ex:illustrate", "ex:composition")
    # wasGeneratedBy(ex:chart1, ex:illustrate, -)
    g.wasGeneratedBy("ex:chart1", "ex:illustrate")
    # wasGeneratedBy(ex:chart1, ex:compile, 2012-03-02T10:30:00)
    g.wasGeneratedBy("ex:chart1", "ex:compile", "2012-03-02T10:30:00")
    # wasGeneratedBy(ex:chart2, ex:compile2, 2012-04-01T15:21:00)
    # NOTE(review): the PROV-N statement above has no corresponding API call;
    # confirm whether g.wasGeneratedBy("ex:chart2", "ex:compile2", ...) was
    # intended here.
    #
    #
    # agent(ex:derek, [ prov:type="prov:Person", foaf:givenName = "Derek",
    #                   foaf:mbox= "<mailto:derek@example.org>"])
    g.agent(
        "ex:derek",
        {
            "prov:type": PROV["Person"],
            "foaf:givenName": "Derek",
            "foaf:mbox": "<mailto:derek@example.org>",
        },
    )
    # wasAssociatedWith(ex:compose, ex:derek, -)
    g.wasAssociatedWith("ex:compose", "ex:derek")
    # wasAssociatedWith(ex:illustrate, ex:derek, -)
    g.wasAssociatedWith("ex:illustrate", "ex:derek")
    #
    # agent(ex:chartgen, [ prov:type="prov:Organization",
    #       foaf:name = "Chart Generators Inc"])
    g.agent(
        "ex:chartgen",
        {"prov:type": PROV["Organization"], "foaf:name": "Chart Generators Inc"},
    )
    # actedOnBehalfOf(ex:derek, ex:chartgen, ex:compose)
    g.actedOnBehalfOf("ex:derek", "ex:chartgen", "ex:compose")
    # wasAttributedTo(ex:chart1, ex:derek)
    g.wasAttributedTo("ex:chart1", "ex:derek")
    # wasGeneratedBy(ex:dataSet2, ex:correct, -)
    g.wasGeneratedBy("ex:dataSet2", "ex:correct")
    # used(ex:correct, ex:dataSet1, -)
    g.used("ex:correct", "ex:dataSet1")
    # wasDerivedFrom(ex:dataSet2, ex:dataSet1, [prov:type='prov:Revision'])
    g.wasDerivedFrom(
        "ex:dataSet2", "ex:dataSet1", other_attributes={"prov:type": PROV["Revision"]}
    )
    # wasDerivedFrom(ex:chart2, ex:dataSet2)
    g.wasDerivedFrom("ex:chart2", "ex:dataSet2")
    # wasDerivedFrom(ex:blogEntry, ex:article, [prov:type='prov:Quotation'])
    g.wasDerivedFrom(
        "ex:blogEntry", "ex:article", other_attributes={"prov:type": PROV["Quotation"]}
    )
    # specializationOf(ex:articleV1, ex:article)
    g.specializationOf("ex:articleV1", "ex:article")
    # wasDerivedFrom(ex:articleV1, ex:dataSet1)
    g.wasDerivedFrom("ex:articleV1", "ex:dataSet1")
    # specializationOf(ex:articleV2, ex:article)
    g.specializationOf("ex:articleV2", "ex:article")
    # wasDerivedFrom(ex:articleV2, ex:dataSet2)
    g.wasDerivedFrom("ex:articleV2", "ex:dataSet2")
    # alternateOf(ex:articleV2, ex:articleV1)
    g.alternateOf("ex:articleV2", "ex:articleV1")
    # endDocument
    return g
def primer_example_alternate():
    """Recreate the PROV primer document using the fluent (record-object) API.

    Builds the same document as :func:`primer_example`, but chains relations
    off the returned record objects instead of calling the document-level
    methods with identifiers.

    :returns: :class:`ProvDocument` -- the assembled primer document.

    NOTE(review): as in :func:`primer_example`, ``compile2`` is created but no
    generation of ``chart2`` by it is recorded; confirm this is intended.
    """
    g = ProvDocument(
        namespaces={
            "ex": "http://example/",
            "dcterms": "http://purl.org/dc/terms/",
            "foaf": "http://xmlns.com/foaf/0.1/",
        }
    )
    article = g.entity("ex:article", {"dcterms:title": "Crime rises in cities"})
    articleV1 = g.entity("ex:articleV1")
    articleV2 = g.entity("ex:articleV2")
    dataSet1 = g.entity("ex:dataSet1")
    dataSet2 = g.entity("ex:dataSet2")
    regionList = g.entity("ex:regionList")
    composition = g.entity("ex:composition")
    chart1 = g.entity("ex:chart1")
    chart2 = g.entity("ex:chart2")
    blogEntry = g.entity("ex:blogEntry")
    compile = g.activity("ex:compile")
    compile2 = g.activity("ex:compile2")
    compose = g.activity("ex:compose")
    correct = g.activity("ex:correct", "2012-03-31T09:21:00", "2012-04-01T15:21:00")
    illustrate = g.activity("ex:illustrate")
    compose.used(dataSet1, attributes={"prov:role": "ex:dataToCompose"})
    compose.used(regionList, attributes={"prov:role": "ex:regionsToAggregateBy"})
    composition.wasGeneratedBy(compose)
    illustrate.used(composition)
    chart1.wasGeneratedBy(illustrate)
    chart1.wasGeneratedBy(compile, "2012-03-02T10:30:00")
    derek = g.agent(
        "ex:derek",
        {
            "prov:type": PROV["Person"],
            "foaf:givenName": "Derek",
            "foaf:mbox": "<mailto:derek@example.org>",
        },
    )
    compose.wasAssociatedWith(derek)
    illustrate.wasAssociatedWith(derek)
    chartgen = g.agent(
        "ex:chartgen",
        {"prov:type": PROV["Organization"], "foaf:name": "Chart Generators Inc"},
    )
    derek.actedOnBehalfOf(chartgen, compose)
    chart1.wasAttributedTo(derek)
    dataSet2.wasGeneratedBy(correct)
    correct.used(dataSet1)
    dataSet2.wasDerivedFrom(dataSet1, attributes={"prov:type": PROV["Revision"]})
    chart2.wasDerivedFrom(dataSet2)
    blogEntry.wasDerivedFrom(article, attributes={"prov:type": PROV["Quotation"]})
    articleV1.specializationOf(article)
    articleV1.wasDerivedFrom(dataSet1)
    articleV2.specializationOf(article)
    articleV2.wasDerivedFrom(dataSet2)
    articleV2.alternateOf(articleV1)
    return g
def w3c_publication_1():
    """Recreate the ProvToolbox "w3c-publication1" example document.

    The full PROV-N reference is kept as the comment block below; the code
    after it implements the same statements with the prov API.

    :returns: :class:`ProvDocument` -- the assembled document.
    """
    # https://github.com/lucmoreau/ProvToolbox/blob/master/asn/src/test/resources/prov/w3c-publication1.prov-asn
    # ===========================================================================
    # bundle
    #
    # prefix ex <http://example.org/>
    #
    # prefix w3 <http://www.w3.org/>
    # prefix tr <http://www.w3.org/TR/2011/>
    # prefix process <http://www.w3.org/2005/10/Process-20051014/tr.html#>
    # prefix email <https://lists.w3.org/Archives/Member/w3c-archive/>
    # prefix chairs <https://lists.w3.org/Archives/Member/chairs/>
    # prefix trans <http://www.w3.org/2005/08/01-transitions.html#>
    # prefix rec54 <http://www.w3.org/2001/02pd/rec54#>
    #
    #
    # entity(tr:WD-prov-dm-20111018, [ prov:type='rec54:WD' ])
    # entity(tr:WD-prov-dm-20111215, [ prov:type='rec54:WD' ])
    # entity(process:rec-advance, [ prov:type='prov:Plan' ])
    #
    #
    # entity(chairs:2011OctDec/0004, [ prov:type='trans:transreq' ])
    # entity(email:2011Oct/0141, [ prov:type='trans:pubreq' ])
    # entity(email:2011Dec/0111, [ prov:type='trans:pubreq' ])
    #
    #
    # wasDerivedFrom(tr:WD-prov-dm-20111215, tr:WD-prov-dm-20111018)
    #
    #
    # activity(ex:act1,-,-,[prov:type="publish"])
    # activity(ex:act2,-,-,[prov:type="publish"])
    #
    # wasGeneratedBy(tr:WD-prov-dm-20111018, ex:act1, -)
    # wasGeneratedBy(tr:WD-prov-dm-20111215, ex:act2, -)
    #
    # used(ex:act1, chairs:2011OctDec/0004, -)
    # used(ex:act1, email:2011Oct/0141, -)
    # used(ex:act2, email:2011Dec/0111, -)
    #
    # agent(w3:Consortium, [ prov:type='prov:Organization' ])
    #
    # wasAssociatedWith(ex:act1, w3:Consortium, process:rec-advance)
    # wasAssociatedWith(ex:act2, w3:Consortium, process:rec-advance)
    #
    # endBundle
    # ===========================================================================
    g = ProvDocument()
    g.add_namespace("ex", "http://example.org/")
    g.add_namespace("w3", "http://www.w3.org/")
    g.add_namespace("tr", "http://www.w3.org/TR/2011/")
    g.add_namespace("process", "http://www.w3.org/2005/10/Process-20051014/tr.html#")
    g.add_namespace("email", "https://lists.w3.org/Archives/Member/w3c-archive/")
    g.add_namespace("chairs", "https://lists.w3.org/Archives/Member/chairs/")
    g.add_namespace("trans", "http://www.w3.org/2005/08/01-transitions.html#")
    g.add_namespace("rec54", "http://www.w3.org/2001/02pd/rec54#")
    g.entity("tr:WD-prov-dm-20111018", {"prov:type": "rec54:WD"})
    g.entity("tr:WD-prov-dm-20111215", {"prov:type": "rec54:WD"})
    g.entity("process:rec-advance", {"prov:type": "prov:Plan"})
    g.entity("chairs:2011OctDec/0004", {"prov:type": "trans:transreq"})
    g.entity("email:2011Oct/0141", {"prov:type": "trans:pubreq"})
    g.entity("email:2011Dec/0111", {"prov:type": "trans:pubreq"})
    g.wasDerivedFrom("tr:WD-prov-dm-20111215", "tr:WD-prov-dm-20111018")
    g.activity("ex:act1", other_attributes={"prov:type": "publish"})
    g.activity("ex:act2", other_attributes={"prov:type": "publish"})
    g.wasGeneratedBy("tr:WD-prov-dm-20111018", "ex:act1")
    g.wasGeneratedBy("tr:WD-prov-dm-20111215", "ex:act2")
    g.used("ex:act1", "chairs:2011OctDec/0004")
    g.used("ex:act1", "email:2011Oct/0141")
    g.used("ex:act2", "email:2011Dec/0111")
    # NOTE(review): the PROV-N above says prov:type='prov:Organization', but
    # the code passes the plain string "Organization"; confirm which is meant.
    g.agent("w3:Consortium", other_attributes={"prov:type": "Organization"})
    g.wasAssociatedWith("ex:act1", "w3:Consortium", "process:rec-advance")
    g.wasAssociatedWith("ex:act2", "w3:Consortium", "process:rec-advance")
    return g
def w3c_publication_2():
    """Recreate the ProvToolbox "w3c-publication2" example document.

    The full PROV-N reference is kept as the comment block below; the code
    after it implements the same statements with the prov API.

    :returns: :class:`ProvDocument` -- the assembled document.
    """
    # https://github.com/lucmoreau/ProvToolbox/blob/master/asn/src/test/resources/prov/w3c-publication2.prov-asn
    # ===========================================================================
    # bundle
    #
    # prefix ex <http://example.org/>
    # prefix rec <http://example.org/record>
    #
    # prefix w3 <http://www.w3.org/TR/2011/>
    # prefix hg <http://dvcs.w3.org/hg/prov/raw-file/9628aaff6e20/model/releases/WD-prov-dm-20111215/>
    #
    #
    # entity(hg:Overview.html, [ prov:type="file in hg" ])
    # entity(w3:WD-prov-dm-20111215, [ prov:type="html4" ])
    #
    #
    # activity(ex:rcp,-,-,[prov:type="copy directory"])
    #
    # wasGeneratedBy(rec:g; w3:WD-prov-dm-20111215, ex:rcp, -)
    #
    # entity(ex:req3, [ prov:type="http://www.w3.org/2005/08/01-transitions.html#pubreq" %% xsd:anyURI ])
    #
    # used(rec:u; ex:rcp,hg:Overview.html,-)
    # used(ex:rcp, ex:req3, -)
    #
    #
    # wasDerivedFrom(w3:WD-prov-dm-20111215, hg:Overview.html, ex:rcp, rec:g, rec:u)
    #
    # agent(ex:webmaster, [ prov:type='prov:Person' ])
    #
    # wasAssociatedWith(ex:rcp, ex:webmaster, -)
    #
    # endBundle
    # ===========================================================================
    ex = Namespace("ex", "http://example.org/")
    rec = Namespace("rec", "http://example.org/record")
    w3 = Namespace("w3", "http://www.w3.org/TR/2011/")
    hg = Namespace(
        "hg",
        "http://dvcs.w3.org/hg/prov/raw-file/9628aaff6e20/model/releases/WD-prov-dm-20111215/",
    )
    g = ProvDocument()
    g.entity(hg["Overview.html"], {"prov:type": "file in hg"})
    g.entity(w3["WD-prov-dm-20111215"], {"prov:type": "html4"})
    g.activity(ex["rcp"], None, None, {"prov:type": "copy directory"})
    g.wasGeneratedBy("w3:WD-prov-dm-20111215", "ex:rcp", identifier=rec["g"])
    g.entity(
        "ex:req3",
        {
            "prov:type": Identifier(
                "http://www.w3.org/2005/08/01-transitions.html#pubreq"
            )
        },
    )
    g.used("ex:rcp", "hg:Overview.html", identifier="rec:u")
    g.used("ex:rcp", "ex:req3")
    g.wasDerivedFrom(
        "w3:WD-prov-dm-20111215", "hg:Overview.html", "ex:rcp", "rec:g", "rec:u"
    )
    # NOTE(review): the PROV-N above says prov:type='prov:Person', but the
    # code passes the plain string "Person"; confirm which is meant.
    g.agent("ex:webmaster", {"prov:type": "Person"})
    g.wasAssociatedWith("ex:rcp", "ex:webmaster")
    return g
def bundles1():
    """Recreate the ProvToolbox "bundles1" example document.

    Demonstrates a document holding two named bundles, each attributed to a
    different agent, with per-bundle records.

    :returns: :class:`ProvDocument` -- the assembled document.
    """
    # https://github.com/lucmoreau/ProvToolbox/blob/master/prov-n/src/test/resources/prov/bundles1.provn
    # ===============================================================================
    # document
    g = ProvDocument()
    # prefix ex <http://example.org/example/>
    # NOTE(review): the PROV-N prefix above is http://example.org/example/ but
    # the code registers http://www.example.com/ -- confirm which is intended.
    EX = Namespace("ex", "http://www.example.com/")
    g.add_namespace(EX)
    # prefix alice <http://example.org/alice/>
    # prefix bob <http://example.org/bob/>
    g.add_namespace("alice", "http://example.org/alice/")
    g.add_namespace("bob", "http://example.org/bob/")
    # entity(bob:bundle1, [prov:type='prov:Bundle'])
    g.entity("bob:bundle1", {"prov:type": PROV["Bundle"]})
    # wasGeneratedBy(bob:bundle1, -, 2012-05-24T10:30:00)
    g.wasGeneratedBy("bob:bundle1", time="2012-05-24T10:30:00")
    # agent(ex:Bob)
    g.agent("ex:Bob")
    # wasAttributedTo(bob:bundle1, ex:Bob)
    g.wasAttributedTo("bob:bundle1", "ex:Bob")
    # entity(alice:bundle2, [ prov:type='prov:Bundle' ])
    g.entity("alice:bundle2", {"prov:type": PROV["Bundle"]})
    # wasGeneratedBy(alice:bundle2, -, 2012-05-25T11:15:00)
    g.wasGeneratedBy("alice:bundle2", time="2012-05-25T11:15:00")
    # agent(ex:Alice)
    g.agent("ex:Alice")
    # wasAttributedTo(alice:bundle2, ex:Alice)
    g.wasAttributedTo("alice:bundle2", "ex:Alice")
    # bundle bob:bundle1
    b1 = g.bundle("bob:bundle1")
    # entity(ex:report1, [ prov:type="report", ex:version=1 ])
    b1.entity("ex:report1", {"prov:type": "report", "ex:version": 1})
    # wasGeneratedBy(ex:report1, -, 2012-05-24T10:00:01)
    b1.wasGeneratedBy("ex:report1", time="2012-05-24T10:00:01")
    # endBundle
    # bundle alice:bundle2
    b2 = g.bundle("alice:bundle2")
    # entity(ex:report1)
    b2.entity("ex:report1")
    # entity(ex:report2, [ prov:type="report", ex:version=2 ])
    b2.entity("ex:report2", {"prov:type": "report", "ex:version": 2})
    # wasGeneratedBy(ex:report2, -, 2012-05-25T11:00:01)
    b2.wasGeneratedBy("ex:report2", time="2012-05-25T11:00:01")
    # wasDerivedFrom(ex:report2, ex:report1)
    b2.wasDerivedFrom("ex:report2", "ex:report1")
    # endBundle
    # endDocument
    return g
def bundles2():
    """Recreate the ProvToolbox "bundles2" example document.

    Like :func:`bundles1`, but additionally demonstrates ``mentionOf`` to
    reference an entity from another bundle.

    :returns: :class:`ProvDocument` -- the assembled document.
    """
    # https://github.com/lucmoreau/ProvToolbox/blob/master/prov-n/src/test/resources/prov/bundles2.provn
    # ===========================================================================
    # document
    g = ProvDocument()
    # prefix ex <http://example.org/example/>
    # NOTE(review): the PROV-N prefix above is http://example.org/example/ but
    # the code registers http://www.example.com/ -- confirm which is intended.
    g.add_namespace("ex", "http://www.example.com/")
    # prefix alice <http://example.org/alice/>
    # prefix bob <http://example.org/bob/>
    g.add_namespace("alice", "http://example.org/alice/")
    g.add_namespace("bob", "http://example.org/bob/")
    # entity(bob:bundle4, [prov:type='prov:Bundle'])
    # wasGeneratedBy(bob:bundle4, -, 2012-05-24T10:30:00)
    # agent(ex:Bob)
    # wasAttributedTo(bob:bundle4, ex:Bob)
    g.entity("bob:bundle4", {"prov:type": PROV["Bundle"]})
    g.wasGeneratedBy("bob:bundle4", time="2012-05-24T10:30:00")
    g.agent("ex:Bob")
    g.wasAttributedTo("bob:bundle4", "ex:Bob")
    # entity(alice:bundle5, [ prov:type='prov:Bundle' ])
    # wasGeneratedBy(alice:bundle5, -, 2012-05-25T11:15:00)
    # agent(ex:Alice)
    # wasAttributedTo(alice:bundle5, ex:Alice)
    g.entity("alice:bundle5", {"prov:type": PROV["Bundle"]})
    g.wasGeneratedBy("alice:bundle5", time="2012-05-25T11:15:00")
    g.agent("ex:Alice")
    g.wasAttributedTo("alice:bundle5", "ex:Alice")
    # bundle bob:bundle4
    #     entity(ex:report1, [ prov:type="report", ex:version=1 ])
    #     wasGeneratedBy(ex:report1, -, 2012-05-24T10:00:01)
    # endBundle
    b4 = g.bundle("bob:bundle4")
    b4.entity("ex:report1", {"prov:type": "report", "ex:version": 1})
    b4.wasGeneratedBy("ex:report1", time="2012-05-24T10:00:01")
    # bundle alice:bundle5
    #     entity(ex:report1bis)
    #     mentionOf(ex:report1bis, ex:report1, bob:bundle4)
    #     entity(ex:report2, [ prov:type="report", ex:version=2 ])
    #     wasGeneratedBy(ex:report2, -, 2012-05-25T11:00:01)
    #     wasDerivedFrom(ex:report2, ex:report1bis)
    # endBundle
    b5 = g.bundle("alice:bundle5")
    b5.entity("ex:report1bis")
    b5.mentionOf("ex:report1bis", "ex:report1", "bob:bundle4")
    b5.entity("ex:report2", [("prov:type", "report"), ("ex:version", 2)])
    b5.wasGeneratedBy("ex:report2", time="2012-05-25T11:00:01")
    b5.wasDerivedFrom("ex:report2", "ex:report1bis")
    # endDocument
    return g
def collections():
    """Build a minimal document with one collection containing one entity."""
    doc = ProvDocument()
    ex_ns = Namespace("ex", "http://example.org/")
    coll = doc.collection(ex_ns["c1"])
    member = doc.entity("ex:e1")
    doc.hadMember(coll, member)
    return doc
def datatypes():
    """Build a document whose single entity carries one attribute per
    supported datatype (int, float, long, bool, str, unicode, datetime,
    internationalized literal, and a multi-line string).
    """
    doc = ProvDocument()
    ex_ns = Namespace("ex", "http://example.org/")
    doc.add_namespace(ex_ns)
    attrs = {
        "ex:int": 100,
        "ex:float": 100.123456,
        "ex:long": 123456789000,
        "ex:bool": True,
        "ex:str": "Some string",
        "ex:unicode": "Some unicode string with accents: Huỳnh Trung Đông",
        "ex:timedate": datetime.datetime(2012, 12, 12, 14, 7, 48),
        "ex:intstr": Literal(
            "PROV Internationalized string", PROV["InternationalizedString"], "en"
        ),
    }
    # The embedded newlines are deliberately part of the attribute value.
    attrs["ex:multi-line"] = """Line1
Line2
Line3"""
    doc.entity("ex:e1", attrs)
    return doc
def long_literals():
    """Build a document exercising very long namespace URIs and labels."""
    doc = ProvDocument()
    # One long URI assembled from adjacent string literals.
    base_uri = (
        "http://Lorem.ipsum/dolor/sit/amet/consectetur/adipiscing/elit/Quisque/vel/sollicitudin/felis/nec/"
        "venenatis/massa/Aenean/lectus/arcu/sagittis/sit/amet/nisl/nec/varius/eleifend/sem/In/hac/habitasse/"
        "platea/dictumst/Aliquam/eget/fermentum/enim/Curabitur/auctor/elit/non/ipsum/interdum/at/orci/aliquam/"
    )
    doc.add_namespace(Namespace("ex", base_uri))
    label = (
        "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec pellentesque luctus nulla vel "
        "ullamcorper. Donec sit amet ligula sit amet lorem pretium rhoncus vel vel lorem. Sed at "
        "consequat metus, eget eleifend massa. Fusce a facilisis turpis. Lorem volutpat."
    )
    doc.entity("ex:e1", {"prov:label": label})
    return doc
tests = [
("Bundle1", bundles1),
("Bundle2", bundles2),
("Primer", primer_example),
("W3C Publication 1", w3c_publication_1),
("W3C Publication 2", w3c_publication_2),
("collections", collections),
("datatypes", datatypes),
("Long literals", long_literals),
]
| mit | 859711ec898a4db3153e923321106390 | 35.260474 | 121 | 0.602351 | 2.878814 | false | false | false | false |
trungdong/prov | src/prov/dot.py | 1 | 13274 | """Graphical visualisation support for prov.model.
This module produces graphical visualisation for provenanve graphs.
Requires pydot module and Graphviz.
References:
* pydot homepage: https://github.com/erocarrera/pydot
* Graphviz: http://www.graphviz.org/
* DOT Language: http://www.graphviz.org/doc/info/lang.html
.. moduleauthor:: Trung Dong Huynh <trungdong@donggiang.com>
"""
from datetime import datetime
from prov.graph import INFERRED_ELEMENT_CLASS
from prov.model import (
ProvEntity,
ProvActivity,
ProvAgent,
ProvBundle,
PROV_ACTIVITY,
PROV_AGENT,
PROV_ALTERNATE,
PROV_ASSOCIATION,
PROV_ATTRIBUTION,
PROV_BUNDLE,
PROV_COMMUNICATION,
PROV_DERIVATION,
PROV_DELEGATION,
PROV_ENTITY,
PROV_GENERATION,
PROV_INFLUENCE,
PROV_INVALIDATION,
PROV_END,
PROV_MEMBERSHIP,
PROV_MENTION,
PROV_SPECIALIZATION,
PROV_START,
PROV_USAGE,
Identifier,
PROV_ATTRIBUTE_QNAMES,
sorted_attributes,
ProvException,
)
import pydot
try:
from html import escape
except ImportError:
from cgi import escape
__author__ = "Trung Dong Huynh"
__email__ = "trungdong@donggiang.com"
# Visual styles for various elements (nodes) and relations (edges)
# see http://graphviz.org/content/attrs
GENERIC_NODE_STYLE = {
None: {
"shape": "oval",
"style": "filled",
"fillcolor": "lightgray",
"color": "dimgray",
},
ProvEntity: {
"shape": "oval",
"style": "filled",
"fillcolor": "lightgray",
"color": "dimgray",
},
ProvActivity: {
"shape": "box",
"style": "filled",
"fillcolor": "lightgray",
"color": "dimgray",
},
ProvAgent: {
"shape": "house",
"style": "filled",
"fillcolor": "lightgray",
"color": "dimgray",
},
ProvBundle: {
"shape": "folder",
"style": "filled",
"fillcolor": "lightgray",
"color": "dimgray",
},
}
DOT_PROV_STYLE = {
# Generic node
0: {
"shape": "oval",
"style": "filled",
"fillcolor": "lightgray",
"color": "dimgray",
},
# Elements
PROV_ENTITY: {
"shape": "oval",
"style": "filled",
"fillcolor": "#FFFC87",
"color": "#808080",
},
PROV_ACTIVITY: {
"shape": "box",
"style": "filled",
"fillcolor": "#9FB1FC",
"color": "#0000FF",
},
PROV_AGENT: {"shape": "house", "style": "filled", "fillcolor": "#FED37F"},
PROV_BUNDLE: {"shape": "folder", "style": "filled", "fillcolor": "aliceblue"},
# Relations
PROV_GENERATION: {
"label": "wasGeneratedBy",
"fontsize": "10.0",
"color": "darkgreen",
"fontcolor": "darkgreen",
},
PROV_USAGE: {
"label": "used",
"fontsize": "10.0",
"color": "red4",
"fontcolor": "red",
},
PROV_COMMUNICATION: {"label": "wasInformedBy", "fontsize": "10.0"},
PROV_START: {"label": "wasStartedBy", "fontsize": "10.0"},
PROV_END: {"label": "wasEndedBy", "fontsize": "10.0"},
PROV_INVALIDATION: {"label": "wasInvalidatedBy", "fontsize": "10.0"},
PROV_DERIVATION: {"label": "wasDerivedFrom", "fontsize": "10.0"},
PROV_ATTRIBUTION: {
"label": "wasAttributedTo",
"fontsize": "10.0",
"color": "#FED37F",
},
PROV_ASSOCIATION: {
"label": "wasAssociatedWith",
"fontsize": "10.0",
"color": "#FED37F",
},
PROV_DELEGATION: {
"label": "actedOnBehalfOf",
"fontsize": "10.0",
"color": "#FED37F",
},
PROV_INFLUENCE: {"label": "wasInfluencedBy", "fontsize": "10.0", "color": "grey"},
PROV_ALTERNATE: {"label": "alternateOf", "fontsize": "10.0"},
PROV_SPECIALIZATION: {"label": "specializationOf", "fontsize": "10.0"},
PROV_MENTION: {"label": "mentionOf", "fontsize": "10.0"},
PROV_MEMBERSHIP: {"label": "hadMember", "fontsize": "10.0"},
}
ANNOTATION_STYLE = {
"shape": "note",
"color": "gray",
"fontcolor": "black",
"fontsize": "10",
}
ANNOTATION_LINK_STYLE = {"arrowhead": "none", "style": "dashed", "color": "gray"}
ANNOTATION_START_ROW = '<<TABLE cellpadding="0" border="0">'
ANNOTATION_ROW_TEMPLATE = """ <TR>
<TD align=\"left\" href=\"%s\">%s</TD>
<TD align=\"left\"%s>%s</TD>
</TR>"""
ANNOTATION_END_ROW = " </TABLE>>"
def htlm_link_if_uri(value):
    """Render *value* as an HTML anchor when it exposes a ``uri`` attribute,
    otherwise as its plain string form.
    """
    # hasattr only swallows AttributeError (Python 3), matching the original
    # try/except AttributeError behavior.
    if hasattr(value, "uri"):
        return '<a href="%s">%s</a>' % (value.uri, str(value))
    return str(value)
def prov_to_dot(
    bundle,
    show_nary=True,
    use_labels=False,
    direction="BT",
    show_element_attributes=True,
    show_relation_attributes=True,
):
    """
    Convert a provenance bundle/document into a DOT graphical representation.

    :param bundle: The provenance bundle/document to be converted.
    :type bundle: :class:`ProvBundle`
    :param show_nary: shows all elements in n-ary relations.
    :type show_nary: bool
    :param use_labels: uses the prov:label property of an element as its name (instead of its identifier).
    :type use_labels: bool
    :param direction: specifies the direction of the graph. Valid values are "BT" (default), "TB", "LR", "RL".
    :param show_element_attributes: shows attributes of elements.
    :type show_element_attributes: bool
    :param show_relation_attributes: shows attributes of relations.
    :type show_relation_attributes: bool
    :returns: :class:`pydot.Dot` -- the Dot object.
    """
    if direction not in {"BT", "TB", "LR", "RL"}:
        # Invalid direction is provided
        direction = "BT"  # reset it to the default value
    maindot = pydot.Dot(graph_type="digraph", rankdir=direction, charset="utf-8")
    # Maps a record's URI to the pydot.Node already created for it, so each
    # element is drawn only once even when referenced by several relations.
    node_map = {}
    # Counters used to mint unique ids: [element nodes, blank nodes,
    # bundle clusters, annotation notes].
    count = [0, 0, 0, 0]  # counters for node ids

    def _bundle_to_dot(dot, bundle):
        # Renders one bundle's records into the given (sub)graph; called
        # recursively for nested bundles via _add_bundle.
        def _attach_attribute_annotation(node, record):
            # Adding a node to show all attributes
            attributes = list(
                (attr_name, value)
                for attr_name, value in record.attributes
                if attr_name not in PROV_ATTRIBUTE_QNAMES
            )

            if not attributes:
                return  # No attribute to display

            # Sort the attributes.
            attributes = sorted_attributes(record.get_type(), attributes)

            # Each attribute becomes one row of an HTML-like DOT table.
            ann_rows = [ANNOTATION_START_ROW]
            ann_rows.extend(
                ANNOTATION_ROW_TEMPLATE
                % (
                    attr.uri,
                    escape(str(attr)),
                    ' href="%s"' % value.uri if isinstance(value, Identifier) else "",
                    escape(
                        str(value)
                        if not isinstance(value, datetime)
                        else str(value.isoformat())
                    ),
                )
                for attr, value in attributes
            )
            ann_rows.append(ANNOTATION_END_ROW)
            count[3] += 1
            annotations = pydot.Node(
                "ann%d" % count[3], label="\n".join(ann_rows), **ANNOTATION_STYLE
            )
            dot.add_node(annotations)
            # Link the note to its record with a dashed, arrowless edge.
            dot.add_edge(pydot.Edge(annotations, node, **ANNOTATION_LINK_STYLE))

        def _add_bundle(bundle):
            # Nested bundles are drawn as DOT clusters (subgraphs).
            count[2] += 1
            subdot = pydot.Cluster(
                graph_name="c%d" % count[2], URL=f'"{bundle.identifier.uri}"'
            )
            subdot.set_label('"%s"' % str(bundle.identifier))
            _bundle_to_dot(subdot, bundle)
            dot.add_subgraph(subdot)
            return subdot

        def _add_node(record):
            # Draws an element record and registers it in node_map.
            count[0] += 1
            node_id = "n%d" % count[0]
            if use_labels:
                if record.label == record.identifier:
                    node_label = f'"{record.label}"'
                else:
                    # Fancier label if both are different. The label will be
                    # the main node text, whereas the identifier will be a
                    # kind of subtitle.
                    node_label = (
                        f"<{record.label}<br />"
                        f'<font color="#333333" point-size="10">'
                        f'{record.identifier}</font>>'
                    )
            else:
                node_label = f'"{record.identifier}"'

            uri = record.identifier.uri
            style = DOT_PROV_STYLE[record.get_type()]
            node = pydot.Node(node_id, label=node_label, URL='"%s"' % uri, **style)
            node_map[uri] = node
            dot.add_node(node)

            if show_element_attributes:
                _attach_attribute_annotation(node, rec)
            return node

        def _add_generic_node(qname, prov_type=None):
            # Fallback node for an identifier referenced by a relation but
            # never declared as an element in this bundle.
            count[0] += 1
            node_id = "n%d" % count[0]
            node_label = f'"{qname}"'

            uri = qname.uri
            style = GENERIC_NODE_STYLE[prov_type] if prov_type else DOT_PROV_STYLE[0]
            node = pydot.Node(node_id, label=node_label, URL='"%s"' % uri, **style)
            node_map[uri] = node
            dot.add_node(node)
            return node

        def _get_bnode():
            # Anonymous "point" node used to fan out n-ary relations and to
            # anchor relation-attribute annotations.
            count[1] += 1
            bnode_id = "b%d" % count[1]
            bnode = pydot.Node(bnode_id, label='""', shape="point", color="gray")
            dot.add_node(bnode)
            return bnode

        def _get_node(qname, prov_type=None):
            # Resolves an identifier to its node, creating a generic node (or
            # a blank node for None) on first sight.
            if qname is None:
                return _get_bnode()
            uri = qname.uri
            if uri not in node_map:
                _add_generic_node(qname, prov_type)
            return node_map[uri]

        records = bundle.get_records()
        relations = []
        for rec in records:
            if rec.is_element():
                _add_node(rec)
            else:
                # Saving the relations for later processing
                relations.append(rec)

        if not bundle.is_bundle():
            for bundle in bundle.bundles:
                _add_bundle(bundle)

        for rec in relations:
            args = rec.args
            # skipping empty records
            if not args:
                continue
            # picking element nodes
            attr_names, nodes = zip(
                *(
                    (attr_name, value)
                    for attr_name, value in rec.formal_attributes
                    if attr_name in PROV_ATTRIBUTE_QNAMES
                )
            )
            inferred_types = list(map(INFERRED_ELEMENT_CLASS.get, attr_names))
            other_attributes = [
                (attr_name, value)
                for attr_name, value in rec.attributes
                if attr_name not in PROV_ATTRIBUTE_QNAMES
            ]
            add_attribute_annotation = show_relation_attributes and other_attributes
            add_nary_elements = len(nodes) > 2 and show_nary
            style = DOT_PROV_STYLE[rec.get_type()]
            if len(nodes) < 2:  # too few elements for a relation?
                continue  # cannot draw this

            if add_nary_elements or add_attribute_annotation:
                # a blank node for n-ary relations or the attribute annotation
                bnode = _get_bnode()

                # The relation is split into two segments through the blank
                # node; only the first segment carries the relation label.
                # the first segment
                dot.add_edge(
                    pydot.Edge(
                        _get_node(nodes[0], inferred_types[0]),
                        bnode,
                        arrowhead="none",
                        **style,
                    )
                )
                style = dict(style)  # copy the style
                del style["label"]  # not showing label in the second segment
                # the second segment
                dot.add_edge(
                    pydot.Edge(bnode, _get_node(nodes[1], inferred_types[1]), **style)
                )
                if add_nary_elements:
                    style["color"] = "gray"  # all remaining segment to be gray
                    style["fontcolor"] = "dimgray"  # text in darker gray
                    for attr_name, node, inferred_type in zip(
                        attr_names[2:], nodes[2:], inferred_types[2:]
                    ):
                        if node is not None:
                            style["label"] = attr_name.localpart
                            dot.add_edge(
                                pydot.Edge(
                                    bnode, _get_node(node, inferred_type), **style
                                )
                            )
                if add_attribute_annotation:
                    _attach_attribute_annotation(bnode, rec)
            else:
                # show a simple binary relations with no annotation
                dot.add_edge(
                    pydot.Edge(
                        _get_node(nodes[0], inferred_types[0]),
                        _get_node(nodes[1], inferred_types[1]),
                        **style,
                    )
                )

    try:
        unified = bundle.unified()
    except ProvException:
        # Could not unify this bundle
        # try the original document anyway
        unified = bundle

    _bundle_to_dot(maindot, unified)
    return maindot
| mit | 62b5fe3b5cf15c742a15bfbf0bbebb02 | 32.0199 | 110 | 0.514012 | 3.818757 | false | false | false | false |
fedora-python/pyp2rpm | tests/test_package_getters.py | 1 | 6925 | import json
import os
import tempfile
import shutil
import pytest
from flexmock import flexmock
from pyp2rpm.convertor import PyPIClient
from pyp2rpm.package_getters import LocalFileGetter, PypiDownloader, get_url
from pyp2rpm.exceptions import MissingUrlException, NoSuchPackageException
tests_dir = os.path.split(os.path.abspath(__file__))[0]
class TestPackageGetters(object):
    """Tests for get_url against the live PyPI service (marked ``webtest``)."""
    client = PyPIClient()

    @pytest.mark.parametrize(('name', 'version', 'wheel', 'hf', 'expected_url', 'expected_md5'), [
        ('setuptools', '18.3.1', False, False,
         'https://files.pythonhosted.org/packages/source/s/setuptools/setuptools-18.3.1.tar.gz',
         '748187b93152fa60287dfb896837fd7c'),
        ('setuptools', '18.3.1', True, False,
         'https://files.pythonhosted.org/packages/source/s/setuptools/setuptools-18.3.1-py2.py3-none-any.whl',
         'a21a4d02d0bab2eac499cca72faeb076'),
        ('setuptools', '18.3.1', False, True,
         'https://files.pythonhosted.org/packages/86/8a/c4666b05c74e840eb9b09d28f4e7ae76fc9075e8c653d0eb4d265a5b49d9/setuptools-18.3.1.tar.gz',
         '748187b93152fa60287dfb896837fd7c'),
        ('pypandoc', '1.1.3', False, False,
         'https://files.pythonhosted.org/packages/source/p/pypandoc/pypandoc-1.1.3.zip',
         '771f376bf9c936a90159cd94235998c2'),
    ])
    @pytest.mark.webtest
    def test_get_url(self, name, version, wheel, hf,
                     expected_url, expected_md5):
        # hf selects the hash-fragment ("pythonhosted" with hash path) URL
        # form; wheel selects the .whl artifact over the sdist.
        assert (expected_url, expected_md5) == get_url(
            self.client, name, version, wheel, hf)

    @pytest.mark.parametrize(('name', 'version', 'wheel', 'hf',
                              'exception', 'error_msg'), [
        ('nonexistent_pkg', '0.0.0', False, False, MissingUrlException,
         'Url of source archive not found.'),
        ('Pymacs', '0.25', False, False, MissingUrlException,
         'Pymacs package has no sources on PyPI, Please ask the maintainer to upload sources.'),
    ])
    @pytest.mark.webtest
    def test_get_url_raises(self, name, version, wheel, hf,
                            exception, error_msg):
        # Missing packages and source-less packages raise with exact messages.
        with pytest.raises(exception) as exc_info:
            get_url(self.client, name, version, wheel, hf)
        assert error_msg == str(exc_info.value)
class TestPypiDownloader(object):
    """Tests for PypiDownloader version resolution against canned PyPI JSON."""

    class StaticPyPIClient(PyPIClient):
        # Offline stand-in: always serves the bundled django.json fixture
        # regardless of the requested name/version.
        def get_json(self, name, version):
            with open('{0}/test_data/django.json'.format(tests_dir)) as json_info:
                return json.loads(json_info.read())

    client = StaticPyPIClient()

    @pytest.mark.parametrize(('name', 'expected_ver'), [
        ('django', '3.0.11'),
    ])
    def test_init_good_data(self, name, expected_ver):
        # Without prerelease, the latest stable release is picked.
        d = PypiDownloader(self.client, name)
        assert d.version == expected_ver

    @pytest.mark.parametrize(('name', 'expected_ver'), [
        ('django', '3.1rc1'),
    ])
    def test_init_good_data_pre(self, name, expected_ver):
        # With prerelease=True, release candidates are eligible too.
        d = PypiDownloader(self.client, name, prerelease=True)
        assert d.version == expected_ver
class TestPypiFileGetter(object):
    """Tests for PypiDownloader against a flexmock'ed PyPI client.

    The mock only knows the package "spam" with releases 1, 2 and the
    prerelease 3.rc1; every other name/version yields no data.
    """
    client = flexmock(
        package_releases=lambda n, hidden: n == 'spam' and ['3.rc1', '2', '1'] or [],
        release_urls=lambda n, v: n == 'spam' and v in [
            '3.rc1', '2', '1'] and [{'url': 'spam'}] or []
    )

    @pytest.mark.parametrize(('name', 'version'), [
        ('eggs', '2'),
        ('spam', '3'),
    ])
    def test_init_bad_data(self, name, version):
        # Unknown package or unknown version must raise.
        with pytest.raises(NoSuchPackageException):
            PypiDownloader(self.client, name, version)

    @pytest.mark.parametrize(('name', 'version', 'expected_ver'), [
        ('spam', '1', '1'),
        ('spam', None, '2'),
    ])
    def test_init_good_data(self, name, version, expected_ver):
        # version=None resolves to the latest stable release.
        d = PypiDownloader(self.client, name, version)
        assert d.version == expected_ver

    @pytest.mark.parametrize(('name', 'version', 'expected_ver'), [
        ('spam', '1', '1'),
        ('spam', None, '3.rc1'),
    ])
    def test_init_good_data_pre(self, name, version, expected_ver):
        # With prerelease=True, version=None resolves to the prerelease.
        d = PypiDownloader(self.client, name, version, prerelease=True)
        assert d.version == expected_ver
class TestLocalFileGetter(object):
    """Tests name/version parsing and retrieval for LocalFileGetter."""

    td_dir = '{0}/test_data/'.format(tests_dir)

    def setup_method(self, method):
        # Renamed from `self.l`: PEP 8 (E741) discourages the name `l`
        # because it is easily confused with the digit `1`.
        archive_paths = [
            '{0}plumbum-0.9.0.tar.gz'.format(self.td_dir),
            '{0}Sphinx-1.1.3-py2.6.egg'.format(self.td_dir),
            '{0}unextractable-1.tar'.format(self.td_dir),
            '{0}setuptools-19.6-py2.py3-none-any.whl'.format(self.td_dir),
            '{0}py2exe-0.9.2.2-py33.py34-none-any.whl'.format(self.td_dir),
            'python-foo-1.tar',
            'python-many-dashes-foo-1.tar',
        ]
        self.getters = [LocalFileGetter(path) for path in archive_paths]

    def teardown_method(self, method):
        # Remove temp dirs a test may have created through extraction.
        for file_getter in self.getters:
            if hasattr(file_getter, 'temp_dir'):
                shutil.rmtree(file_getter.temp_dir)

    @pytest.mark.parametrize(('i', 'expected'), [
        (0, 'plumbum-0.9.0'),
        (1, 'Sphinx-1.1.3-py2.6'),
        (2, 'unextractable-1'),
        (3, 'setuptools-19.6-py2.py3-none-any'),
        (4, 'py2exe-0.9.2.2-py33.py34-none-any'),
    ])
    def test_stripped_name_version(self, i, expected):
        """Archive suffix must be stripped, the rest kept verbatim."""
        assert self.getters[i]._stripped_name_version == expected

    @pytest.mark.parametrize(('i', 'expected'), [
        (0, ('plumbum', '0.9.0')),
        (1, ('Sphinx', '1.1.3')),
        (3, ('setuptools', '19.6')),
        (4, ('py2exe', '0.9.2.2')),
        (5, ('python-foo', '1')),
        (6, ('python-many-dashes-foo', '1')),
    ])
    def test_get_name_version(self, i, expected):
        """Name/version split must survive dashes in the package name."""
        assert self.getters[i].get_name_version() == expected

    def test_get_non_existent_file(self):
        with pytest.raises(EnvironmentError):
            LocalFileGetter('/this/path/doesnot/exist',
                            tempfile.gettempdir()).get()

    def test_get_existent_file(self):
        tmpdir = tempfile.gettempdir()
        in_tmp_dir = os.path.join(tmpdir, 'plumbum-0.9.0.tar.gz')
        self.getters[0].save_dir = tmpdir
        if os.path.exists(in_tmp_dir):
            os.unlink(in_tmp_dir)
        # Call get() once and reuse the result (was called twice, copying
        # the archive a second time for no reason).
        fetched = self.getters[0].get()
        assert fetched == in_tmp_dir
        assert os.path.exists(fetched)
        os.unlink(in_tmp_dir)

    def test_get_to_same_location(self):
        tmpdir = tempfile.gettempdir()
        self.getters[1].save_dir = self.td_dir
        # Fetching into the source directory must not create a copy in tmp.
        assert os.path.samefile(self.getters[1].get(), os.path.join(
            self.td_dir, 'Sphinx-1.1.3-py2.6.egg'))
        assert not os.path.exists(os.path.join(tmpdir,
                                               'Sphinx-1.1.3-py2.6.egg'))
| mit | 2035cc8d7ad0d5ee430e43935e787c81 | 38.346591 | 143 | 0.578339 | 3.23296 | false | true | false | false |
nvbn/thefuck | tests/rules/test_sed_unterminated_s.py | 4 | 1186 | import pytest
from thefuck.rules.sed_unterminated_s import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def sed_unterminated_s():
    """Return sed's stderr for an s-command missing its closing delimiter."""
    return "sed: -e expression #1, char 9: unterminated `s' command"
def test_match(sed_unterminated_s):
    """The rule fires only when sed reported an unterminated `s' command."""
    scripts = ('sed -e s/foo/bar',
               'sed -es/foo/bar',
               'sed -e s/foo/bar -e s/baz/quz')
    for script in scripts:
        assert match(Command(script, sed_unterminated_s))
    for script in scripts:
        assert not match(Command(script, ''))
def test_get_new_command(sed_unterminated_s):
    """Each broken expression must gain a trailing `/` delimiter."""
    cases = (
        ('sed -e s/foo/bar', 'sed -e s/foo/bar/'),
        ('sed -es/foo/bar', 'sed -es/foo/bar/'),
        (r"sed -e 's/\/foo/bar'", r"sed -e 's/\/foo/bar/'"),
        (r"sed -e s/foo/bar -es/baz/quz", r"sed -e s/foo/bar/ -es/baz/quz/"),
    )
    for script, expected in cases:
        assert get_new_command(Command(script, sed_unterminated_s)) == expected
| mit | df47a297ca4aa96a1dbf1f3eea0d86d5 | 41.357143 | 89 | 0.642496 | 2.817102 | false | true | false | false |
nvbn/thefuck | thefuck/entrypoints/shell_logger.py | 4 | 2009 | import array
import fcntl
from functools import partial
import mmap
import os
import pty
import signal
import sys
import termios
import tty
from .. import logs, const
def _read(f, fd):
    """Read up to 1 KiB from `fd` and append it to the mmap-backed log `f`.

    When the mapped log is full, `f.write` raises ValueError; in that case
    the oldest LOG_SIZE_TO_CLEAN bytes are discarded, the freed tail is
    zeroed, and the pending chunk is written into it.  The chunk is returned
    so pty copying can forward it to the real terminal.
    """
    data = os.read(fd, 1024)
    try:
        f.write(data)
    except ValueError:
        # mmap is full: shift the log left by LOG_SIZE_TO_CLEAN bytes,
        # zero the freed tail, and position the cursor at its start.
        position = const.LOG_SIZE_IN_BYTES - const.LOG_SIZE_TO_CLEAN
        f.move(0, const.LOG_SIZE_TO_CLEAN, position)
        f.seek(position)
        f.write(b'\x00' * const.LOG_SIZE_TO_CLEAN)
        f.seek(position)
        # Fix: the chunk that triggered the rollover was previously dropped
        # from the log; write it into the freed space.
        f.write(data)
    return data
def _set_pty_size(master_fd):
    """Copy the real terminal's window size onto the pty master.

    Without this the spawned shell would see a default 80x24 terminal.
    """
    # struct winsize: 4 unsigned shorts (rows, cols, xpixel, ypixel).
    buf = array.array('h', [0, 0, 0, 0])
    fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True)
    fcntl.ioctl(master_fd, termios.TIOCSWINSZ, buf)
def _spawn(shell, master_read):
    """Create a spawned process.

    Modified version of pty.spawn with terminal size support.

    Returns the child's exit status from os.waitpid.
    """
    pid, master_fd = pty.fork()

    if pid == pty.CHILD:
        # In the child: replace this process with the shell.
        os.execlp(shell, shell)

    # Put the controlling terminal into raw mode so keystrokes pass through
    # unmodified; remember whether we need to restore it afterwards.
    try:
        mode = tty.tcgetattr(pty.STDIN_FILENO)
        tty.setraw(pty.STDIN_FILENO)
        restore = True
    except tty.error:    # This is the same as termios.error
        restore = False

    # Propagate the current size now and on every window resize.
    _set_pty_size(master_fd)
    signal.signal(signal.SIGWINCH, lambda *_: _set_pty_size(master_fd))

    try:
        # Pump data between the real terminal and the pty; master_read also
        # records the output (see _read).
        pty._copy(master_fd, master_read, pty._read)
    except OSError:
        # Child exited and the pty was torn down; restore terminal mode.
        if restore:
            tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode)

    os.close(master_fd)
    return os.waitpid(pid, 0)[1]
return os.waitpid(pid, 0)[1]
def shell_logger(output):
    """Logs shell output to the `output`.

    Works like unix script command with `-f` flag.

    Spawns $SHELL under a pty and mirrors everything it prints into a
    fixed-size mmap-backed file at `output`; exits with the shell's status.
    """
    if not os.environ.get('SHELL'):
        logs.warn("Shell logger doesn't support your platform.")
        sys.exit(1)

    fd = os.open(output, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
    os.write(fd, b'\x00' * const.LOG_SIZE_IN_BYTES)
    buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_WRITE)
    # Fix: close the descriptor; mmap keeps its own reference to the file,
    # so the fd was simply leaked before.
    os.close(fd)
    return_code = _spawn(os.environ['SHELL'], partial(_read, buffer))

    sys.exit(return_code)
| mit | cc419f780109d61f0b53a15adc4cc683 | 24.43038 | 85 | 0.633649 | 3.153846 | false | false | false | false |
nvbn/thefuck | tests/rules/test_composer_not_command.py | 2 | 2444 | import pytest
from thefuck.rules.composer_not_command import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def composer_not_command():
    """Composer's "Did you mean this?" error for the typo `udpate`."""
    # that weird spacing is part of the actual command output
    return (
        '\n'
        '\n'
        '                    \n'
        '  [InvalidArgumentException]       \n'
        '  Command "udpate" is not defined.  \n'
        '  Did you mean this?                \n'
        '      update                        \n'
        '                    \n'
        '\n'
        '\n'
    )
@pytest.fixture
def composer_not_command_one_of_this():
    """Composer's "Did you mean one of these?" error for the typo `pdate`."""
    # that weird spacing is part of the actual command output
    return (
        '\n'
        '\n'
        '                       \n'
        '  [InvalidArgumentException]      \n'
        '  Command "pdate" is not defined.  \n'
        '  Did you mean one of these?       \n'
        '      selfupdate                   \n'
        '      self-update                  \n'
        '      update                       \n'
        '                       \n'
        '\n'
        '\n'
    )
@pytest.fixture
def composer_require_instead_of_install():
    """Composer's hint that `install <pkg>` should have been `require <pkg>`."""
    return 'Invalid argument package. Use "composer require package" instead to add packages to your composer.json.'
def test_match(composer_not_command, composer_not_command_one_of_this, composer_require_instead_of_install):
    """The rule triggers only for composer's own error messages."""
    matching = (
        ('composer udpate', composer_not_command),
        ('composer pdate', composer_not_command_one_of_this),
        ('composer install package', composer_require_instead_of_install),
    )
    for script, output in matching:
        assert match(Command(script, output))
    # A non-composer command with similar output must not trigger.
    assert not match(Command('ls update', composer_not_command))
def test_get_new_command(composer_not_command, composer_not_command_one_of_this, composer_require_instead_of_install):
    """The corrected command comes from composer's own suggestion."""
    cases = (
        ('composer udpate', composer_not_command, 'composer update'),
        ('composer pdate', composer_not_command_one_of_this,
         'composer selfupdate'),
        ('composer install package', composer_require_instead_of_install,
         'composer require package'),
    )
    for script, output, expected in cases:
        assert get_new_command(Command(script, output)) == expected
| mit | e00cc20d10a5f9d1974695ba58ffce76 | 36.030303 | 118 | 0.51964 | 4.142373 | false | true | false | false |
nvbn/thefuck | tests/functional/test_fish.py | 6 | 2095 | import pytest
from tests.functional.plots import with_confirmation, without_confirmation, \
refuse_with_confirmation, select_command_with_arrows
# Docker images used to run the functional suite: (tag, Dockerfile, shell).
# Both python2 and python3 bases are exercised with the fish shell.
containers = (('thefuck/python3-fish',
               u'''FROM python:3
# Use jessie-backports since it has the fish package. See here for details:
# https://github.com/tianon/docker-brew-debian/blob/88ae21052affd8a14553bb969f9d41c464032122/jessie/backports/Dockerfile
RUN awk '$1 ~ "^deb" { $3 = $3 "-backports"; print; exit }' /etc/apt/sources.list > /etc/apt/sources.list.d/backports.list
RUN apt-get update
RUN apt-get install -yy fish''',
               u'fish'),
              ('thefuck/python2-fish',
               u'''FROM python:2
# Use jessie-backports since it has the fish package. See here for details:
# https://github.com/tianon/docker-brew-debian/blob/88ae21052affd8a14553bb969f9d41c464032122/jessie/backports/Dockerfile
RUN awk '$1 ~ "^deb" { $3 = $3 "-backports"; print; exit }' /etc/apt/sources.list > /etc/apt/sources.list.d/backports.list
RUN apt-get update
RUN apt-get install -yy fish''',
               u'fish'))
@pytest.fixture(params=containers)
def proc(request, spawnu, TIMEOUT):
    """Spawn each container, install thefuck, and drop into fish.

    Yields a pexpect-style process with the alias already configured.
    """
    proc = spawnu(*request.param)
    proc.sendline(u"pip install /src")
    assert proc.expect([TIMEOUT, u'Successfully installed'])
    proc.sendline(u'thefuck --alias > ~/.config/fish/config.fish')
    proc.sendline(u'fish')
    return proc
# Thin wrappers delegating to the shared scenario helpers in
# tests/functional/plots; each runs once per container via the `proc` fixture.


@pytest.mark.functional
def test_with_confirmation(proc, TIMEOUT):
    with_confirmation(proc, TIMEOUT)


@pytest.mark.functional
def test_select_command_with_arrows(proc, TIMEOUT):
    select_command_with_arrows(proc, TIMEOUT)


@pytest.mark.functional
def test_refuse_with_confirmation(proc, TIMEOUT):
    refuse_with_confirmation(proc, TIMEOUT)


@pytest.mark.functional
def test_without_confirmation(proc, TIMEOUT):
    without_confirmation(proc, TIMEOUT)

# TODO: ensure that history changes.
| mit | a0115acda8e152d2b2680901bf3d46b5 | 39.288462 | 141 | 0.651551 | 3.526936 | false | true | false | false |
nvbn/thefuck | thefuck/rules/path_from_history.py | 5 | 1493 | from collections import Counter
import re
from thefuck.system import Path
from thefuck.utils import (get_valid_history_without_current,
memoize, replace_argument)
from thefuck.shells import shell
# Error-output patterns whose capture group is the missing path
# (cd, ls, cat, and zsh variants of "no such file or directory").
patterns = [r'no such file or directory: (.*)$',
            r"cannot access '(.*)': No such file or directory",
            r': (.*): No such file or directory',
            r"can't cd to (.*)$"]
@memoize
def _get_destination(command):
    """Return the missing path from the error output, if the script used it."""
    for pattern in patterns:
        matches = re.findall(pattern, command.output)
        # Only accept a capture that actually appears as a script argument.
        if matches and matches[0] in command.script_parts:
            return matches[0]
def match(command):
    """Trigger when the error output names a missing path used in the script."""
    return bool(_get_destination(command))
def _get_all_absolute_paths_from_history(command):
    """Yield absolute/home-relative paths from history, most frequent first."""
    seen = Counter()
    for line in get_valid_history_without_current(command):
        # Skip the executable itself; inspect only the arguments.
        for token in shell.split_command(line)[1:]:
            if not token.startswith(('/', '~')):
                continue
            # Drop a single trailing slash so equal paths are counted together.
            if token.endswith('/'):
                token = token[:-1]
            seen[token] += 1
    return (path for path, _ in seen.most_common(None))
def get_new_command(command):
    """Swap the broken path for every matching, existing path from history."""
    destination = _get_destination(command)
    fixed_commands = []
    for path in _get_all_absolute_paths_from_history(command):
        if path.endswith(destination) and Path(path).expanduser().exists():
            fixed_commands.append(
                replace_argument(command.script, destination, path))
    return fixed_commands


priority = 800
| mit | 1f763da0f4b6e904b47f8973d76084d9 | 27.169811 | 63 | 0.616209 | 4.057065 | false | false | false | false |
nvbn/thefuck | thefuck/rules/no_command.py | 1 | 1254 | from thefuck.utils import get_all_executables, get_close_matches, \
get_valid_history_without_current, get_closest, which
from thefuck.specific.sudo import sudo_support
@sudo_support
def match(command):
    """Trigger for unknown executables that have close matches on PATH."""
    executable = command.script_parts[0]
    if which(executable):
        return False
    if ('not found' not in command.output
            and 'is not recognized as' not in command.output):
        return False
    return bool(get_close_matches(executable, get_all_executables()))
def _get_used_executables(command):
    """Yield the first word of every previously-run history command."""
    for script in get_valid_history_without_current(command):
        executable, _, _ = script.partition(' ')
        yield executable
@sudo_support
def get_new_command(command):
    """Suggest replacements for a mistyped executable.

    Executables the user already ran (from history) are offered first,
    followed by close matches among everything on PATH.
    """
    old_command = command.script_parts[0]

    suggestions = []
    from_history = get_closest(
        old_command, _get_used_executables(command),
        fallback_to_first=False)
    if from_history:
        suggestions.append(from_history)

    # Close matches from PATH, skipping anything already suggested.
    close = [candidate
             for candidate in get_close_matches(old_command,
                                                get_all_executables())
             if candidate not in suggestions]
    suggestions.extend(close)

    return [command.script.replace(old_command, candidate, 1)
            for candidate in suggestions]


priority = 3000
| mit | 1452025fa1e4f2a2a5171471d3a0e83a | 29.585366 | 76 | 0.613238 | 3.8 | false | false | false | false |
nvbn/thefuck | thefuck/const.py | 2 | 2944 | # -*- encoding: utf-8 -*-
class _GenConst(object):
    """Named singleton constant with a readable repr (keys, UI actions)."""

    def __init__(self, name):
        self._name = name

    def __repr__(self):
        return u'<const: {}>'.format(self._name)


# Keys recognized by the interactive command selector.
KEY_UP = _GenConst('↑')
KEY_DOWN = _GenConst('↓')
KEY_CTRL_C = _GenConst('Ctrl+C')
KEY_CTRL_N = _GenConst('Ctrl+N')
KEY_CTRL_P = _GenConst('Ctrl+P')

# Raw control characters mapped to the key constants above.
KEY_MAPPING = {'\x0e': KEY_CTRL_N,
               '\x03': KEY_CTRL_C,
               '\x10': KEY_CTRL_P}

# User actions in the interactive selector.
ACTION_SELECT = _GenConst('select')
ACTION_ABORT = _GenConst('abort')
ACTION_PREVIOUS = _GenConst('previous')
ACTION_NEXT = _GenConst('next')

# Sentinel meaning "enable every rule" in the `rules` setting.
ALL_ENABLED = _GenConst('All rules enabled')
DEFAULT_RULES = [ALL_ENABLED]
DEFAULT_PRIORITY = 1000

DEFAULT_SETTINGS = {'rules': DEFAULT_RULES,
                    'exclude_rules': [],
                    'wait_command': 3,
                    'require_confirmation': True,
                    'no_colors': False,
                    'debug': False,
                    'priority': {},
                    'history_limit': None,
                    'alter_history': True,
                    'wait_slow_command': 15,
                    'slow_commands': ['lein', 'react-native', 'gradle',
                                      './gradlew', 'vagrant'],
                    'repeat': False,
                    'instant_mode': False,
                    'num_close_matches': 3,
                    'env': {'LC_ALL': 'C', 'LANG': 'C', 'GIT_TRACE': '1'},
                    'excluded_search_path_prefixes': []}

# Environment variable -> the settings attribute it overrides.
ENV_TO_ATTR = {'THEFUCK_RULES': 'rules',
               'THEFUCK_EXCLUDE_RULES': 'exclude_rules',
               'THEFUCK_WAIT_COMMAND': 'wait_command',
               'THEFUCK_REQUIRE_CONFIRMATION': 'require_confirmation',
               'THEFUCK_NO_COLORS': 'no_colors',
               'THEFUCK_DEBUG': 'debug',
               'THEFUCK_PRIORITY': 'priority',
               'THEFUCK_HISTORY_LIMIT': 'history_limit',
               'THEFUCK_ALTER_HISTORY': 'alter_history',
               'THEFUCK_WAIT_SLOW_COMMAND': 'wait_slow_command',
               'THEFUCK_SLOW_COMMANDS': 'slow_commands',
               'THEFUCK_REPEAT': 'repeat',
               'THEFUCK_INSTANT_MODE': 'instant_mode',
               'THEFUCK_NUM_CLOSE_MATCHES': 'num_close_matches',
               'THEFUCK_EXCLUDED_SEARCH_PATH_PREFIXES': 'excluded_search_path_prefixes'}

# Header written at the top of a freshly generated settings file.
# (Typo fix: "bellow" -> "below".)
SETTINGS_HEADER = u"""# The Fuck settings file
#
# The rules are defined as in the example below:
#
# rules = ['cd_parent', 'git_push', 'python_command', 'sudo']
#
# The default values are as follows. Uncomment and change to fit your needs.
# See https://github.com/nvbn/thefuck#settings for more information.
#

"""

ARGUMENT_PLACEHOLDER = 'THEFUCK_ARGUMENT_PLACEHOLDER'

CONFIGURATION_TIMEOUT = 60

# Zero-width-space marker used to find user commands in the shell log.
USER_COMMAND_MARK = u'\u200B' * 10

LOG_SIZE_IN_BYTES = 1024 * 1024

LOG_SIZE_TO_CLEAN = 10 * 1024

DIFF_WITH_ALIAS = 0.5

SHELL_LOGGER_SOCKET_ENV = 'SHELL_LOGGER_SOCKET'

SHELL_LOGGER_LIMIT = 5
| mit | ad09d1e4ef07637864470d0ed9c5295f | 31.307692 | 88 | 0.542517 | 3.406721 | false | false | false | false |
nvbn/thefuck | thefuck/rules/apt_get.py | 4 | 1386 | from types import ModuleType
from thefuck.specific.apt import apt_available
from thefuck.utils import memoize, which
from thefuck.shells import shell
# The rule is only useful where Ubuntu's command-not-found database exists;
# its API differs between releases, so pick the right accessor up front.
try:
    from CommandNotFound import CommandNotFound

    enabled_by_default = apt_available

    if isinstance(CommandNotFound, ModuleType):
        # For ubuntu 18.04+
        _get_packages = CommandNotFound.CommandNotFound().get_packages
    else:
        # For older versions
        _get_packages = CommandNotFound().getPackages
except ImportError:
    # No command-not-found available: disable the rule entirely.
    enabled_by_default = False
def _get_executable(command):
    """Return the executable name, skipping a leading `sudo` if present."""
    parts = command.script_parts
    return parts[1] if parts[0] == 'sudo' else parts[0]
@memoize
def get_package(executable):
    """Return the name of the apt package providing `executable`, or None."""
    try:
        packages = _get_packages(executable)
        return packages[0][0]
    except IndexError:
        # IndexError is thrown when no matching package is found
        return None
def match(command):
    """Trigger when a missing executable can be installed via apt."""
    if ('not found' not in command.output
            and 'not installed' not in command.output):
        return False
    executable = _get_executable(command)
    # Truthy only if it is genuinely absent and a package provides it.
    return not which(executable) and get_package(executable)
def get_new_command(command):
    """Prefix the failed command with the apt-get install of its package."""
    executable = _get_executable(command)
    package_name = get_package(executable)
    template = shell.and_('sudo apt-get install {}', '{}')
    return template.format(package_name, command.script)
| mit | a3d5891baa3819f6b055ee67889070c2 | 26.72 | 74 | 0.689033 | 4.040816 | false | false | false | false |
nvbn/thefuck | release.py | 3 | 1024 | #!/usr/bin/env python
from subprocess import call
import os
import re
# Bumped version string; set as a side effect of get_new_setup_py_lines()
# and read by the git/twine commands below.
version = None


def get_new_setup_py_lines():
    """Yield setup.py lines with the minor version bumped by one.

    Side effect: stores the new version in the module-level `version`.
    """
    global version
    with open('setup.py', 'r') as sf:
        current_setup = sf.readlines()
    for line in current_setup:
        if line.startswith('VERSION = '):
            # NOTE(review): the regex assumes a two-part X.Y version; a
            # three-part version would silently lose its patch component —
            # verify against the project's versioning scheme.
            major, minor = re.findall(r"VERSION = '(\d+)\.(\d+)'", line)[0]
            version = "{}.{}".format(major, int(minor) + 1)
            yield "VERSION = '{}'\n".format(version)
        else:
            yield line
# Rewrite setup.py with the bumped version (also fills in `version`).
lines = list(get_new_setup_py_lines())
with open('setup.py', 'w') as sf:
    sf.writelines(lines)

# Commit, tag, and push the version bump.
call('git pull', shell=True)
call('git commit -am "Bump to {}"'.format(version), shell=True)
call('git tag {}'.format(version), shell=True)
call('git push', shell=True)
call('git push --tags', shell=True)

# Build fresh artifacts and upload them to PyPI.
env = os.environ
env['CONVERT_README'] = 'true'
call('rm -rf dist/*', shell=True, env=env)
call('python setup.py sdist bdist_wheel', shell=True, env=env)
call('twine upload dist/*', shell=True, env=env)
| mit | aaa6c2f2a9b06cb44103f66ce0f91ff7 | 26.675676 | 75 | 0.615234 | 3.190031 | false | false | false | false |
nvbn/thefuck | tests/rules/test_git_add.py | 5 | 1353 | import pytest
from thefuck.rules.git_add import match, get_new_command
from thefuck.types import Command
@pytest.fixture(autouse=True)
def path_exists(mocker):
    """Patch Path.exists in the rule to True; tests flip it as needed."""
    return mocker.patch('thefuck.rules.git_add.Path.exists',
                        return_value=True)
@pytest.fixture
def output(target):
    """Build git's pathspec error for the parametrized `target`."""
    return ("error: pathspec '{}' did not match any "
            'file(s) known to git.'.format(target))
@pytest.mark.parametrize('script, target', [
    ('git submodule update unknown', 'unknown'),
    ('git commit unknown', 'unknown')])
def test_match(output, script, target):
    """Pathspec error plus existing path (autouse fixture) must match."""
    assert match(Command(script, output))
# NOTE(review): the third case passes the `output` fixture function object
# as the parametrized value — presumably intended as a placeholder string;
# verify against the original test intent.
@pytest.mark.parametrize('script, target, exists', [
    ('git submodule update known', '', True),
    ('git commit known', '', True),
    ('git submodule update known', output, False)])
def test_not_match(path_exists, output, script, target, exists):
    """No match without the error text, or when the path does not exist."""
    path_exists.return_value = exists
    assert not match(Command(script, output))
@pytest.mark.parametrize('script, target, new_command', [
    ('git submodule update unknown', 'unknown',
     'git add -- unknown && git submodule update unknown'),
    ('git commit unknown', 'unknown',
     'git add -- unknown && git commit unknown')])
def test_get_new_command(output, script, target, new_command):
    """The fix prepends `git add -- <target>` to the failed command."""
    assert get_new_command(Command(script, output)) == new_command
| mit | 7c985f52f77a1fe57cb660fef0137ac3 | 32.825 | 66 | 0.675536 | 3.789916 | false | true | false | false |
honeynet/droidbot | droidbot/adapter/user_input_monitor.py | 2 | 2065 | import subprocess
import logging
from .adapter import Adapter
class UserInputMonitor(Adapter):
    """
    A connection with the target device through `getevent`.
    `getevent` is able to get raw user input from device.
    """

    def __init__(self, device=None):
        """
        initialize connection
        :param device: a Device instance
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        if device is None:
            from droidbot.device import Device
            device = Device()
        self.device = device

        self.connected = False
        self.process = None

        # Raw getevent lines are mirrored to this file when the device has
        # an output directory configured.
        if device.output_dir is None:
            self.out_file = None
        else:
            self.out_file = "%s/user_input.txt" % device.output_dir

    def connect(self):
        """Start `adb shell getevent -lt` and a thread consuming its output."""
        self.process = subprocess.Popen(["adb", "-s", self.device.serial, "shell", "getevent", "-lt"],
                                        stdin=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        stdout=subprocess.PIPE)
        import threading
        listen_thread = threading.Thread(target=self.handle_output)
        listen_thread.start()

    def disconnect(self):
        """Stop the output loop and terminate the getevent subprocess."""
        self.connected = False
        if self.process is not None:
            self.process.terminate()

    def check_connectivity(self):
        """Return True while the output-handling thread is running."""
        return self.connected

    def handle_output(self):
        """Read getevent lines until disconnected, mirroring them to file."""
        self.connected = True
        f = open(self.out_file, 'w') if self.out_file is not None else None
        try:
            while self.connected:
                if self.process is None:
                    continue
                line = self.process.stdout.readline()
                if not isinstance(line, str):
                    line = line.decode()
                self.parse_line(line)
                if f is not None:
                    f.write(line)
        finally:
            # Fix: the log file leaked if decode()/parse_line raised.
            if f is not None:
                f.close()
        print("[CONNECTION] %s is disconnected" % self.__class__.__name__)

    def parse_line(self, getevent_line):
        # Hook for subclasses; the base implementation ignores the line.
        pass
| mit | 8a70e5c2e765cee71fe2f05d7459e7b7 | 28.927536 | 102 | 0.540436 | 4.548458 | false | false | false | false |
honeynet/droidbot | droidbot/input_manager.py | 2 | 6468 | import json
import logging
import subprocess
import time
from .input_event import EventLog
from .input_policy import UtgBasedInputPolicy, UtgNaiveSearchPolicy, UtgGreedySearchPolicy, \
UtgReplayPolicy, \
ManualPolicy, \
POLICY_NAIVE_DFS, POLICY_GREEDY_DFS, \
POLICY_NAIVE_BFS, POLICY_GREEDY_BFS, \
POLICY_REPLAY, POLICY_MEMORY_GUIDED, \
POLICY_MANUAL, POLICY_MONKEY, POLICY_NONE
# Defaults used when the caller does not configure the input manager.
DEFAULT_POLICY = POLICY_GREEDY_DFS
DEFAULT_EVENT_INTERVAL = 1          # seconds between injected events
DEFAULT_EVENT_COUNT = 100000000     # effectively unlimited
DEFAULT_TIMEOUT = -1                # negative means no timeout
class UnknownInputException(Exception):
    """Raised when an input event cannot be recognized or handled."""
class InputManager(object):
    """
    This class manages all events to send during app running
    """

    def __init__(self, device, app, policy_name, random_input,
                 event_count, event_interval,
                 script_path=None, profiling_method=None, master=None,
                 replay_output=None):
        """
        manage input event sent to the target device
        :param device: instance of Device
        :param app: instance of App
        :param policy_name: policy of generating events, string
        :return:
        """
        self.logger = logging.getLogger('InputEventManager')
        self.enabled = True
        self.device = device
        self.app = app
        self.policy_name = policy_name
        self.random_input = random_input
        self.events = []
        self.policy = None
        self.script = None
        self.event_count = event_count
        self.event_interval = event_interval
        self.replay_output = replay_output

        self.monkey = None

        if script_path is not None:
            # Fix: close the script file after reading (was left open).
            with open(script_path, 'r') as f:
                script_dict = json.load(f)
            from .input_script import DroidBotScript
            self.script = DroidBotScript(script_dict)

        self.policy = self.get_input_policy(device, app, master)
        self.profiling_method = profiling_method

    def get_input_policy(self, device, app, master):
        """Instantiate the input policy selected by `self.policy_name`.

        Returns None for the "none" and "monkey" policies, which are
        handled directly in start().
        """
        if self.policy_name == POLICY_NONE:
            input_policy = None
        elif self.policy_name == POLICY_MONKEY:
            input_policy = None
        elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]:
            input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name)
        elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]:
            input_policy = UtgGreedySearchPolicy(device, app, self.random_input, self.policy_name)
        elif self.policy_name == POLICY_MEMORY_GUIDED:
            from .input_policy2 import MemoryGuidedPolicy
            input_policy = MemoryGuidedPolicy(device, app, self.random_input)
        elif self.policy_name == POLICY_REPLAY:
            input_policy = UtgReplayPolicy(device, app, self.replay_output)
        elif self.policy_name == POLICY_MANUAL:
            input_policy = ManualPolicy(device, app)
        else:
            self.logger.warning("No valid input policy specified. Using policy \"none\".")
            input_policy = None
        if isinstance(input_policy, UtgBasedInputPolicy):
            input_policy.script = self.script
            input_policy.master = master
        return input_policy

    def add_event(self, event):
        """
        add one event to the event list
        :param event: the event to be added, should be subclass of AppEvent
        :return:
        """
        if event is None:
            return
        self.events.append(event)

        event_log = EventLog(self.device, self.app, event, self.profiling_method)
        event_log.start()
        # Wait at least one interval, longer while the device asks us to
        # pause sending events.
        while True:
            time.sleep(self.event_interval)
            if not self.device.pause_sending_event:
                break
        event_log.stop()

    def start(self):
        """
        start sending event
        """
        self.logger.info("start sending events, policy is %s" % self.policy_name)
        try:
            if self.policy is not None:
                self.policy.start(self)
            elif self.policy_name == POLICY_NONE:
                # Just launch the app and idle until disabled.
                self.device.start_app(self.app)
                if self.event_count == 0:
                    return
                while self.enabled:
                    time.sleep(1)
            elif self.policy_name == POLICY_MONKEY:
                throttle = self.event_interval * 1000
                monkey_cmd = "adb -s %s shell monkey %s --ignore-crashes --ignore-security-exceptions" \
                             " --throttle %d -v %d" % \
                             (self.device.serial,
                              "" if self.app.get_package_name() is None else "-p " + self.app.get_package_name(),
                              throttle,
                              self.event_count)
                self.monkey = subprocess.Popen(monkey_cmd.split(),
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE)
                # Fix: Popen without text mode yields bytes, so the previous
                # '' (str) sentinel never matched and the loop never ended
                # at EOF; use b'' to stop when monkey's stdout closes.
                for monkey_out_line in iter(self.monkey.stdout.readline, b''):
                    if not isinstance(monkey_out_line, str):
                        monkey_out_line = monkey_out_line.decode()
                    self.logger.info(monkey_out_line)
                # may be disturbed from outside
                if self.monkey is not None:
                    self.monkey.wait()
            elif self.policy_name == POLICY_MANUAL:
                # Let the user drive the app; snapshot state on each ENTER.
                self.device.start_app(self.app)
                while self.enabled:
                    keyboard_input = input("press ENTER to save current state, type q to exit...")
                    if keyboard_input.startswith('q'):
                        break
                    state = self.device.get_current_state()
                    if state is not None:
                        state.save2dir()
        except KeyboardInterrupt:
            pass

        self.stop()
        self.logger.info("Finish sending events")

    def stop(self):
        """
        stop sending event
        """
        if self.monkey:
            if self.monkey.returncode is None:
                self.monkey.terminate()
            self.monkey = None
            # Make sure the on-device monkey process is gone too.
            pid = self.device.get_app_pid("com.android.commands.monkey")
            if pid is not None:
                self.device.adb.shell("kill -9 %d" % pid)
        self.enabled = False
| mit | 5011e386583bb87a98335f2722bd2040 | 37.5 | 113 | 0.551948 | 4.205462 | false | false | false | false |
honeynet/droidbot | droidbot/adapter/telnet.py | 2 | 2897 | import logging
import threading
from .adapter import Adapter
class TelnetException(Exception):
    """Raised when the emulator telnet console cannot be reached or used."""
class TelnetConsole(Adapter):
    """
    interface of telnet console, see:
    http://developer.android.com/tools/devices/emulator.html
    """

    def __init__(self, device=None, auth_token=None):
        """
        Initiate a emulator console via telnet.
        On some devices, an authentication token is required to use telnet
        :param device: instance of Device
        :return:
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        if device is None:
            from droidbot.device import Device
            device = Device()
        self.device = device
        self.auth_token = auth_token
        self.console = None
        # Serializes access to the single telnet session across threads.
        self.__lock__ = threading.Lock()

    def connect(self):
        """Open the console of an emulator device; raise TelnetException otherwise."""
        if self.device.serial and self.device.serial.startswith("emulator-"):
            host = "localhost"
            # Emulator console port is encoded in the serial ("emulator-<port>").
            port = int(self.device.serial[9:])
            from telnetlib import Telnet
            self.console = Telnet(host, port)
            if self.auth_token is not None:
                self.run_cmd("auth %s" % self.auth_token)
            if self.check_connectivity():
                self.logger.debug("telnet successfully initiated, the port is %d" % port)
                return
        raise TelnetException()

    def run_cmd(self, args):
        """
        run a command in emulator console
        :param args: arguments to be executed in telnet console
        :return: decoded console output up to the "OK" marker, or None
        """
        if self.console is None:
            self.logger.warning("telnet is not connected!")
            return None
        if isinstance(args, list):
            cmd_line = " ".join(args)
        elif isinstance(args, str):
            cmd_line = args
        else:
            # Fix: concatenating a non-str into the message raised TypeError.
            self.logger.warning("unsupported command format: %s", args)
            return None
        self.logger.debug('command:')
        self.logger.debug(cmd_line)
        cmd_line += '\n'

        self.__lock__.acquire()
        try:
            # Fix: telnetlib on Python 3 requires bytes for write()/read_until();
            # passing str raised TypeError. The reply is decoded back to str
            # to preserve the previous return type.
            self.console.write(cmd_line.encode())
            r = self.console.read_until(b'OK', 5).decode()
            # eat the rest outputs
            self.console.read_until(b'NEVER MATCH', 1)
        finally:
            # Fix: the lock was leaked if the console raised mid-command.
            self.__lock__.release()
        self.logger.debug('return:')
        self.logger.debug(r)
        return r

    def check_connectivity(self):
        """
        check if console is connected
        :return: True for connected
        """
        if self.console is None:
            return False
        try:
            self.run_cmd("help")
        except Exception:
            return False
        return True

    def disconnect(self):
        """
        disconnect telnet
        """
        if self.console is not None:
            self.console.close()
        print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
| mit | a146498f8a5e6b6ca456278c07236d87 | 27.97 | 89 | 0.557818 | 4.402736 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.